id (string, 2–8 chars) | text (string, 16–264k chars) | dataset_id (string, 1 class) |
---|---|---|
1667510
|
from django.test import TestCase
from .models import Comment, Profile, Post, Like
from django.urls import resolve, reverse
from .views import signup
class TestProfile(TestCase):
'''Test Class to test the Profile Class'''
def setUp(self)-> None:
'''To set up test class before running every test case'''
self.test_profile = Profile(username = 'Test', bio = 'This is just a test user')
def tearDown(self) -> None:
'''To clean up after running every testcase'''
Profile.objects.all().delete()
def test_isinstance(self):
'''To test if object is an instance of Class'''
self.assertTrue(isinstance(self.test_profile, Profile))
def test_save_profile(self):
'''To test saving the profile'''
self.test_profile.save_class()
profiles = Profile.objects.all()
self.assertTrue(len(profiles) == 1)
def test_delete_profile(self):
'''To test deleting profile'''
self.test_profile.save_class()
self.test_profile.delete_class()
profiles = Profile.objects.all()
self.assertTrue(len(profiles) == 0)
def test_update_profile(self):
'''Test the profile updating'''
self.test_profile.save_class()
self.test_profile.update_class(username = 'newTest')
self.assertEqual(self.test_profile.username, 'newTest')
def test_search_by_profile_name(self):
        '''Test searching for a profile by username'''
self.test_profile.save_class()
profiles = Profile.search_profile('Test')
self.assertTrue(len(profiles) == 1)
def test_get_profile_by_id(self):
        '''Test getting a profile by its id'''
self.test_profile.save_class()
profile = Profile.get_profile_by_id(self.test_profile.id)
self.assertEqual(self.test_profile.username, profile.username)
class TestPost(TestCase):
'''Test Class to test the Post Class'''
def setUp(self) -> None:
'''To set up test class before running every test case'''
self.test_profile = Profile(username = 'Test', bio = 'This is just a test user')
self.test_profile.save_class()
self.test_post = Post(post_caption='What a time to be alive',user_profile = self.test_profile)
def tearDown(self) -> None:
'''To clean up after every test case'''
Post.objects.all().delete()
def test_isinstance(self):
'''To test if object is an instance of Class'''
self.assertTrue(isinstance(self.test_post, Post))
def test_save_post(self):
'''To test saving the post'''
self.test_post.save_class()
posts = Post.objects.all()
self.assertTrue(len(posts) == 1)
def test_update_post(self):
'''Test the post updating'''
self.test_post.save_class()
self.test_post.update_class(post_caption = 'You only live once')
self.assertEqual(self.test_post.post_caption, 'You only live once')
def test_get_post_by_id(self):
        '''Test getting a post by its id'''
self.test_post.save_class()
post = Post.get_post_by_id(self.test_post.id)
self.assertEqual(self.test_post.post_caption, post.post_caption)
class TestComment(TestCase):
'''Test Class for Comment Class'''
def setUp(self)-> None:
'''To set up test class before running every test case'''
self.test_profile = Profile(username = 'Test', bio = 'This is just a test user')
self.test_profile.save_class()
self.test_post = Post(post_caption='What a time to be alive',user_profile = self.test_profile)
self.test_post.save_class()
self.test_comment = Comment(comment = 'This is a test comment I think', comment_post = self.test_post, comment_profile = self.test_profile)
def tearDown(self) -> None:
'''To clean up after running every testcase'''
Comment.objects.all().delete()
def test_isinstance(self):
'''To test if object is an instance of Class'''
self.assertTrue(isinstance(self.test_comment, Comment))
def test_save_comment(self):
'''To test saving the comment'''
self.test_comment.save_class()
comments = Comment.objects.all()
self.assertTrue(len(comments) == 1)
def test_delete_comment(self):
'''To test deleting a comment'''
self.test_comment.save_class()
self.test_comment.delete_class()
comments = Comment.objects.all()
self.assertTrue(len(comments) == 0)
def test_update_comment(self):
'''Test the comment updating'''
self.test_comment.save_class()
self.test_comment.update_class(comment = 'This is an updated comment')
self.assertEqual(self.test_comment.comment, 'This is an updated comment')
|
StarcoderdataPython
|
11266018
|
<reponame>Ian-Almeida/survey-site
from fastapi import APIRouter, Depends, HTTPException
from app.models.user import User, UserCreate
from typing import List
from app.db.config_db import getDB
from pymongo.database import Database
from app.core import dependencies
from app.crud import crud_user
router = APIRouter()
@router.get("/me/", response_model=User)
async def read_users_me(current_user: User = Depends(dependencies.get_current_active_user)):
return current_user
@router.get("/", response_model=List[User])
def get_all_users(
db: Database = Depends(getDB),
current_user: User = Depends(dependencies.get_current_active_user)
):
users = []
for user in db.User.find():
users.append(User(**user))
return users
@router.post("/", response_model=User)
async def create_user(
user: UserCreate,
db=Depends(getDB),
):
created = crud_user.create(db=db, data_in=user)
if not created:
        raise HTTPException(status_code=500, detail='Could not create user')
return created
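# Illustrative wiring (an assumption about the surrounding project layout,
# inferred from the imports above; the module path is hypothetical):
#   from fastapi import FastAPI
#   from app.api import users          # module holding this `router`
#   app = FastAPI()
#   app.include_router(users.router, prefix="/users", tags=["users"])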
|
StarcoderdataPython
|
4832419
|
<reponame>cll27/pvae<gh_stars>1-10
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.autograd import Function
from pvae.ops.mobius_poincare import *
from pvae.utils import Arcsinh, Constants
class PoincareLayer(nn.Module):
def __init__(self, in_features, out_features, c):
super(PoincareLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.c = c
def get_bias(self):
raise NotImplementedError
def forward(self, input):
raise NotImplementedError
class Hypergyroplane(PoincareLayer):
def __init__(self, in_features, out_features, bias=True, c=torch.ones(1)):
super(Hypergyroplane, self).__init__(
in_features,
out_features,
c
)
self._weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self._bias = Parameter(torch.Tensor(out_features, 1))
else:
            self.register_parameter('_bias', None)
self.reset_parameters()
@property
def weight(self):
return parallel_transport_x(self.get_bias(), self._weight, self.c) # weight \in T_0 => weight \in T_bias
def get_bias(self):
return exp_map_zero(self._weight * self._bias, self.c) # reparameterisation of a point on the manifold
def reset_parameters(self):
init.kaiming_normal_(self._weight, a=math.sqrt(5))
        if self._bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self._weight)
bound = 4 / math.sqrt(fan_in)
init.uniform_(self._bias, -bound, bound)
def forward(self, input):
shape = input.shape
input = input.view(-1, self.in_features)
B = input.shape[0]
bias = self.get_bias()
weight = self.weight
norm_weight = weight.pow(2).sum(-1, keepdim=False).sqrt()
# poincare_norm_weight = 2 * self._weight.pow(2).sum(-1) #lambda_x(bias, self.c).t() * norm_weight
bias = bias.expand(B, self.out_features, self.in_features)
input = input.unsqueeze(1).expand(B, self.out_features, self.in_features)
dir_log_input = mob_add(-bias, input, self.c)
denom = torch.clamp((1 - self.c * dir_log_input.pow(2).sum(-1)) * norm_weight, min=Constants.eta)
hyperplane_dist = Arcsinh.apply(2 * self.c.sqrt() * (dir_log_input * weight).sum(-1) / denom) / self.c.sqrt()
res = hyperplane_dist
res = res.view(*shape[:-1], self.out_features)
return res
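# For reference: the signed quantity computed in Hypergyroplane.forward
# corresponds (up to the sign of the inner product) to the Poincare-ball
# distance to a "gyroplane" as in Ganea et al., "Hyperbolic Neural Networks"
# (2018):
#   d(x, H_{a,p}) = (1/sqrt(c)) * arcsinh( 2*sqrt(c) * <(-p) (+) x, a>
#                   / ((1 - c * ||(-p) (+) x||^2) * ||a||) )
# where p = get_bias() is a point on the hyperplane, a = weight lives in the
# tangent space at p, and (+) denotes Mobius addition (mob_add above).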
class MobiusLinear(PoincareLayer):
def __init__(self, in_features, out_features, bias=True, c=torch.ones(1), c_out=None):
super(MobiusLinear, self).__init__(
in_features,
out_features,
c
)
self.weight = Parameter(torch.Tensor(out_features, in_features))
self.c_out = c if (c_out is None) else c_out
if bias:
self._bias = Parameter(torch.Tensor(out_features))
else:
            self.register_parameter('_bias', None)
self.reset_parameters()
def get_bias(self):
return exp_map_zero(self._bias, self.c_out) # reparameterisation of a point on the disk
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5)) # usual init for weights
        if self._bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self._bias, -bound, bound)
def forward(self, input):
y = mob_mat_mul(self.weight.t(), input.view(-1, self.in_features), self.c, self.c_out)
        if self._bias is not None:
y = mob_add(y, self.get_bias(), self.c_out)
return y
class MobiusNL(nn.Module):
def __init__(self, non_lin, hyp_output=True, c=torch.ones(1), c_out=None):
super(MobiusNL, self).__init__()
self.non_lin = non_lin
self.hyp_output = hyp_output
self.c = c
self.c_out = c if (c_out is None) else c_out
def forward(self, input):
if self.non_lin is None:
if self.hyp_output:
return input
else:
return log_map_zero(input, self.c)
eucl_h = self.non_lin(log_map_zero(input, self.c))
if self.hyp_output:
return exp_map_zero(eucl_h, self.c_out)
else:
return eucl_h
class ExpZero(nn.Module):
def __init__(self, c):
super(ExpZero, self).__init__()
self.c = c
def forward(self, input):
return exp_map_zero(input, self.c)
class LogZero(nn.Module):
def __init__(self, c):
super(LogZero, self).__init__()
self.c = c
def forward(self, input):
return log_map_zero(input, self.c)
|
StarcoderdataPython
|
39559
|
<reponame>DirkZomerdijk/status<gh_stars>0
#%%
import numpy as np
import matplotlib.pyplot as plt
import json
import copy
# chronic_threshold
# repeats
# time
# stress_max
global_settings = {
"data_file": "clean",
"save_folder": "pre-test\\",
"status_type": "linear",
"distance_measure": "euclidean",
"scenario": "normal",
# "dutch_w": 0.664,
# "turkish_w": 0.075,
# "moroccan_w": 0.13,
# "ghanaian_w": 0.021,
# "suriname_w": 0.11,
"dutch_w": 0.664,
"turkish_w": 0.075,
"moroccan_w": 0.13,
"ghanaian_w": 0.021,
"suriname_w": 0.11,
"parameters": {}
}
param_range = {
"similarity_min" : {"range": [0.1, 1], "type": "f"},
"ses_noise" : {"range": [0, 4], "type": "i"},
# "vul_param" : {"range": [0.1,1], "type": "f"},
"psr_param" : {"range": [0.1,1], "type": "f"},
"recover_param" : {"range": [0.001, 0.1], "type": "f"},
"prestige_beta" : {"range": [0.005, 0.05], "type": "f"},
"prestige_param" : {"range": [0.1,1], "type": "f"},
"stressor_param" : {"range": [0.1,1], "type": "f"},
"interactions": {"range": [1,3], "type": "i"},
"coping_noise" : {"range": [0.01, 0.1], "type": "f"},
}
if __name__ == "__main__":
samples = 500
configs = []
param_samples = {}
for k in param_range.keys():
typ = param_range[k]['type']
        if typ == "f":
params = np.array(np.random.uniform(param_range[k]['range'][0], param_range[k]['range'][1], samples))
else:
params = np.array(np.random.randint(param_range[k]['range'][0], param_range[k]['range'][1], samples))
param_samples[k] = params
for i in range(samples):
print(i)
config_settings = copy.deepcopy(global_settings)
for k in param_range.keys():
config_settings['parameters'][k] = [param_samples[k][i].item()]
config_settings['parameters']['repeats'] = [10]
config_settings['parameters']['population_size'] = [502]
config_settings['parameters']['chronic_threshold'] = [0.0001]
config_settings['parameters']['stress_max'] = [10000]
config_settings['parameters']['time'] = [500]
config_settings['parameters']['job_nr'] = [i]
config_settings['parameters']['vul_param'] = [0]
configs.append(config_settings)
for i, config in enumerate(configs):
filenr = "{0:03}".format(i)
# print(filenr)
with open('configs/pre-test/sample_'+str(filenr)+'.json', 'w') as outfile:
json.dump(config, outfile)
# np.random.uniform(similarity_base[0], similarity_base[1])
# np.random.uniform(ses_noise[0], ses_noise[1])
# np.random.uniform(vul_param[0], vul_param[1])
# np.random.uniform(psr_param[0], psr_param[1])
# np.random.uniform(prestige_beta[0], prestige_beta[1])
# np.random.uniform(prestige_param[0], prestige_param[1])
# np.random.uniform(stressor_param[0], stressor_param[1])
# %%
|
StarcoderdataPython
|
3522183
|
from setuptools import setup
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name='aiodiskdb',
version='0.2.4a1',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/mempoolco/aiodiskdb/',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
python_requires='>=3.8',
description='Embeddable minimal asynchronous on disk DB',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
include_package_data=True,
packages=['aiodiskdb'],
package_dir={'aiodiskdb': 'aiodiskdb'},
)
|
StarcoderdataPython
|
1999794
|
#!/usr/bin/env python
import base64, quopri
import mimetypes, email.Generator, email.Message
import cStringIO, os
# sample addresses
toAddr = "<EMAIL>"
fromAddr = "<EMAIL>"
outputFile = "dirContentsMail"
def main():
mainMsg = email.Message.Message()
mainMsg["To"] = toAddr
mainMsg["From"] = fromAddr
mainMsg["Subject"] = "Directory contents"
mainMsg["Mime-version"] = "1.0"
mainMsg["Content-type"] = "Multipart/mixed"
mainMsg.preamble = "Mime message\n"
mainMsg.epilogue = "" # to ensure that message ends with newline
# Get names of plain files (not subdirectories or special files)
fileNames = [f for f in os.listdir(os.curdir) if os.path.isfile(f)]
for fileName in fileNames:
contentType, ignored = mimetypes.guess_type(fileName)
if contentType is None: # If no guess, use generic opaque type
contentType = "application/octet-stream"
contentsEncoded = cStringIO.StringIO()
f = open(fileName, "rb")
mainType = contentType[:contentType.find("/")]
if mainType=="text":
cte = "quoted-printable"
quopri.encode(f, contentsEncoded, 1) # 1 to also encode tabs
else:
cte = "base64"
base64.encode(f, contentsEncoded)
f.close()
subMsg = email.Message.Message()
subMsg.add_header("Content-type", contentType, name=fileName)
subMsg.add_header("Content-transfer-encoding", cte)
subMsg.set_payload(contentsEncoded.getvalue())
contentsEncoded.close()
mainMsg.attach(subMsg)
f = open(outputFile, "wb")
g = email.Generator.Generator(f)
g.flatten(mainMsg)
f.close()
return None
if __name__=="__main__":
main()
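# Note: the script above targets the legacy Python 2 email API (email.Message,
# email.Generator, cStringIO). A rough Python 3 sketch (an assumption, not a
# drop-in replacement) would let email.message.EmailMessage choose the
# content-transfer-encoding itself:
#   from email.message import EmailMessage
#   msg = EmailMessage()
#   msg["To"], msg["From"], msg["Subject"] = toAddr, fromAddr, "Directory contents"
#   maintype, subtype = (contentType or "application/octet-stream").split("/")
#   with open(fileName, "rb") as f:
#       msg.add_attachment(f.read(), maintype=maintype, subtype=subtype,
#                          filename=fileName)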
|
StarcoderdataPython
|
4852811
|
<reponame>b3astyy/b3astyy<filename>narrate.py
#!/usr/bin/env python3
# narrate.py, a tool to create a narrative out of nothing by asking questions
# Copyright 2016 <NAME>
# MIT licensed, so do whatever you want with it :)
import hashlib
from os import getenv
from blessings import Terminal
# ===== Configuration. Feel free to modify these =====
narrator = 'God'
username = getenv('USER') # Not really portable, but whatever.
if username is None:
username = "User"
# Try to keep these balanced, with 'yes' being slightly favored.
# Sorting by weight is NOT necessary, but somewhat good for performance.
responses = [("Yes.", 1.1),
("No.", 0.9),
("Definitely.", 0.09),
("Absolutely not.", 0.09),
("I'm afraid it is.", 0.04),
("I'm afraid it isn't.", 0.04),
("I don't wanna say", 0.01),
("That's a good question.", 0.005),
("You'd love to know that, wouldn't you?", 0.002),
]
responses_total_weight = sum([w for (_, w) in responses])
hash_method = hashlib.sha1
hash_multiplier = responses_total_weight * 1.0 / \
int('f' * hash_method().digest_size * 2, 16)
# ===== Building blocks of "responding" =====
def to_hashable(question):
return narrator.encode() + b'\0' + question.encode()
def to_number(hashable):
return int(hash_method(hashable).hexdigest(), 16) * hash_multiplier
def lookup_response(mass):
initial_mass = mass
mass = mass - 1e-9 # Compensate for rounding errors
for (response, weight) in responses:
if mass <= weight:
return response
mass = mass - weight
assert False, ('initial mass was {}, which is {} too much. '
'Bug in hash_multiplier?').format(initial_mass, mass)
def compute_response(question):
return lookup_response(to_number(to_hashable(question)))
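# Worked example (illustrative): lookup_response() walks the weighted buckets,
# so a mass of 1.37 first skips "Yes." (weight 1.1, leaving 0.27) and then
# lands in "No." because 0.27 <= 0.9. Since to_number() always maps the same
# (narrator, question) pair to the same mass, the answers are deterministic.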
# ===== Building blocks of interaction =====
def greet():
print('''{t.bold}Moderator{t.normal}:
You are now talking to {}.
Please note that they are quite busy, so only yes/no questions are allowed in
order to save bandwidth.
Apart from that, feel free to ask anything.
'''.format(narrator, t=Terminal()))
def not_a_question():
print('''{t.bold}Moderator{t.normal}:
That doesn't look like a yes/no question.
The answer might not make much sense.
'''.format(t=Terminal()))
def check_question(question):
good_prefix = ['am ', 'are ', 'is ', 'will ', 'can ', 'may ', 'could ',
'should ', 'shall ', 'ought ', 'would ', 'was ', 'do ',
'does ']
bad_start = not any([question.lower().startswith(p) for p in good_prefix])
bad_end = not question.endswith('?')
if bad_start or bad_end:
not_a_question()
def respond(question):
print('{t.bold}{}{t.normal}:\n{}\n'.format(narrator,
compute_response(question),
t=Terminal()))
def read_question():
# TODO: Ideally, only trigger on '?\n\n'
# TODO: Use login-name or similar instead of 'User'
prompt = '{t.bold}{}{t.normal}:\n'.format(username, t=Terminal())
question = ''
while not question.endswith('?'):
question = input(prompt)
prompt = ''
print()
return question
# ===== All together =====
def interview():
greet()
try:
while True:
q = read_question()
check_question(q)
respond(q)
except EOFError:
pass
if __name__ == '__main__':
    from sys import argv, exit
    if len(argv) > 2:
        print('Usage: ./narrate.py [narrator]\n'
              'Example: ./narrate.py "<NAME>"')
        exit(1)
if len(argv) == 2:
narrator = argv[1]
interview()
|
StarcoderdataPython
|
3244223
|
<filename>tests/test_movablestand.py
import logging
import pytest
from ophyd.sim import make_fake_device
from pcdsdevices.movablestand import MovableStand
logger = logging.getLogger(__name__)
@pytest.fixture(scope='function')
def fake_stand():
FakeStand = make_fake_device(MovableStand)
stand = FakeStand('STAND:NAME', name='stand')
return stand
def test_movablestand_sanity(fake_stand):
logger.debug('test_movablestand_sanity')
with pytest.raises(NotImplementedError):
fake_stand.move('OUT')
|
StarcoderdataPython
|
3367795
|
<filename>pyheapdump/__main__.py
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 by <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
=====================
Pyheapdump.__main__
=====================
Debug heap dumps.
.. warning::
This is alpha quality code.
.. autofunction:: main
"""
from __future__ import absolute_import, print_function, unicode_literals, division
import argparse
import sys
import os
from pyheapdump import debug_dump
def main(argv=None):
"""Debug a Python heap dump file.
You can invoke this function using the following command::
python -m pyheapdump [OPTIONS] pyheapdump
Use the option '-h' to get help::
python -m pyheapdump -h
"""
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description='debug a Python heap dump', prog=os.path.basename(sys.executable) + " -m pyheapdump")
parser.add_argument('--debugger', '-d', choices=['auto', 'pdb', 'pydevd'], default="auto", help="select the debugger, default is 'auto'")
parser.add_argument('--debugger-dir', help='pydevd only: path to the Python files of PyDev, usually <ECLIPSE_INSTALATION_DIR>/plugins/org.python.pydev_<VERSION>/pysrc/')
parser.add_argument('--host', help='pydevd only: the user may specify another host, if the debug server is not in the same machine')
parser.add_argument('--port', type=int, default=5678, help='pydevd only: specifies which port to use for communicating with the server. Default is port 5678')
parser.add_argument('--stdout', choices=['server', 'console'], default='server', help='pydevd only: pass the stdout to the debug server so that it is printed in its console or to this process console')
parser.add_argument('--stderr', choices=['server', 'console'], default='server', help='pydevd only: pass the stderr to the debug server so that it is printed in its console or to this process console')
parser.add_argument('--debug-pyheapdump', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('dumpfile', type=argparse.FileType(mode='rb'), help="the heap dump file")
namespace = parser.parse_args(argv)
if namespace.debug_pyheapdump:
# It is better to use remote debugging, because of the debugger specific code later on
sys.path.append(namespace.debugger_dir)
import pydevd # @UnresolvedImport
pydevd.settrace(stdoutToServer=True, stderrToServer=True, suspend=True, trace_only_current_thread=True)
return debug_dump(dumpfile=namespace.dumpfile, debugger_options=vars(namespace))
if __name__ == '__main__':
sys.exit(main())
|
StarcoderdataPython
|
151987
|
<gh_stars>0
import typing
import numpy as np
import numba as nb
@nb.njit((nb.i8[:], ), cache=True)
def next_permutation(a: np.ndarray) -> typing.NoReturn:
n = a.size
i = -1
for j in range(n - 1, 0, -1):
if a[j - 1] >= a[j]: continue
i = j - 1
break
if i == -1:
a[:] = -1
return
a[i + 1:] = a[i + 1:][::-1]
for j in range(i + 1, n):
if a[i] >= a[j]: continue
a[i], a[j] = a[j], a[i]
break
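# Minimal usage sketch (illustrative): step an int64 array through its
# lexicographic successors in place; the function signals "no next permutation"
# by filling the array with -1.
#   a = np.array([1, 2, 3], dtype=np.int64)
#   while a[0] != -1:
#       print(a)              # prints all 6 permutations of [1, 2, 3]
#       next_permutation(a)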
|
StarcoderdataPython
|
3250419
|
# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Config(object):
"""配置参数"""
def __init__(self):
self.model_name = 'TextRCNN'
        self.embedding_pretrained = None                            # pre-trained word embeddings
        self.dropout = 1.0                                          # dropout rate
        self.num_classes = 2                                        # number of classes
        self.n_vocab = 0                                            # vocabulary size, assigned at runtime
        self.padding_idx = 0                                        # padding_idx for the embedding layer
        self.learning_rate = 1e-3                                   # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300       # embedding dimension; if pre-trained vectors are used, keep their dimension
        self.hidden_size = 256                                      # LSTM hidden size
        self.num_layers = 1                                         # number of LSTM layers
'''Recurrent Convolutional Neural Networks for Text Classification'''
class Model(nn.Module):
def __init__(self, config):
super(Model, self).__init__()
if config.embedding_pretrained is not None:
self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
else:
self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.padding_idx)
self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
bidirectional=True, batch_first=True, dropout=config.dropout)
self.fc = nn.Linear(config.hidden_size * 2 + config.embed, config.num_classes)
def forward(self, x):
embed = self.embedding(x) # [batch_size, seq_len, embeding]=[64, 32, 64]
out, _ = self.lstm(embed)
out = torch.cat((embed, out), 2)
out = F.relu(out)
out = out.permute(0, 2, 1)
out = F.max_pool1d(out, out.size(2)).squeeze(dim=2)
out = self.fc(out)
return out
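# Illustrative shape trace for Model.forward (assuming batch_size=64, seq_len=32
# and the defaults above: embed=300, hidden_size=256, num_classes=2):
#   embedding(x):         (64, 32, 300)
#   lstm output:          (64, 32, 512)    # bidirectional -> 2 * hidden_size
#   cat(embed, out):      (64, 32, 812)
#   permute(0, 2, 1):     (64, 812, 32)
#   max_pool1d + squeeze: (64, 812)        # max over the sequence dimension
#   fc:                   (64, 2)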
|
StarcoderdataPython
|
1614778
|
<filename>isitfit/cost/base_reporter.py
# Related
# https://docs.datadoghq.com/integrations/amazon_redshift/
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/redshift.html#Redshift.Paginator.DescribeClusters
from termcolor import colored
import click
from isitfit.utils import logger
class ReporterBase:
def __init__(self):
self.emailTo = None
def postprocess(self, context_all):
raise Exception("To be implemented by derived class")
def display(self, context_all):
raise Exception("To be implemented by derived class")
def email(self, context_all):
"""
ctx - click context
"""
for fx in ['dataType', 'dataVal']:
if not fx in context_all:
raise Exception("Missing field from context: %s. This function should be implemented by the derived class"%fx)
# unpack
emailTo, ctx = context_all['emailTo'], context_all['click_ctx']
# prompt user for email if not requested
if not ctx.obj['skip_prompt_email']:
from isitfit.utils import PromptToEmailIfNotRequested
pte = PromptToEmailIfNotRequested()
emailTo = pte.prompt(emailTo)
# check if email requested
if emailTo is None:
return context_all
if len(emailTo)==0:
return context_all
from isitfit.emailMan import EmailMan
em = EmailMan(
dataType=context_all['dataType'], # ec2, not redshift
dataVal=context_all['dataVal'],
ctx=ctx
)
em.send(emailTo)
return context_all
|
StarcoderdataPython
|
6681586
|
# @lc app=leetcode id=17 lang=python3
#
# [17] Letter Combinations of a Phone Number
#
# https://leetcode.com/problems/letter-combinations-of-a-phone-number/description/
#
# algorithms
# Medium (49.52%)
# Likes: 5831
# Dislikes: 515
# Total Accepted: 815.5K
# Total Submissions: 1.6M
# Testcase Example: '"23"'
#
# Given a string containing digits from 2-9 inclusive, return all possible
# letter combinations that the number could represent. Return the answer in any
# order.
#
# A mapping of digit to letters (just like on the telephone buttons) is given
# below. Note that 1 does not map to any letters.
#
#
#
#
# Example 1:
#
#
# Input: digits = "23"
# Output: ["ad","ae","af","bd","be","bf","cd","ce","cf"]
#
#
# Example 2:
#
#
# Input: digits = ""
# Output: []
#
#
# Example 3:
#
#
# Input: digits = "2"
# Output: ["a","b","c"]
#
#
#
# Constraints:
#
#
# 0 <= digits.length <= 4
# digits[i] is a digit in the range ['2', '9'].
#
#
#
# @lc tags=string;backtracking
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Phone keypad: given the sequence of digits pressed, return all possible letter combinations.
# Recursion; a very simple idea.
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
pads = ['', '', 'abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
def letterCombinations(self, digits: str) -> List[str]:
if len(digits) == 0:
return []
ls = list(self.pads[int(digits[0])])
rs = self.letterCombinations(digits[1:])
result = []
for l in ls:
for r in rs:
result.append(l + r)
return result if len(result) != 0 else ls
pass
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('digits = "23"')
print('Output :')
print(str(Solution().letterCombinations("23")))
    print('Expected :')
print('["ad","ae","af","bd","be","bf","cd","ce","cf"]')
print()
print('Example 2:')
print('Input : ')
print('digits = ""')
print('Output :')
print(str(Solution().letterCombinations("")))
    print('Expected :')
print('[]')
print()
print('Example 3:')
print('Input : ')
print('digits = "2"')
print('Output :')
print(str(Solution().letterCombinations("2")))
    print('Expected :')
print('["a","b","c"]')
print()
pass
# @lc main=end
|
StarcoderdataPython
|
248352
|
<reponame>magnusmel/Deep-Learning-Adventures-with-PyTorch<filename>Section 3/train.py
"""
Train and test a simple RNN for language detection.
Inspired by
https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html
"""
import torch
torch.manual_seed(2)
import torch.nn as nn
from prep import get_data, get_data_test, all_categories
import time
import math
import random
import string
all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters)
n_categories = len(all_categories)
class RNN(nn.Module):
def __init__(self, n_letters, n_categories, hidden_size=56):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(n_letters + hidden_size, hidden_size)
self.i2o = nn.Linear(n_letters + hidden_size, n_categories)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
combined = torch.cat((input, hidden), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return torch.zeros(1, self.hidden_size)
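# Illustrative shape walk-through for RNN.forward (assuming the default
# hidden_size=56):
#   input:    (1, n_letters)          # one-hot encoded character
#   hidden:   (1, 56)
#   combined: (1, n_letters + 56)     # torch.cat along dim 1
#   output:   (1, n_categories)       # log-probabilities from LogSoftmax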
def wtotensor(word):
"""
Encode a word as a tensor using a standard alphabet (defined in all_letters)
For example:
Give our alphabet:
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .,;'-
Each lettter has a uniqur position:
0 -> a
1 -> b
etc.
15 -> o
So, if we want to encode the word 'oro' we will encode each letter
by including 1 in it's position and left the other positions as 0:
oro->
o is in 15th position in the alphabet--V
tensor([[[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
r in 18th->1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0.]],
and again o is in 15th position in the alphabet--V
[[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0.]]])
"""
tensor = torch.zeros(len(word), 1, n_letters)
for li, letter in enumerate(word):
tensor[li][0][all_letters.find(letter)] = 1
return tensor
def random_value(d):
"""
    Get a random value from the sequence d.
We use this function both to get the random
language/category as well as a word.
"""
return d[random.randint(0, len(d) - 1)]
def get_tensorw(all_categories, words, category=None, word=None):
"""
Get a random category and word, return tensors for both.
If category and word is specified just turn them into tensors
and return.
"""
if category is None and word is None:
category = random_value(all_categories)
word = random_value(words[category])
category_tensor = torch.LongTensor([all_categories.index(category)])
word_tensor = wtotensor(word)
return category, word, category_tensor, word_tensor
def get_category(output, categories):
"""
Return the most probable category/language
from output tensor.
"""
top_n, top_i = output.data.topk(1)
category_i = top_i[0][0]
return categories[category_i], category_i
def train(rnn, optimizer, loss_function, w_epochs, categories, words):
"""
    Train rnn model using optimizer, loss_function on w_epochs words
based on categories with words.
"""
print('Starting training...')
current_loss=0
wordi=0
stats_total={}.fromkeys(categories, 0)
for w_epoch in range(1, w_epochs + 1):
wordi+=1
# Get random data for training.
category, word, category_tensor, word_tensor = get_tensorw(categories, words)
stats_total[category]+=1
# We need to initalize our
# hidden variable first.
hidden = rnn.initHidden()
# Forward pass: predict a language for each character
# in a word.
for i in range(word_tensor.size()[0]):
output, hidden = rnn(word_tensor[i], hidden)
# Calculate the difference between
# what we've predicted and what we should
# predict.
loss = loss_function(output, category_tensor)
# Because changes('gradients') are accumulated
# from one iteration to another we need to
# clean up the last ones, so we can propagate
# the ones from this iteration.
# Note: always call it before
# loss.backward() and optimizer.step()
optimizer.zero_grad()
# Backward pass: accumulate changes('gradients')
# that we've learned about in this iteration.
loss.backward()
# Backward pass: propagate changes trough the network.
optimizer.step()
loss=loss.data.item()
current_loss += loss
# Print progress every now and then.
if wordi % 1000 == 0:
guess, _ = get_category(output, categories)
if guess == category:
msg = 'V'
else:
msg='X (%s)' % category
print('%d %d%% %s %s %s %f' % (w_epoch, w_epoch / w_epochs * 100,
word.ljust(20), guess, msg.ljust(8), current_loss / 1000))
current_loss=0.0
    print('Finished training on %d words' % wordi)
for c in categories:
print('Trained on %d words for %s' % (stats_total[c], c))
def test(rnn, optimizer, categories, test_words):
"""
    Test the model on the whole test dataset and calculate how
    many words have been classified correctly.
    We test the model in a similar way that we do
    training, but we go through the test set word by word
    (not randomly like in training).
We're counting the total number of words for each language
and also a number of words that were detected correctly.
"""
stats_correct={}.fromkeys(categories, 0)
stats_total={}.fromkeys(categories, 0)
print('Starting testing...')
with torch.no_grad():
for cat in categories:
for w in test_words[cat]:
_, _, category_tensor, word_tensor = get_tensorw(categories, test_words, cat, w)
hidden = rnn.initHidden()
for i in range(word_tensor.size()[0]):
output, hidden = rnn(word_tensor[i], hidden)
guess, _ = get_category(output, categories)
stats_total[cat]+=1
if (guess == cat):
stats_correct[cat]+=1
for c in categories:
print('Test accuracy for %s on %d (%d correct) words:%d %%' % (c, stats_total[c], stats_correct[c], 100 * stats_correct[c] / stats_total[c]))
if __name__ == '__main__':
# Initialize our language detector
rnn = RNN(n_letters, n_categories)
# Initialize optimizer
optimizer = torch.optim.Adam(rnn.parameters())
# Initialize our loss function
loss_function = nn.CrossEntropyLoss()
# Get training data
print('Getting training data...')
categories, train_words=get_data()
    # Train using 10000 words chosen randomly for
# each language, in general we get around 50% words
# for each language.
train(rnn, optimizer, loss_function, 10000, categories, train_words)
# Get test data, don't include words from training set.
print('Getting test data...')
test_categories, test_words=get_data_test(
exclude_words=[ train_words[c] for c in all_categories ])
# Test our model on totally fresh and unique list of words.
test(rnn, optimizer, test_categories, test_words)
# Save our model,so we can use it for detection.
torch.save(rnn.state_dict(), 'model.ckpt')
|
StarcoderdataPython
|
6408518
|
<reponame>stjordanis/datar
"""Chop and unchop
https://github.com/tidyverse/tidyr/blob/master/R/chop.R
"""
from collections import defaultdict
from typing import Iterable, List, Mapping, Tuple, Union
import numpy
import pandas
from pandas import DataFrame, Series
from pipda import register_verb
from pipda.utils import CallingEnvs
from ..core.types import IntOrIter, StringOrIter, Dtype, is_scalar
from ..core.utils import (
df_getitem,
df_setitem,
vars_select,
copy_attrs,
apply_dtypes,
keep_column_order,
reconstruct_tibble,
)
from ..core.exceptions import ColumnNotExistingError
from ..core.contexts import Context
from ..core.grouped import DataFrameGroupBy
from ..base import union, NA
from ..dplyr import bind_cols, group_by, arrange, group_data
from .drop_na import drop_na
@register_verb(DataFrame, context=Context.SELECT)
def chop(
data: DataFrame,
cols: Union[IntOrIter, StringOrIter] = None,
base0_: bool = None,
) -> DataFrame:
"""Makes data frame shorter by converting rows within each group
into list-columns.
Args:
data: A data frame
cols: Columns to chop
base0_: Whether `cols` are 0-based
if not provided, will use `datar.base.get_option('index.base.0')`
Returns:
Data frame with selected columns chopped
"""
if cols is None:
return data.copy()
all_columns = data.columns
cols = vars_select(all_columns, cols, base0=base0_)
cols = all_columns[cols]
# when cols is empty
# order may change for all_columns.difference([])
key_cols = all_columns.difference(cols) if len(cols) > 0 else all_columns
vals = data[cols]
keys = data[key_cols]
compacted = []
if data.shape[0] == 0:
split_key = keys
else:
split = _vec_split(vals, keys)
try:
split_key = df_getitem(split, "key")
except (KeyError, ColumnNotExistingError):
split_key = None
split_val = df_getitem(split, "val")
for val in split_val:
compacted.append(_compact_df(val))
if not compacted:
vals = DataFrame(columns=cols)
else:
vals = pandas.concat(compacted, ignore_index=True)
out = bind_cols(split_key, vals, __calling_env=CallingEnvs.REGULAR)
return reconstruct_tibble(data, out, keep_rowwise=True)
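# Illustrative example of chop(), following tidyr's semantics (not executed
# here; column names are made up):
#   >>> df = tibble(x=[1, 1, 2], y=[1, 2, 3])
#   >>> df >> chop(f.y)
#   # x       y
#   # 1  [1, 2]
#   # 2     [3]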
@register_verb(DataFrame, context=Context.SELECT)
def unchop(
data: DataFrame,
cols: Union[IntOrIter, StringOrIter] = None,
keep_empty: bool = False,
ptype: Union[Dtype, Mapping[str, Dtype]] = None,
base0_: bool = None,
) -> DataFrame:
"""Makes df longer by expanding list-columns so that each element
of the list-column gets its own row in the output.
See https://tidyr.tidyverse.org/reference/chop.html
Recycling size-1 elements might be different from `tidyr`
>>> df = tibble(x=[1, [2,3]], y=[[2,3], 1])
>>> df >> unchop([f.x, f.y])
>>> # tibble(x=[1,2,3], y=[2,3,1])
>>> # instead of following in tidyr
>>> # tibble(x=[1,1,2,3], y=[2,3,1,1])
Args:
data: A data frame.
cols: Columns to unchop.
keep_empty: By default, you get one row of output for each element
            of the list you're unchopping/unnesting.
This means that if there's a size-0 element
(like NULL or an empty data frame), that entire row will be
dropped from the output.
If you want to preserve all rows, use `keep_empty` = `True` to
replace size-0 elements with a single row of missing values.
ptype: Providing the dtypes for the output columns.
Could be a single dtype, which will be applied to all columns, or
a dictionary of dtypes with keys for the columns and values the
dtypes.
For nested data frames, we need to specify `col$a` as key. If `col`
is used as key, all columns of the nested data frames will be casted
into that dtype.
base0_: Whether `cols` are 0-based
if not provided, will use `datar.base.get_option('index.base.0')`
Returns:
A data frame with selected columns unchopped.
"""
all_columns = data.columns
cols = vars_select(all_columns, cols, base0=base0_)
if len(cols) == 0 or data.shape[0] == 0:
return (
data.copy(copy_grouped=True)
if isinstance(data, DataFrameGroupBy)
else data.copy()
)
cols = all_columns[cols]
key_cols = all_columns.difference(cols).tolist()
out = _unchopping(data, cols, key_cols, keep_empty)
apply_dtypes(out, ptype)
return reconstruct_tibble(data, out, keep_rowwise=True)
def _vec_split(
x: Union[DataFrame, Series], by: Union[DataFrame, Series]
) -> DataFrame:
"""Split a vector into groups
Returns a data frame with columns `key` and `val`. `key` is a stacked column
with data from by.
"""
if isinstance(x, Series): # pragma: no cover, always a data frame?
x = x.to_frame()
if isinstance(by, Series): # pragma: no cover, always a data frame?
by = by.to_frame()
df = bind_cols(x, by, __calling_env=CallingEnvs.REGULAR)
if df.shape[0] == 0:
return DataFrame(columns=["key", "val"])
df = group_by(df, *by.columns, __calling_env=CallingEnvs.REGULAR)
gdata = group_data(df, __calling_env=CallingEnvs.REGULAR)
gdata = arrange(gdata, gdata._rows, __calling_env=CallingEnvs.REGULAR)
out = DataFrame(index=gdata.index)
out = df_setitem(out, "key", gdata[by.columns])
return df_setitem(
out,
"val",
[x.iloc[rows, :].reset_index(drop=True) for rows in gdata._rows],
)
def _compact_df(data: DataFrame) -> DataFrame:
"""Compact each series as list in a data frame"""
out = DataFrame(index=[0], columns=data.columns)
for col in data.columns:
out.loc[0, col] = data[col].values.tolist()
return out
def _unchopping(
data: DataFrame,
data_cols: Iterable[str],
key_cols: Iterable[str],
keep_empty: bool,
) -> DataFrame:
"""Unchop the data frame
See https://stackoverflow.com/questions/53218931/how-to-unnest-explode-a-column-in-a-pandas-dataframe
""" # noqa: E501
# key_cols could be empty
rsize = None
val_data = {}
for dcol in data_cols:
# check dtype first so that we don't need to check
# other types of columns element by element
        is_df_col = data[dcol].dtype == object and all(
# it's either null or a dataframe
(is_scalar(val) and pandas.isnull(val))
or isinstance(val, DataFrame)
for val in data[dcol]
)
if is_df_col:
vdata, sizes, dtypes = _unchopping_df_column(data[dcol])
else:
vdata, sizes, dtypes = _unchopping_nondf_column(data[dcol])
val_data.update(vdata)
if rsize is None:
rsize = sizes
else:
tmpsize = []
for prevsize, cursize in zip(rsize, sizes):
if prevsize != cursize and 1 not in (prevsize, cursize):
raise ValueError(
f"Incompatible lengths: {prevsize}, {cursize}."
)
tmpsize.append(max(prevsize, cursize))
rsize = tmpsize
key_data = {key: numpy.repeat(data[key].values, rsize) for key in key_cols}
key_data.update(val_data)
# DataFrame(key_data) may have nested dfs
# say y$a, then ['y'] will not select it
out = keep_column_order(DataFrame(key_data), data.columns)
if not keep_empty:
out = drop_na(
out,
*val_data,
how_="all",
__calling_env=CallingEnvs.REGULAR,
)
apply_dtypes(out, dtypes)
copy_attrs(out, data)
return out
def _unchopping_df_column(
series: Series,
) -> Tuple[Mapping[str, List], List[int], Mapping[str, Dtype]]:
"""Unchopping dataframe column"""
# Get union column names
union_cols = []
# try to keep the same dtype
dtypes = None
for val in series:
if isinstance(val, DataFrame):
union_cols = union(
union_cols,
val.columns,
__calling_env=CallingEnvs.REGULAR,
)
if dtypes is None:
dtypes = {col: val[col].dtype for col in val}
else:
for col in val:
# TODO: test
if col in dtypes and dtypes[col] != val[col].dtype:
del dtypes[col] # pragma: no cover
sizes = []
val_data = defaultdict(list)
# add missing columns to each df
for val in series:
if isinstance(val, DataFrame):
for col in union_cols:
val_data[f"{series.name}${col}"].extend(
val[col] if col in val else [NA] * val.shape[0]
)
sizes.append(val.shape[0])
else: # null
for col in union_cols:
val_data[f"{series.name}${col}"].append(NA)
sizes.append(1)
return val_data, sizes, dtypes
def _unchopping_nondf_column(
series: Series,
) -> Tuple[Mapping[str, List], List[int], Mapping[str, Dtype]]:
"""Unchopping non-dataframe column"""
val_data = {}
vals = [[val] if is_scalar(val) else val for val in series]
val_data[series.name] = Series(
numpy.concatenate(
vals,
axis=None,
# casting="no" # only for numpy 1.20.0+
),
dtype=series.dtype,
)
return val_data, [len(val) for val in vals], {}
|
StarcoderdataPython
|
8132072
|
<gh_stars>1-10
#!/usr/bin/env python
import rospy
import tf
import tf.transformations as tr
from std_msgs.msg import String, Header, ColorRGBA
from nav_msgs.msg import OccupancyGrid, MapMetaData, Odometry
from geometry_msgs.msg import Twist, PoseStamped, Point
from sensor_msgs.msg import LaserScan
from visualization_msgs.msg import Marker, MarkerArray
from math import sqrt, cos, sin, pi, atan2
from threading import Thread, Lock
from math import pi, log, exp
import random
import numpy as np
import sys
import pickle
class Particle(object):
def __init__(self, id, x,y, theta):
self.x = x
self.y = y
self.id = id
self.theta = theta
class ParticleFilter(object):
def __init__(self, num_particles, occ_grid_map, xmin, xmax, ymin, ymax,
laser_min_range, laser_max_range, laser_min_angle, laser_max_angle,
dynamics_translation_noise_std_dev,
dynamics_orientation_noise_std_dev,
beam_range_measurement_noise_std_dev):
self.num_particles = num_particles
self.ogm = occ_grid_map
self.grid_map = np.array(self.ogm.data, dtype='int8')
self.grid_map = self.grid_map.reshape((self.ogm.info.height, self.ogm.info.width))
self.grid_bin = (self.grid_map == 0).astype('uint8') # Cell is True iff probability of being occupied is zero
# Workspace boundaries
self.xmax = xmax
self.xmin = xmin
self.ymin = ymin
self.ymax = ymax
self.laser_max_angle = laser_max_angle
self.laser_min_angle = laser_min_angle
self.laser_max_range = laser_max_range
self.laser_min_range = laser_min_range
# Std deviation of noise affecting translation in the dynamics model for particles
self.dynamics_translation_noise_std_dev = dynamics_translation_noise_std_dev
# Std deviation of noise affecting orientation in the dynamics model for particles
self.dynamics_orientation_noise_std_dev = dynamics_orientation_noise_std_dev
# Std deviation of noise affecting measured range from the laser measurement model
self.beam_range_measurement_noise_std_dev = beam_range_measurement_noise_std_dev
# Number of laser beams to simulate when predicting what a
# particle's measurement is going to be
self.eval_beams = 32
# Previous odometry measurement of the robot
self.last_robot_odom = None
# Current odometry measurement of the robot
self.robot_odom = None
# Relative motion since the last time particles were updated
self.dx = 0
self.dy = 0
self.dyaw = 0
self.particles = []
self.weights = []
def get_random_free_state(self):
while True:
# Note: we initialize particles closer to the robot's initial
# position in order to make the initialization easier
xrand = np.random.uniform(self.xmin*0.2, self.xmax*0.2)
yrand = np.random.uniform(self.ymin*0.2, self.ymax*0.2)
row, col = self.metric_to_grid_coords(xrand, yrand)
if self.grid_bin[row, col]:
theta = np.random.uniform(0, 2*pi)
return xrand, yrand, theta
def init_particles(self):
"""Initializes particles uniformly randomly with map frame coordinates,
within the boundaries set by xmin,xmax, ymin,ymax"""
for i in xrange(self.num_particles):
xrand, yrand, theta = self.get_random_free_state()
# Note: same orientation as the initial orientation of the robot
# to make initialization easier
self.particles.append(Particle(i, xrand, yrand, 0))
def handle_observation(self, laser_scan, dt):
"""Does prediction, weight update, and resampling."""
# TODO: for every particle
# 1) Predict its relative motion since the last time an observation was received using
# predict_particle_odometry().
# 2) Compute the squared norm of the difference between the particle's predicted laser scan
# and the actual laser scan
# TODO: exponentiate the prediction errors you computed above
# using numerical stability tricks such as
# http://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
# if you think it is necessary
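        # A minimal sketch of the exp-normalize trick (illustrative, assuming
        # `errors` holds the squared prediction errors computed below): subtract
        # the smallest error before exponentiating so exp() cannot underflow to
        # zero for every particle; the shift cancels out after normalization.
        #   min_err = min(errors)
        #   weights = [exp(-(e - min_err)) for e in errors]
        #   total = sum(weights)
        #   weights = [w / total for w in weights]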
errors = []
#self.weights = [0] * len(self.particles)
for particle in self.particles:
self.predict_particle_odometry(particle)
#for each particle, compute the laser scan difference
error = self.get_prediction_error_squared(laser_scan, particle)
#sig_error = self.sigmoid(error)
errors.append(error)
#self.weights.append(exp(-error))
self.weights = [exp(-error) for error in errors]
weight_sum = sum(self.weights)
N_eff = 0
#N_eff = sum([1 / ((weight / weight_sum) ** 2) for weight in self.weights])
#print "effective sample size", N_eff
# Do resampling. Depending on how you implement it you might
# or might not need to normalize your weights by their sum, so
# they are treated as probabilities
# particle deprivation:
# approach 1: calculate sample size by variance
# get the partice id among the survived particles after resampling
new_particles = []
self.resample(new_particles)
#particle_indexes = self.resample(new_particles)
#remove duplicates
"""
N_eff_id = set(particle_indexes)
print "N_eff_id",[id for id in N_eff_id]
#effective sample size based on particle's contribution
N_eff_size = len(N_eff_id)
#print "N_eff_size", N_eff_size
"""
# approach 2: calculate the effective sample size by weight
sig_weight = [self.sigmoid(error) for error in errors]
N_eff_weight = sum([1 / (weight ** 2) for weight in sig_weight])
#print "N_eff_weight", N_eff_weight
N_eff = N_eff_weight
#N_eff = N_eff_size
# address particle deprivation
# 1. resample only when N_eff > N_thresh
if N_eff > 50:
self.particles = new_particles
#print [particle.id for particle in self.particles]
#print "weight", self.weights
def divide_up(self, id, particle, num, particle_list):
for i in xrange(int(num)):
xrand = np.random.uniform(particle.x*-0.5, particle.x*0.5)
yrand = np.random.uniform(particle.y*-0.5, particle.y*0.5)
theta = np.random.uniform(particle.theta*-0.5, particle.theta*0.5)
particle_list.append(Particle(id, xrand, yrand, theta))
id += 1
def sigmoid(self, x):
"""Numerically-stable sigmoid function."""
if x >= 0:
z = exp(-x)
return 1 / (1 + z)
else:
# if x is less than zero then z will be small, denom can't be
# zero because it's 1+z.
z = exp(x)
return z / (1 + z)
def resample(self, new_particles):
# TODO: sample particle i with probability that
# is proportional to its weight w_i. Sampling
# can be done with repetition/replacement, so
# you can sample the same particle more than once.
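        # An equivalent formulation (illustrative only, not the author's code):
        # draw particle indices with probability proportional to weight via
        # numpy, then copy the selected particles:
        #   probs = np.asarray(self.weights) / sum(self.weights)
        #   idxs = np.random.choice(len(self.particles), self.num_particles, p=probs)
        #   new_particles.extend(Particle(self.particles[i].id, self.particles[i].x,
        #                                 self.particles[i].y, self.particles[i].theta)
        #                        for i in idxs)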
#particle_indexes = []
sample_u = np.random.uniform(0,1)
index = int(sample_u * (self.num_particles - 1))
beta = 0.0
if self.weights == []:
self.weights = [1] * self.num_particles
print self.weights
max_w = max(self.weights)
#print "max_w", max_w
#print "weight", self.weights
for particle in self.particles:
beta += np.random.uniform(0,1) * 2.0 * max_w
while beta > self.weights[index]:
beta -= self.weights[index]
index = (index + 1) % self.num_particles
particle = self.particles[index]
#particle_indexes.append(particle.id)
new_particles.append(Particle(particle.id, particle.x, particle.y, particle.theta))
#self.particles = new_particles
#return particle_indexes
def simulate_laser_scan_for_particle(self, x, y, yaw_in_map, angles, min_range, max_range):
"""If the robot was at the given particle, what would its laser scan
be (in the known map)? Returns the predicted laser ranges if a particle with state (x,y,yaw_in_map)
is to scan along relative angles in angles."""
# for every relative angle in angles
# 1. The absolute angle based on the robot's orientation is computed
# 2. Ray tracing from (x,y) along the abosulte angle using step size range_step is done
# (a) If the currently examined point is within the bounds of the workspace
# stop if it meets an obstacle or if it reaches max_range
# (b) If the currently examined point is outside the bounds of the workspace
# stop if it reaches max_range
# 3. The computed collection of ranges corresponding to the given angles is returned
ranges = []
range_step = self.ogm.info.resolution
for angle in angles:
phi = yaw_in_map + angle
r = min_range
while r <= max_range:
xm = x + r*cos(phi)
ym = y + r*sin(phi)
if xm > self.xmax or xm < self.xmin:
break
if ym > self.ymax or ym < self.ymin:
break
row, col = self.metric_to_grid_coords(xm, ym)
free = self.grid_bin[row, col].all()
if not free:
break
r += range_step
ranges.append(r)
return ranges
def subsample_laser_scan(self, laser_scan_msg):
"""Subsamples a set number of beams (self.eval_beams) from the incoming actual laser scan. It also
converts the Inf range measurements into max_range range measurements, in order to be able to
compute a difference."""
# To convert the laser points from the husky_1/base_laser frame, whose z-axis points downwards
# to the same frame pointing upwards
N = len(laser_scan_msg.ranges)
ranges_in_upwards_baselaser_frame = laser_scan_msg.ranges
angles_in_baselaser_frame = [(laser_scan_msg.angle_max - laser_scan_msg.angle_min)*float(i)/N + laser_scan_msg.angle_min for i in xrange(N)]
step = N/self.eval_beams
angles_in_upwards_baselaser_frame = angles_in_baselaser_frame[::step]
ranges_in_upwards_baselaser_frame = ranges_in_upwards_baselaser_frame[::-step]
assert (len(ranges_in_upwards_baselaser_frame) == len(angles_in_upwards_baselaser_frame))
actual_ranges = []
for r in ranges_in_upwards_baselaser_frame:
if r >= self.laser_min_range and r <= self.laser_max_range:
actual_ranges.append(r)
if r < self.laser_min_range:
actual_ranges.append(self.laser_min_range)
if r > self.laser_max_range:
actual_ranges.append(self.laser_max_range)
return actual_ranges, angles_in_upwards_baselaser_frame
def get_prediction_error_squared(self, laser_scan_msg, particle):
"""
This function evaluates the squared norm of the difference/error between the
scan in laser_scan_msg and the one that was predicted by the given particle.
Assume that the bearing of each beam relative to the robot's orientation has zero noise,
so the only noise in the measurement comes from the range of each beam and is
distributed as N(0, beam_range_measurement_std_dev^2)
"""
# If the particle is out of the bounds of the workspace
# give it a large error
if particle.x < self.xmin or particle.x > self.xmax:
return 300
if particle.y < self.ymin or particle.y > self.ymax:
return 300
# If the particle falls inside an obstacle
# give it a large error
row, col = self.metric_to_grid_coords(particle.x, particle.y)
if row >= 201 or col >=201:
return 300
if not self.grid_bin[row, col]:
return 300
assert (self.laser_min_range >= 0)
assert (self.laser_max_range > 0)
        # TODO: subsample the received actual laser scan using the
# subsample_laser_scan method above
# actual ranges and angles
[actual_ranges, angles] = self.subsample_laser_scan(laser_scan_msg)
min_range = min(actual_ranges)
max_range = max(actual_ranges)
#print "min_range", min_range
#print "max_range", max_range
# TODO: simulate a laser scan using one of the methods of this class
predict_ranges = self.simulate_laser_scan_for_particle(particle.x, particle.y, particle.theta, angles, self.laser_min_range, self.laser_max_range)
#predict_ranges = self.simulate_laser_scan_for_particle(particle.x, particle.y, particle.theta, angles, min_range, max_range)
        # TODO: compute the difference between predicted ranges and actual ranges
diff = [actual_range - predict_range for actual_range, predict_range in zip(actual_ranges, predict_ranges)]
#print "diff", diff
# Take the squared norm of that difference
norm_error = 0
norm_error = np.linalg.norm(diff)
print "norm_error", norm_error
return norm_error**2
def handle_odometry(self, robot_odom):
"""Compute the relative motion of the robot from the previous odometry measurement
to the current odometry measurement."""
self.last_robot_odom = self.robot_odom
self.robot_odom = robot_odom
if self.last_robot_odom:
p_map_currbaselink = np.array([self.robot_odom.pose.pose.position.x,
self.robot_odom.pose.pose.position.y,
self.robot_odom.pose.pose.position.z])
p_map_lastbaselink = np.array([self.last_robot_odom.pose.pose.position.x,
self.last_robot_odom.pose.pose.position.y,
self.last_robot_odom.pose.pose.position.z])
q_map_lastbaselink = np.array([self.last_robot_odom.pose.pose.orientation.x,
self.last_robot_odom.pose.pose.orientation.y,
self.last_robot_odom.pose.pose.orientation.z,
self.last_robot_odom.pose.pose.orientation.w])
q_map_currbaselink = np.array([self.robot_odom.pose.pose.orientation.x,
self.robot_odom.pose.pose.orientation.y,
self.robot_odom.pose.pose.orientation.z,
self.robot_odom.pose.pose.orientation.w])
R_map_lastbaselink = tr.quaternion_matrix(q_map_lastbaselink)[0:3,0:3]
p_lastbaselink_currbaselink = R_map_lastbaselink.transpose().dot(p_map_currbaselink - p_map_lastbaselink)
q_lastbaselink_currbaselink = tr.quaternion_multiply(tr.quaternion_inverse(q_map_lastbaselink), q_map_currbaselink)
_, _, yaw_diff = tr.euler_from_quaternion(q_lastbaselink_currbaselink)
self.dyaw += yaw_diff
self.dx += p_lastbaselink_currbaselink[0]
self.dy += p_lastbaselink_currbaselink[1]
def predict_particle_odometry(self, particle):
"""
Where will the particle go after time dt passes?
This function modifies the particle's state by simulating the effects
of the given control forward in time.
Assume Dubins dynamics with variable forward velocity for the Husky.
"""
nx = random.gauss(0, self.dynamics_translation_noise_std_dev)
ny = random.gauss(0, self.dynamics_translation_noise_std_dev)
ntheta = random.gauss(0, self.dynamics_orientation_noise_std_dev)
v = sqrt(self.dx**2 + self.dy**2)
# Don't let the particle propagation be dominated by noise
if abs(v) < 1e-10 and abs(self.dyaw) < 1e-5:
return
particle.x += v * cos(particle.theta) + nx
particle.y += v * sin(particle.theta) + ny
particle.theta += self.dyaw + ntheta
def metric_to_grid_coords(self, x, y):
"""Converts metric coordinates to occupancy grid coordinates"""
gx = (x - self.ogm.info.origin.position.x) / self.ogm.info.resolution
gy = (y - self.ogm.info.origin.position.y) / self.ogm.info.resolution
row = min(max(int(gy), 0), self.ogm.info.height)
col = min(max(int(gx), 0), self.ogm.info.width)
return (row, col)
class MonteCarloLocalization(object):
def __init__(self, num_particles, xmin, xmax, ymin, ymax):
rospy.init_node('monte_carlo_localization', anonymous=True)
self.map_file = rospy.get_param("~map_file")
dynamics_translation_noise_std_dev = rospy.get_param("~dynamics_translation_noise_std_dev")
dynamics_orientation_noise_std_dev = rospy.get_param("~dynamics_orientation_noise_std_dev")
beam_range_measurement_noise_std_dev = rospy.get_param("~beam_range_measurement_noise_std_dev")
pkl_file = open(self.map_file, 'rb')
self.ogm = pickle.load(pkl_file)
pkl_file.close()
self.q_baselink_baselaser = np.array([1.0, 0, 0, 0])
self.R_baselink_baselaser = tr.quaternion_matrix(self.q_baselink_baselaser)[0:3,0:3]
self.p_baselink_baselaser = np.array([0.337, 0.0, 0.308])
self.pf = ParticleFilter(num_particles, self.ogm, xmin, xmax, ymin, ymax, 0, 0, 0, 0,
dynamics_translation_noise_std_dev,
dynamics_orientation_noise_std_dev,
beam_range_measurement_noise_std_dev)
self.pf.init_particles()
self.last_scan = None
self.mutex = Lock()
self.laser_points_marker_pub = rospy.Publisher('/husky_1/debug/laser_points', Marker, queue_size=1)
self.particles_pub = rospy.Publisher('/husky_1/particle_filter/particles', MarkerArray, queue_size=1)
self.odom_sub = rospy.Subscriber('/husky_1/odometry/ground_truth', Odometry, self.odometry_callback, queue_size=1)
self.laser_sub = rospy.Subscriber('/husky_1/scan', LaserScan, self.laser_scan_callback, queue_size=1)
def odometry_callback(self, msg):
self.mutex.acquire()
self.pf.handle_odometry(msg)
self.mutex.release()
def get_2d_laser_points_marker(self, timestamp, frame_id, pts_in_map, marker_id, rgba):
msg = Marker()
msg.header.stamp = timestamp
msg.header.frame_id = frame_id
msg.ns = 'laser_points'
msg.id = marker_id
msg.type = 6  # Marker.CUBE_LIST (one cube per laser point)
msg.action = 0  # Marker.ADD
msg.points = [Point(pt[0], pt[1], pt[2]) for pt in pts_in_map]
msg.colors = [rgba for pt in pts_in_map]
for pt in pts_in_map:
assert((not np.isnan(pt).any()) and np.isfinite(pt).all())
msg.scale.x = 0.1
msg.scale.y = 0.1
msg.scale.z = 0.1
return msg
def laser_scan_callback(self, msg):
self.pf.laser_min_angle = msg.angle_min
self.pf.laser_max_angle = msg.angle_max
self.pf.laser_min_range = msg.range_min
self.pf.laser_max_range = msg.range_max
dt_since_last_scan = 0
if self.last_scan:
dt_since_last_scan = (msg.header.stamp - self.last_scan.header.stamp).to_sec()
self.mutex.acquire()
self.publish_laser_pts(msg)
self.pf.handle_observation(msg, dt_since_last_scan)
self.pf.dx = 0
self.pf.dy = 0
self.pf.dyaw = 0
self.mutex.release()
self.last_scan = msg
def publish_laser_pts(self, msg):
"""Publishes the currently received laser scan points from the robot, after we subsampled
them in order to comparse them with the expected laser scan from each particle."""
if self.pf.robot_odom is None:
return
subsampled_ranges, subsampled_angles = self.pf.subsample_laser_scan(msg)
N = len(subsampled_ranges)
x = self.pf.robot_odom.pose.pose.position.x
y = self.pf.robot_odom.pose.pose.position.y
_, _ , yaw_in_map = tr.euler_from_quaternion(np.array([self.pf.robot_odom.pose.pose.orientation.x,
self.pf.robot_odom.pose.pose.orientation.y,
self.pf.robot_odom.pose.pose.orientation.z,
self.pf.robot_odom.pose.pose.orientation.w]))
pts_in_map = [ (x + r*cos(theta + yaw_in_map),
y + r*sin(theta + yaw_in_map),
0.3) for r,theta in zip(subsampled_ranges, subsampled_angles)]
lpmarker = self.get_2d_laser_points_marker(msg.header.stamp, 'map', pts_in_map, 30000, ColorRGBA(1.0, 0.0, 0, 1.0))
self.laser_points_marker_pub.publish(lpmarker)
def get_particle_marker(self, timestamp, particle, marker_id):
"""Returns an rviz marker that visualizes a single particle"""
msg = Marker()
msg.header.stamp = timestamp
msg.header.frame_id = 'map'
msg.ns = 'particles'
msg.id = marker_id
msg.type = 0 # arrow
msg.action = 0 # add/modify
msg.lifetime = rospy.Duration(1)
yaw_in_map = particle.theta
vx = cos(yaw_in_map)
vy = sin(yaw_in_map)
msg.color = ColorRGBA(0, 1.0, 0, 1.0)
msg.points.append(Point(particle.x, particle.y, 0.2))
msg.points.append(Point(particle.x + 0.3*vx, particle.y + 0.3*vy, 0.2))
msg.scale.x = 0.05
msg.scale.y = 0.15
msg.scale.z = 0.1
return msg
def publish_particle_markers(self):
""" Publishes the particles of the particle filter in rviz"""
ma = MarkerArray()
ts = rospy.Time.now()
for i in xrange(len(self.pf.particles)):
ma.markers.append(self.get_particle_marker(ts, self.pf.particles[i], i))
self.particles_pub.publish(ma)
def run(self):
rate = rospy.Rate(20)
while not rospy.is_shutdown():
self.publish_particle_markers()
rate.sleep()
if __name__ == '__main__':
num_particles = 50
# Workspace boundaries in meters
xmin = -20
xmax = 20
ymin = -20
ymax = 20
mcl = MonteCarloLocalization(num_particles, xmin, xmax, ymin, ymax)
mcl.run()
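# Note on configuration (inferred from MonteCarloLocalization.__init__ above, illustrative only):
# the node expects the private ROS parameters ~map_file, ~dynamics_translation_noise_std_dev,
# ~dynamics_orientation_noise_std_dev and ~beam_range_measurement_noise_std_dev to be set,
# e.g. via <param> tags in a roslaunch file, where ~map_file points to a pickled OccupancyGrid.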
|
StarcoderdataPython
|
1603285
|
from gym.envs.registration import register
register(
id='GridDrawBw-v0',
entry_point='grid_draw.envs:GridDrawBwEnv',
)
register(
id='GridDrawRgb-v0',
entry_point='grid_draw.envs:GridDrawRgbEnv',
)
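# Usage sketch (assumes the grid_draw package providing GridDrawBwEnv/GridDrawRgbEnv is installed
# and that this module is its __init__.py): after import, the environments are created by id, e.g.
#
#   import gym
#   import grid_draw  # noqa: F401  (runs the register() calls above)
#   env = gym.make('GridDrawBw-v0')
#   obs = env.reset()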
|
StarcoderdataPython
|
1981298
|
class UnionFindNode:
"""Nodo di una struttura dati union-find."""
def __init__(self, e):
self.elem = e
self.father = None
self.sons = []
class UnionFindQuickFind:
"""Rappresenta una collezione di alberi QuickFind."""
def __init__(self):
self.nodes = []  # list containing all created nodes
def makeset(self, e):
"""Crea un nuovo albero.
L'albero sara' composto dal nodo contenente l'elemento passato come
parametro piu' un nodo radice avente medesimo nome.
"""
root = UnionFindNode(e)
node = UnionFindNode(e)
root.sons.append(node)
node.father = root
self.nodes.append(node)
def find(self, node):
return node.father.elem
def union(self, rootA, rootB):
if rootA == rootB:  # attempting to merge the same tree!
return
for sonB in rootB.sons:
rootA.sons.append(sonB)
sonB.father = rootA
def main():
uf = UnionFindQuickFind()
for i in range(10):
print("makeset(" + str(i) + ")")
uf.makeset(i)
for i in range(10):
print("find(" + str(i) + ")= " + str(uf.find(uf.nodes[i])))
print("union effettuata sull'albero contenete il nodo 0 e l'albero"
" contentente il nodo 2")
uf.union(uf.nodes[0].father, uf.nodes[2].father)
for i in range(10):
print("find(" + str(i) + ")= " + str(uf.find(uf.nodes[i])))
print("union effettuata sull'albero contenete il nodo 8 e l'albero"
" contentente il nodo 4")
uf.union(uf.nodes[8].father, uf.nodes[4].father)
for i in range(10):
print("find(" + str(i) + ")= " + str(uf.find(uf.nodes[i])))
print("union effettuata sull'albero contenete il nodo 0 e l'albero"
" contentente il nodo 8")
uf.union(uf.nodes[0].father, uf.nodes[8].father)
for i in range(10):
print("find(" + str(i) + ")= " + str(uf.find(uf.nodes[i])))
print("union effettuata sull'albero contenete il nodo 5 e l'albero"
" contentente il nodo 8")
uf.union(uf.nodes[5].father, uf.nodes[8].father)
for i in range(10):
print("find(" + str(i) + ")= " + str(uf.find(uf.nodes[i])))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
8056687
|
from core.exceptions.exceptions import LinTimException
class InputFileException(LinTimException):
"""Exception to throw if an input file cannot be found."""
def __init__(self, file_name: str):
"""
Initialise a new exception
:param file_name: name of the file that could not be found
"""
super().__init__("Error I1: File {} cannot be found.".format(file_name))
class InputFormatException(LinTimException):
"""
Exception to throw if the input file is not formatted correctly, i.e., if the wrong number of columns is given.
"""
def __init__(self, file_name: str, columns_given: int, columns_required: int):
"""
Initialise a new exception
:param file_name: the read file
:param columns_given: the number of columns given
:param columns_required: the required number of columns
"""
super().__init__("Error I2: File {} is not formatted correctly: {} columns given, {} needed."
.format(file_name, columns_given, columns_required))
class InputTypeInconsistencyException(LinTimException):
"""
Exception to throw if the input file has a type inconsistency.
"""
def __init__(self, file_name: str, column_index: int, line_number: int, expected_type: str, found: str):
"""
Initialise a new exception.
:param file_name: input file name
:param column_index: column in which exception occurs
:param line_number: number of line in which the exception occurs
:param expected_type: expected type
:param found: entry of wrong type
"""
super().__init__("Error I3: Column {} of file {} should be of type {} but entry in line {} is {}."
.format(column_index, file_name, expected_type, line_number, found))
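# Illustrative usage sketch (hypothetical reader code, not part of the LinTim core):
#
#   columns = line.split(";")
#   if len(columns) != expected_columns:
#       raise InputFormatException(file_name, len(columns), expected_columns)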
|
StarcoderdataPython
|
6668869
|
<gh_stars>0
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\careers\rabbithole_career_gig.py
# Compiled at: 2020-02-05 22:23:21
# Size of source mod 2**32: 10792 bytes
from careers.career_enums import GigResult
from careers.career_gig import Gig, TELEMETRY_GIG_PROGRESS_TIMEOUT, TELEMETRY_GIG_PROGRESS_COMPLETE
from sims4.localization import TunableLocalizedStringFactory
from sims4.tuning.instances import lock_instance_tunables
from sims4.tuning.tunable import OptionalTunable, Tunable, TunablePercent, TunableTuple
from ui.ui_dialog_picker import OddJobPickerRow
import random, sims.sim_info_tests, sims4, statistics.skill_tests
from date_and_time import TimeSpan
logger = sims4.log.Logger('RabbitholeGig', default_owner='madang')
class RabbitholeGig(Gig):
INSTANCE_TUNABLES = {'negative_mood_tuning':TunableTuple(description='\n Tuning for the negative mood test. If the Sim has the any of the \n negative mood buffs (the Buff test passes), the failure chance \n tunable will be used to determine whether or not to apply the \n FAILURE outcome.\n ',
negative_mood_test=sims.sim_info_tests.BuffTest.TunableFactory(),
failure_chance=TunablePercent(description='\n Chance of a FAILURE outcome if the negative mood test passes.\n ',
default=0.0)),
'recommended_skill_tuning':OptionalTunable(description="\n Tuning for the (optional) recommended skill. If the Sim has this\n skill, the outcome will depend on the Sim's skill level relative \n to the recommended skill level.\n ",
tunable=TunableTuple(recommended_skill_test=statistics.skill_tests.SkillRangeTest.TunableFactory(description='\n The recommended skill test for this gig. For Home \n Assignment gigs, the skill range min and max should be the \n same.\n '),
great_success_chance_multiplier=Tunable(description='\n The multiplier for determining the chance the Sim will\n receive the GREAT_SUCCESS outcome.\n ',
tunable_type=float,
default=0.0),
failure_chance_multiplier=Tunable(description='\n The multiplier for determining the chance the Sim will\n receive the FAILURE outcome.\n ',
tunable_type=float,
default=0.0),
critical_failure_skill_level_delta=Tunable(description='\n The difference in skill levels lower than the recommended\n skill level for a Sim to qualify for a CRITICAL FAILURE \n outcome.\n ',
tunable_type=int,
default=0))),
'gig_picker_localization_format':TunableLocalizedStringFactory(description='\n String used to format the description in the gig picker. Currently\n has tokens for name, payout, gig time, tip title, and tip text.\n ')}
@classmethod
def _verify_tuning_callback(cls):
if not cls.tip:
logger.error('No tip tuned for Rabbithole Gig {}. Rabbithole Gigs must have a tip.', cls)
def _determine_gig_outcome(self):
if not self.has_attended_gig():
self._gig_result = GigResult.CRITICAL_FAILURE
self._send_gig_telemetry(TELEMETRY_GIG_PROGRESS_TIMEOUT)
return
else:
if self._gig_result == GigResult.CANCELED:
self._gig_result = GigResult.FAILURE
return
self._send_gig_telemetry(TELEMETRY_GIG_PROGRESS_COMPLETE)
resolver = self.get_resolver_for_gig()
if resolver(self.negative_mood_tuning.negative_mood_test) and random.random() <= self.negative_mood_tuning.failure_chance:
self._gig_result = GigResult.FAILURE
return
if self.recommended_skill_tuning:
skill = self._owner.get_statistic((self.recommended_skill_tuning.recommended_skill_test.skill), add=False)
sim_skill_level = 0
if skill:
sim_skill_level = skill.get_user_value()
else:
recommended_level = self.recommended_skill_tuning.recommended_skill_test.skill_range_max
if sim_skill_level > recommended_level:
chance = (sim_skill_level - recommended_level) * self.recommended_skill_tuning.great_success_chance_multiplier
if random.random() <= chance:
self._gig_result = GigResult.GREAT_SUCCESS
else:
self._gig_result = GigResult.SUCCESS
else:
if sim_skill_level == recommended_level:
self._gig_result = GigResult.SUCCESS
else:
skill_level_difference = recommended_level - sim_skill_level
if skill_level_difference >= self.recommended_skill_tuning.critical_failure_skill_level_delta:
self._gig_result = GigResult.CRITICAL_FAILURE
else:
chance = skill_level_difference * self.recommended_skill_tuning.failure_chance_multiplier
if random.random() <= chance:
self._gig_result = GigResult.FAILURE
else:
self._gig_result = GigResult.CRITICAL_FAILURE
else:
self._gig_result = GigResult.SUCCESS
@classmethod
def create_picker_row(cls, description=None, scheduled_time=None, owner=None, gig_customer=None, enabled=True, **kwargs):
tip = cls.tip
duration = TimeSpan.ONE
finishing_time = None
if scheduled_time is None:
logger.error('Rabbit Hole Gig {} : Not a valid scheduled_time.', cls)
return
for start_time, end_time in cls.gig_time().get_schedule_entries():
if scheduled_time.day() == start_time.day() and scheduled_time.hour() == start_time.hour() and scheduled_time.minute() == start_time.minute():
duration = end_time - start_time
finishing_time = scheduled_time + duration
break
if finishing_time == None:
logger.error('Rabbit Hole Gig {} : No gig start_time found for scheduled_time {} ', cls, scheduled_time)
return
pay_rate = cls.gig_pay.lower_bound / duration.in_hours()
description = cls.gig_picker_localization_format(cls.gig_pay.lower_bound, pay_rate, scheduled_time, finishing_time, tip.tip_title(), tip.tip_text(), gig_customer)
if (enabled or cls.disabled_tooltip) is not None:
row_tooltip = lambda *_: cls.disabled_tooltip(owner)
else:
if cls.display_description is None:
row_tooltip = None
else:
row_tooltip = lambda *_: cls.display_description(owner)
customer_description = cls.odd_job_tuning.customer_description(gig_customer)
row = OddJobPickerRow(customer_id=(gig_customer.id), customer_description=customer_description,
tip_title=(tip.tip_title()),
tip_text=(tip.tip_text()),
tip_icon=(tip.tip_icon),
name=(cls.display_name(owner)),
icon=(cls.display_icon),
row_description=description,
row_tooltip=row_tooltip,
is_enable=enabled)
return row
lock_instance_tunables(RabbitholeGig, gig_prep_tasks=None,
audio_on_prep_task_completion=None,
career_events=None,
gig_cast_rel_bit_collection_id=None,
gig_cast=None,
end_of_gig_dialog=None,
payout_stat_data=None)
|
StarcoderdataPython
|
6525277
|
<filename>find_parsimonious_assignments.py
from treelib import Node, Tree
import sys
import io
import argparse
import gzip
def create_tree(tree_filename):
tree = Tree()
# Read Newick file line
f = open(tree_filename)
line = f.readline().rstrip()
f.close()
# Get leaves names in s2
s1 = line.split(',')
s2 = [s.split(':')[0].replace('(', '').split(')')[0] for s in s1]
stack = [(w.count('('), w.count(')')) for w in s1]
num_open = sum([s[0] for s in stack])
num_close = sum([s[1] for s in stack])
if num_open != num_close:
print ('ERROR: PhyloTree in incorrect format!')
sys.exit()
curr_node = '0'
curr_idx = 0
parent_stack = []
# Construct the tree in treelib format
for (k, species) in enumerate(s2):
no = stack[k][0]
nc = stack[k][1]
for i in range(no):
curr_node = str(int(curr_node)+1)
if len(parent_stack) == 0:
tree.create_node(curr_node, curr_node)
else:
tree.create_node(curr_node, curr_node, parent=parent_stack[-1])
parent_stack.append(curr_node)
tree.create_node(species, species, parent=parent_stack[-1])
for i in range(nc):
nid = parent_stack[-1]
curr_idx += 1
parent_stack.pop()
return tree
# Convert tree back to Newick string
def get_newick_string (tree):
dfs = [n for n in tree.expand_tree(mode=1, sorting=False, reverse=True)]
depths = [tree.level(n) for n in dfs]
is_leaf = [tree.get_node(n).is_leaf() for n in dfs]
curr_depth = -1
newick_string = ''
prev_open = True
stack = []
for (k,s) in enumerate(dfs):
tag = tree.get_node(s).tag
depth = depths[k]
if (curr_depth < depth):
if (not prev_open):
newick_string += ','
for i in range(depth-max(0,curr_depth)):
newick_string += '('
prev_open = True
if (is_leaf[k]):
newick_string += tag
prev_open = False
else:
stack.append(tag)
elif (curr_depth > depth):
prev_open = False
for i in range(curr_depth-depth):
newick_string += ')'
newick_string += stack[-1]
stack.pop()
if (is_leaf[k]):
newick_string += ','+tag
else:
stack.append(tag)
else:
prev_open = False
if (is_leaf[k]):
newick_string += ','+tag
else:
stack.append(tag)
curr_depth = depth
for i in range(curr_depth):
newick_string += ')'
newick_string += stack[-1]
stack.pop()
newick_string += ';'
return newick_string
# Sankoff algorithm using a simple scoring to make most parsimonious
# assignments to tree nodes and return transitions
def get_most_parsimonious_transitions (tree, bfs, bfs_idx, var_ids):
s = [(0, 1e6) for i in range(len(bfs_idx.keys()))]
states = [0 for i in range(len(bfs_idx.keys()))]
forward_mutations = []
back_mutations = []
leaves_affected_forward = []
leaves_affected_backward = []
for nid in var_ids:
idx = bfs_idx[nid]
s[idx] = (1e6, 0)
#forward pass
for nid in bfs[::-1]:
node = tree.get_node(nid)
node_idx = bfs_idx[nid]
if (not node.is_leaf()):
s_ref = 0
s_alt = 0
children = tree.children(nid)
for child in children:
c_id = child.identifier
c_idx = bfs_idx[c_id]
s_ref += min([s[c_idx][0], 1+s[c_idx][1]])
s_alt += min([1+s[c_idx][0], s[c_idx][1]])
s[node_idx] = (s_ref, s_alt)
flagged_leaves = []
#backward pass
for nid in bfs:
node_idx = bfs_idx[nid]
(s_ref, s_alt) = s[node_idx]
state = 0
par_state = 0
if (nid != tree.root):
par = tree.parent(nid)
par_id = par.identifier
par_idx = bfs_idx[par_id]
par_state = states[par_idx]
if (s_ref < s_alt):
state = 0
elif (s_ref == s_alt):
state = par_state
else:
state = 1
states[node_idx] = state
if (state != par_state):
leaves = tree.leaves(nid)
if (state == 1):
forward_mutations.append(nid)
else:
back_mutations.append(nid)
for nid in forward_mutations:
leaves = tree.leaves(nid)
l_f = set([l for l in leaves if states[bfs_idx[l.identifier]] == 1])
leaves_affected_forward.append(l_f)
if (len(leaves) < 4):
for l in leaves:
if (states[bfs_idx[l.identifier]] == 1):
flagged_leaves.append(l.identifier)
for nid in back_mutations:
leaves = tree.leaves(nid)
l_b = set([l for l in leaves if states[bfs_idx[l.identifier]] == 0])
leaves_affected_backward.append(l_b)
if (len(leaves) < 4):
for l in leaves:
if (states[bfs_idx[l.identifier]] == 0):
flagged_leaves.append(l.identifier)
for i in range(len(leaves_affected_forward)):
for j in range(i):
leaves_affected_forward[j] = leaves_affected_forward[j] - \
leaves_affected_forward[i]
for i in range(len(leaves_affected_backward)):
for j in range(i):
leaves_affected_backward[j] = leaves_affected_backward[j] - \
leaves_affected_backward[i]
return forward_mutations, back_mutations,\
[str(len(l)) for l in leaves_affected_forward], \
[str(len(l)) for l in leaves_affected_backward], \
flagged_leaves
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Compute most parsimonious '
'character assignments for each internal '
'tree node given the set of variants for '
'its leaves.')
parser.add_argument("-tree", type=str,
help="input tree (in Newick format)")
parser.add_argument("-vcf", type=str,
help="vcf file corresponding to the tree")
parser.add_argument("-variants", type=str,
help="restrict to a list of variants (':' "
"separated) from VCF file [OPTIONAL]")
args = vars(parser.parse_args())
tree_filename = args.get('tree', '')
if (tree_filename == None):
parser.print_help()
sys.exit(1)
vcf_filename = args.get('vcf', '')
if (vcf_filename == None):
parser.print_help()
sys.exit(1)
variants_args = args.get('variants', '')
variant_list = None if (variants_args == None) else variants_args.split(':')
tree = create_tree(tree_filename)
labelled_tree_newick = get_newick_string(tree)
print('Tree (in newick format) with internal nodes labelled: ')
print(labelled_tree_newick)
bfs = [n for n in tree.expand_tree(mode=2, sorting=False, reverse=True)]
bfs_idx = {}
for (k,v) in enumerate(bfs):
bfs_idx[v] = k
header_found = False
header_line = ''
vcf_ids = []
total_variants = 0
total_parsimony_score = 0
# Check vcf file type
if vcf_filename.endswith(".gz"):
vcf_file = io.TextIOWrapper(io.BufferedReader(gzip.open(vcf_filename)))
else:
vcf_file = open(vcf_filename, 'r')
with vcf_file as f:
for line in f:
if (not header_found):
if ('REF' in line):
header_found = True
words = line.split()
vcf_ids = words[9:]
# vcf_ids = [w.replace('/', '_').replace('|', '_') for w in \
# words[9:]]
else:
total_variants += 1
print_variant = False
words = line.split()
variant_pos = int(words[1])
variant = words[2]
variant_ref = words[3]
variant_ids = [vcf_ids[k] for (k, w) in enumerate(words[9:]) \
if (w.split(':')[0] == '1')]
if ((variant_list == None) or (variant in variant_list)):
f_mut, b_mut, l_f, l_b, flagged_leaves = \
get_most_parsimonious_transitions \
(tree, bfs, bfs_idx, variant_ids)
print(variant+'\talt_alleles='+str(len(variant_ids))+
'\tparsimony_score='+str(len(f_mut)+len(b_mut))+
'\tforward_mutation_nodes='+','.join(f_mut)+
'\tback_mutation_nodes='+','.join(b_mut)+
'\tforward_mutation_clade_sizes='+','.join(l_f)+
'\tback_mutation_clade_sizes='+','.join(l_b)+
'\tflagged_leaves='+','.join(flagged_leaves))
total_parsimony_score += len(f_mut)+len(b_mut)
print('Total leaf nodes: ', len(tree.leaves()))
print('Total variants: ', total_variants)
print('Total parsimony score: ', total_parsimony_score)
|
StarcoderdataPython
|
11326132
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-17 15:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movies', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='rating',
name='raw_time',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='rating',
name='timestamp',
field=models.DateTimeField(null=True),
),
]
|
StarcoderdataPython
|
3501800
|
import sys
import os
import yaml
from munch import munchify
from ott.netcdf import IfcbMetadata, csdir2netcdf
# constants
CONFIG_FILE = 'config.yml'
def load_config(config_file):
with open(config_file) as fin:
return munchify(yaml.safe_load(fin))
def process_dir(in_dir, out_dir):
# load configuration
config = load_config(CONFIG_FILE)
# configure metadata
md = IfcbMetadata(**config.metadata)
# configure summary processing
freq = config.summary.frequency
thresh = config.summary.threshold
try:
os.makedirs(out_dir)
except:
pass
csdir2netcdf(in_dir, out_dir, frequency=freq, threshold=thresh, metadata=md)
if __name__=='__main__':
assert len(sys.argv) == 3, "must specify an input and output directory"
in_dir = sys.argv[1]
out_dir = sys.argv[2]
process_dir(in_dir, out_dir)
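# Illustrative layout of the config.yml this script expects (keys inferred from the attribute
# accesses above; the metadata fields accepted by IfcbMetadata are an assumption):
#
#   metadata:
#     title: "IFCB summary"          # hypothetical example value
#   summary:
#     frequency: "1D"
#     threshold: 0.5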
|
StarcoderdataPython
|
3509686
|
<filename>libs/send2trash/compat.py<gh_stars>1000+
# Copyright 2017 <NAME>
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
import sys
import os
PY3 = sys.version_info[0] >= 3
if PY3:
text_type = str
binary_type = bytes
if os.supports_bytes_environ:
# environb will be unset under Windows, but then again we're not supposed to use it.
environb = os.environb
else:
text_type = unicode
binary_type = str
environb = os.environ
|
StarcoderdataPython
|
1767792
|
<filename>AutomatedTesting/Gem/PythonTests/physics/C15096740_Material_LibraryUpdatedCorrectly.py
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Test case ID : C15096740
Test Case Title : Verify that clearing a material library on all systems that use it,
assigns the default material library
"""
# fmt: off
class Tests():
create_entity = ("Entity created successfully", "Failed to create Entity")
add_physx_component = ("PhysX Component added successfully", "Failed to add PhysX Component")
override_default_library = ("Material library overrided successfully", "Failed to override material library")
update_to_default_library = ("Library updated to default", "Failed to update library to default")
new_library_updated = ("New library updated successfully", "Failed to add new library")
# fmt: on
def C15096740_Material_LibraryUpdatedCorrectly():
"""
Summary:
Load level with Entity having PhysX Component. Override the material library to be the same one as the
default material library. Change the default material library into another one.
Expected Behavior:
The material library gets updated correctly when the default material is changed.
Test Steps:
1) Load the level
2) Create new Entity with PhysX Character Controller
3) Override the material library to be the same one as the default material library
4) Switch it back again to the default material library.
5) Change the default material library into another one.
6) Close the editor
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Built-in Imports
import os
import ImportPathHelper as imports
imports.init()
# Helper file Imports
from editor_python_test_tools.editor_entity_utils import EditorEntity
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
from asset_utils import Asset
# Open 3D Engine Imports
import azlmbr.asset as azasset
# Constants
library_property_path = "Configuration|Physics Material|Library"
default_material_path = "surfacetypemateriallibrary.physmaterial"
new_material_path = os.path.join("physicssurfaces", "default_phys_materials.physmaterial")
helper.init_idle()
# 1) Load the level
helper.open_level("Physics", "Base")
# 2) Create new Entity with PhysX Character Controller
test_entity = EditorEntity.create_editor_entity("TestEntity")
Report.result(Tests.create_entity, test_entity.id.IsValid())
test_component = test_entity.add_component("PhysX Character Controller")
Report.result(Tests.add_physx_component, test_entity.has_component("PhysX Character Controller"))
# 3) Override the material library to be the same one as the default material library
default_asset = Asset.find_asset_by_path(default_material_path)
test_component.set_component_property_value(library_property_path, default_asset.id)
default_asset.id = test_component.get_component_property_value(library_property_path)
Report.result(Tests.override_default_library, default_asset.get_path() == default_material_path)
# 4) Switch it back again to the default material library.
test_component.set_component_property_value(library_property_path, azasset.AssetId())
Report.result(
Tests.update_to_default_library,
test_component.get_component_property_value(library_property_path) == azasset.AssetId(),
)
# 5) Change the default material library into another one.
new_asset = Asset.find_asset_by_path(new_material_path)
test_component.set_component_property_value(library_property_path, new_asset.id)
new_asset.id = test_component.get_component_property_value(library_property_path)
Report.result(Tests.new_library_updated, new_asset.get_path() == new_material_path.replace(os.sep, '/'))
if __name__ == "__main__":
import ImportPathHelper as imports
imports.init()
from editor_python_test_tools.utils import Report
Report.start_test(C15096740_Material_LibraryUpdatedCorrectly)
|
StarcoderdataPython
|
239725
|
<gh_stars>0
import pygame
from desky.button import ButtonState
class Scheme:
pass
def add_default_methods(clsname):
def default_setup(self, panel, gui):
pass
setattr(Scheme, "setup_" + clsname, default_setup)
def default_layout(self, panel, w, h):
panel.layout_children(self, w, h)
setattr(Scheme, "layout_" + clsname, default_layout)
def default_render(self, panel, surface, clock, w, h):
panel.render_children(self, surface, clock, w, h)
setattr(Scheme, "render_" + clsname, default_render)
add_default_methods("panel")
add_default_methods("label")
add_default_methods("text_button")
add_default_methods("checkbox")
add_default_methods("text_entry")
add_default_methods("context_menu_item")
add_default_methods("context_menu_sub_item")
add_default_methods("context_menu_panel")
add_default_methods("scroll_panel")
add_default_methods("scroll_bar")
add_default_methods("scroll_bar_button")
add_default_methods("adjustable_divider")
add_default_methods("adjustable_divider_grabber")
def render_text_entry_text(panel, surface, clock, w, h):
# Get measurements.
ascender = panel.font.get_sized_ascender()
descender = panel.font.get_sized_descender()
th = panel.font.get_sized_height()
_, _, startx, _ = panel.font.get_rect(panel.text[:panel.caret])
basex, basey, _, _ = panel.font.get_rect(panel.text)
# Update the view.
if panel.caret == 0:
panel.viewx = -panel.xoffset
else:
if startx - panel.viewx > panel.width:
panel.viewx = startx - int(panel.width * 0.66)
if startx - panel.viewx < 0:
panel.viewx = startx - int(panel.width * 0.33)
panel.viewx = max(panel.viewx, -panel.xoffset)
# Draw main text portion.
x = -panel.viewx
y = int(0.5 * (h - th))
tx = x
ty = y - basey + descender + th
textsurf, _ = panel.font.render(None, (255, 255, 255))
surface.blit(textsurf, (tx, ty))
if panel.focus:
# Determine caret / highlight start coordinates.
start, end = sorted((panel.caret, panel.select_start))
_, _, startx, _ = panel.font.get_rect(panel.text[:start])
# Default endx and color so we get a white caret with a 1px width.
endx = startx + 1
color = (255, 255, 255)
# Determine highlight end coordinate and adjust endx and color.
if start != end:
_, _, endx, _ = panel.font.get_rect(panel.text[:end])
color = (128, 128, 255)
# Draw the caret or selection area.
pygame.draw.rect(surface, color, pygame.Rect(tx + startx, 0.5 * (h - th), endx - startx, th))
if start != end:
# Draw selection text. To get the selected text correctly
# positioned we align the last pixel of the selected text with
# endx.
basex, basey, tw, _ = panel.font.get_rect(panel.text[start:end])
ty = y - basey + descender + th
textsurf, _ = panel.font.render(None, (0, 0, 127))
surface.blit(textsurf, (tx + endx - tw, ty))
|
StarcoderdataPython
|
6503875
|
<reponame>UniSerj/ai-research
import os
import torch
import torchvision
import numpy as np
from enum import Enum
from datasets.data_preprocessing import get_preprocessing, PreProcessing
from datasets.data_augmentation import get_augmentation, Augmentation
from torch.utils.data import DataLoader
class Dataset(Enum):
CIFAR10 = 0
ImageNet = 1
CIFAR_IMAGE_SIZE = 32
IMAGENET_CROP_SIZE = 224
IMAGENET_RESIZE_SIZE = 256
def _fast_collate(batch):
imgs = [img[0] for img in batch]
c = 1 if len(np.asarray(imgs[0]).shape) == 2 else 3
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros((len(imgs), c, h, w), dtype=torch.uint8)
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if (nump_array.ndim < 3):
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
def _get_dataset_augmentation_normalization(dataset_enum, distributed=True, enable_auto_augmentation=False):
if dataset_enum == Dataset.CIFAR10:
normalization = get_preprocessing(PreProcessing.CIFAR)
train_transform = get_augmentation(Augmentation.CropAndHorizontalFlip,
crop_size=CIFAR_IMAGE_SIZE,
distributed=distributed,
enable_auto_augmentation=enable_auto_augmentation)
validation_transform = get_augmentation(Augmentation.NoAugmentation,
crop_size=CIFAR_IMAGE_SIZE,
distributed=distributed)
elif dataset_enum == Dataset.ImageNet:
normalization = get_preprocessing(PreProcessing.IMAGENET)
train_transform = get_augmentation(Augmentation.ResizeCropAndHorizontalFlip, crop_size=IMAGENET_CROP_SIZE,
distributed=distributed)
validation_transform = get_augmentation(Augmentation.ResizeCenterCrop, crop_size=IMAGENET_CROP_SIZE,
resize_size=IMAGENET_RESIZE_SIZE, distributed=distributed)
else:
raise NotImplementedError
if not distributed and normalization is not None:
train_transform.transforms.append(normalization)
validation_transform.transforms.append(normalization)
return train_transform, validation_transform
def get_dataset(dataset: Dataset, data_path: str, batch_size: int, num_workers: int = 4, distributed=True,
enable_auto_augmentation=False):
"""
This function returns the data loaders for the training and validation sets, together
with a training sampler for multi-GPU usage.
:param dataset: the dataset enum (CIFAR10 or ImageNet)
:param data_path: the data folder in ImageNet
:param batch_size: the training and validation batch size
:param num_workers: the number of worker processes used for data loading
:param distributed: working in distributed mode
:param enable_auto_augmentation: this flag enable the auto augmentation
:return: train loader, validation loader and training sampler.
"""
train_transform, test_transform = _get_dataset_augmentation_normalization(dataset,
distributed=distributed,
enable_auto_augmentation=enable_auto_augmentation)
if dataset == Dataset.CIFAR10:
trainset = torchvision.datasets.CIFAR10(root=data_path, train=True,
download=True,
transform=train_transform)  # transformation (preprocess and augmentation)
testset = torchvision.datasets.CIFAR10(root=data_path, train=False,
download=True, transform=test_transform)
elif dataset == Dataset.ImageNet:
trainset = torchvision.datasets.ImageFolder(os.path.join(data_path, 'train'),
transform=train_transform)
testset = torchvision.datasets.ImageFolder(os.path.join(data_path, 'validation'),
transform=test_transform)
else:
raise NotImplementedError
train_sampler = None
val_sampler = None
if distributed:
print("Starting Distributed Datasets")
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
val_sampler = torch.utils.data.distributed.DistributedSampler(testset)
train_loader = DataLoader(trainset, batch_size=batch_size,
shuffle=(train_sampler is None),
num_workers=num_workers,
pin_memory=True,
sampler=train_sampler,
collate_fn=_fast_collate if distributed else None)  # load data using multiple worker threads
test_loader = None
if testset is not None:
test_loader = DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True,
sampler=val_sampler,
collate_fn=_fast_collate if distributed else None
)
return train_loader, test_loader, train_sampler
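# Usage sketch (single-process, non-distributed; assumes CIFAR-10 can be downloaded to ./data):
#
#   train_loader, test_loader, train_sampler = get_dataset(
#       Dataset.CIFAR10, data_path="./data", batch_size=128,
#       num_workers=2, distributed=False)
#   # train_sampler is None in this case and the loaders yield normalized CIFAR batches.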
|
StarcoderdataPython
|
6509848
|
<gh_stars>1-10
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
from api_client.ado.constants import ADOConstants
from api_client.exceptions import FailedToAttachWorkItemError, FailedToUpdateFieldsError
from core.api.caller import get, patch, post
from api_client.ado.endpoints import endpoint_map
from core.api.interfaces.work_item_api_client import FieldValueObject, WorkItemApiClient
from core.exceptions import APICallFailedError
from core.utils.map import resources
class AdoFieldValueObject(FieldValueObject):
"""
Object that will be used while making updates to a list of work items using patch calls
"""
def __init__(self):
self.payload = []
def update_field(self, field_path, field_value):
update_field = {
"op": "add",
"path": field_path,
"value": field_value
}
self.payload.append(update_field)
def remove_field(self, field_path):
remove_field = {
"op": "remove",
"path": field_path,
}
self.payload.append(remove_field)
def payload_str(self):
return json.dumps(self.payload)
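# Example of the JSON Patch document AdoFieldValueObject builds (illustrative field paths/values):
#
#   fv = AdoFieldValueObject()
#   fv.update_field("/fields/System.State", "Active")
#   fv.remove_field("/fields/System.Tags")
#   fv.payload_str()
#   # -> '[{"op": "add", "path": "/fields/System.State", "value": "Active"},
#   #      {"op": "remove", "path": "/fields/System.Tags"}]'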
class AdoWorkItemClient(WorkItemApiClient):
__logger = resources.get('LOGGER')
__name = 'WorkItemApiClient'
def __init__(self):
super().__init__()
def get_work_items(self, entity, items: list):
items = []
for wi in items:
items.append(self.get_work_item_by_id(entity, wi))
return items
def get_work_item_by_id(self, entity, item_id):
endpoint = endpoint_map['work_item_by_id'].format(entity.org, item_id)
wi_data = get(endpoint, entity.pat)
return wi_data
def get_work_item_by_id_with_relations(self, entity, work_item_id):
endpoint = endpoint_map['work_item_by_id_with_relations'].format(entity.org, work_item_id)
wi_data = get(endpoint, entity.pat)
return wi_data
def get_work_items_by_query_id(self, entity, query_id):
"""
Retrieve query metadata for the given query_id and return the resulting list of work items.
"""
endpoint = endpoint_map['ado_query_by_id'] \
.format(entity.org, entity.project, query_id, entity.ado_version)
# retrieve query metadata
query_md = get(endpoint, entity.pat)
if query_md is None:
raise ValueError("Couldn't retrieve query metadata for query_id : {}".format(query_id))
# retrieve list of workitems which is the result of the query
self.__logger.info(self.__name, "Query Filter Title: '{}'".format(query_md['name']))
wi_results = get(query_md['_links']['wiql']['href'], entity.pat)
if 'workItems' not in wi_results or wi_results['workItems'] is None or len(wi_results['workItems']) == 0:
self.__logger.info(self.__name, 'no WorkItems found for query {}: {}'.format(query_id, query_md['name']))
return []
self.__logger.info(self.__name, 'Found {} WorkItem(s) for query filter "{}"...\n'
.format(len(wi_results['workItems']), query_md['name']))
return wi_results['workItems']
def attach_work_item(self, item_id, pr_entity, artifact_id):
"""
Method to attach a work item to a PR, when the work item is referenced in the VSO URL but the PR does not yet have the link.
"""
url = endpoint_map['work_item_update_by_id'].format(pr_entity.org, "MSTeams", item_id)
querystring = {"api-version": pr_entity.ado_version}
payload = [{
"op": "add",
"path": "/relations/-",
"value": {
"rel": "ArtifactLink",
"url": artifact_id,
"attributes": {
"name": "Pull request"
}
}
}]
try:
response = patch(url, pr_entity.pat, querystring, json.dumps(payload))
except APICallFailedError:
raise FailedToAttachWorkItemError(item_id)
self.__logger.info(self.__name, "Attached work item successfully!" + response.text)
def create(self, entity, work_item_type, payload,
query_string='{}',
content_type='application/json-patch+json'):
endpoint = endpoint_map['create_work_item'] \
.format(entity.org, entity.project, work_item_type, entity.ado_version)
resp = post(endpoint=endpoint,
pat=entity.pat,
query_str=query_string,
payload=payload,
content_type=content_type)
return resp
def linked_parent_work_items(self, entity, work_item_id):
json_response = self.get_work_item_by_id_with_relations(entity, work_item_id)
relations = json_response['relations']
parent = []
for relation in relations:
self.__logger.debug(self.__name, 'Relation' + str(relation))
if relation['rel'] == ADOConstants.work_item_relations['parent']:
parent.append(relation['url'])
return parent
def update_fields(self, pr_entity, work_item_id, field_value_obj: FieldValueObject):
payload = field_value_obj.payload_str()
response = None
querystring = {"api-version": pr_entity.ado_version}
try:
url = endpoint_map['work_item_by_id'].format(pr_entity.org, work_item_id)
response = patch(url, pr_entity.pat, querystring, payload=payload)
self.__logger.info(self.__name, "Successfully updated the fields {} for work item {}!"
.format(payload, work_item_id))
except APICallFailedError:
self.__logger.error(self.__name, "API response: {}".format(response.text if response else None))
raise FailedToUpdateFieldsError(work_item_id, payload)
|
StarcoderdataPython
|
107039
|
<gh_stars>0
class Block(object):
def __init__(self, name) -> None:
super().__init__()
self.__name__ = name
|
StarcoderdataPython
|
9759905
|
DEBUG = False
PLUGINS = [
"myapp"
]
ASGI_APP = "myapp.asgi:app"
|
StarcoderdataPython
|
11275390
|
#from ._version import get_versions
#
#__version__ = get_versions()["version"]
#del get_versions
#
#global_config = None
|
StarcoderdataPython
|
8049584
|
"""
# Python Handout
Turn Python scripts into handouts with Markdown comments and inline figures. An
alternative to Jupyter notebooks without hidden state that supports any text
editor.
"""
import handout
import matplotlib.pyplot as plt
import numpy as np
"""Start your handout with an output directory."""
doc = handout.Handout('./output')
"""
## Markdown comments
Comments with triple quotes are converted to text blocks.
Text blocks support [Markdown formatting][1], for example:
- Headlines
- Hyperlinks
- Inline `code()` snippets
- **Bold** and *italic*
- LaTeX math $f(x)=x^2$
[1]: https://commonmark.org/help/
"""
"""
## Add text and variables
Write to our handout using the same syntax as Python's `print()`:
"""
for index in range(3):
doc.add_text('Iteration', index)
doc.show()
"""
## Add Matplotlib figures
Display matplotlib figures on the handout:
"""
fig, ax = plt.subplots(figsize=(4, 3))
ax.plot(np.arange(100))
fig.tight_layout()
doc.add_figure(fig)
doc.show() # Display figure below this line.
"""
Set the width to display multiple figures side by side:
"""
for iteration in range(3):
fig, ax = plt.subplots(figsize=(3, 2))
ax.plot(np.sin(np.linspace(0, 20 / (iteration + 1), 100)))
doc.add_figure(fig, width=0.33)
doc.show()
"""
## Add images and videos
This requires the `imageio` pip package.
"""
image_a = np.random.uniform(0, 255, (200, 400, 3)).astype(np.uint8)
image_b = np.random.uniform(0, 255, (100, 200, 1)).astype(np.uint8)
doc.add_image(image_a, 'png', width=0.4)
doc.add_image(image_b, 'jpg', width=0.4)
doc.show()
video = np.random.uniform(0, 255, (100, 64, 128, 3)).astype(np.uint8)
doc.add_video(video, 'gif', fps=30, width=0.4)
doc.add_video(video, 'mp4', fps=30, width=0.4)
doc.show()
"""
## Exclude lines
Hide code from the handout with the `# handout: exclude` comment:
"""
# Invisible below:
value = 13 # handout: exclude
"""
Exclude whole ranges between `# handout: begin-exclude` and `# handout:
end-exclude` lines.
"""
"""
## View the handout
The handout is automatically saved when you call `doc.show()`. Just open
`output/index.html` in your browser.
"""
|
StarcoderdataPython
|
5006536
|
from . import InitDB
from configs import config
from typing import Dict, List
class ChatDB(InitDB):
@staticmethod
def _get(chats: List) -> Dict[str, str]:
result = {}
for chat in chats:
(
owner_id,
chat_id,
lang,
quality,
admin_only,
gcast_type,
del_cmd,
player_mode,
duration_limit,
selecting_photo,
queued_photo,
now_streaming_photo,
settings_photo
) = chat
admin_mode = bool(admin_only)
del_cmd_mode = bool(del_cmd)
player_mode = bool(player_mode)
result.update(
{
"owner_id": owner_id,
"chat_id": chat_id,
"lang": lang,
"quality": quality,
"admin_only": admin_mode,
"gcast_type": gcast_type,
"del_cmd_mode": del_cmd_mode,
"player_mode": player_mode,
"duration_limit": duration_limit,
"selecting_photo": selecting_photo,
"queued_photo": queued_photo,
"now_streaming_photo": now_streaming_photo,
"settings_photo": settings_photo
}
)
return result
@staticmethod
def _get_chats(chats: List) -> List[Dict[str, str]]:
result = []
for chat in chats:
(
owner_id,
chat_id,
lang,
quality,
admin_only,
gcast_type,
del_cmd,
player_mode,
duration_limit,
selecting_photo,
queued_photo,
now_streaming_photo,
settings_photo
) = chat
admin_mode, del_cmd, player_mode = bool(admin_only), bool(del_cmd), bool(player_mode)
x = {
"owner_id": owner_id,
"chat_id": chat_id,
"lang": lang,
"quality": quality,
"admin_only": admin_mode,
"gcast_type": gcast_type,
"del_cmd_mode": del_cmd,
"player_mode": player_mode,
"duration_limit": duration_limit,
"selecting_photo": selecting_photo,
"queued_photo": queued_photo,
"now_streaming_photo": now_streaming_photo,
"settings_photo": settings_photo
}
result.append(x.copy())
return result
async def get_chat_id(self) -> List[int]:
results = await self.db.fetch_all("select chat_id from chat_db")
chat_list = []
for result in results:
chat_list.append(result[0])
return chat_list
async def get_chat(self, chat_id: int) -> Dict[str, str]:
results = list(
await self.db.fetch_all(
"select * from chat_db where chat_id = :chat_id", {"chat_id": chat_id}
)
)
return self._get(results) if results else {}
async def add_chat(self, chat_id: int, lang: str = "en"):
cur = self.db
x = await self.get_chat(chat_id)
if not x:
await cur.execute(
"""
insert into chat_db values (
:owner_id,
:chat_id,
:lang,
:quality,
:admin_only,
:gcast_type,
:del_cmd,
:player_mode,
:duration_limit,
:selecting_photo,
:queued_photo,
:now_streaming_photo,
:setting_photo
)
""",
{
"owner_id": config.OWNER_ID,
"chat_id": chat_id,
"lang": lang,
"quality": "medium",
"admin_only": False,
"gcast_type": "bot",
"del_cmd": True,
"player_mode": True,
"duration_limit": 0,
"selecting_photo": "https://telegra.ph/file/cc8d02e67023005905405.png",
"queued_photo": "https://telegra.ph/file/73fbe8adadbd9d061d3e1.png",
"now_streaming_photo": "https://telegra.ph/file/3a4d2300271b92787f79f.png",
"setting_photo": "https://telegra.ph/file/53b33624eafa208525a27.png"
},
)
return "add_chat_success"
return "chat_already_added"
async def del_chat(self, chat_id: int):
cur = self.db
x = await self.get_chat(chat_id)
if x:
await cur.execute("delete from chat_db where chat_id = ?", (chat_id,))
return "delete_chat_success"
return "chat_already_deleted"
async def set_lang(self, chat_id: int, lang: str):
chat = await self.get_chat(chat_id)
if lang == chat["lang"]:
return "lang_already_used"
await self.db.execute(
"update chat_db set lang = :lang where chat_id = :chat_id",
{"lang": lang, "chat_id": chat_id},
)
return "lang_changed"
async def set_quality(self, chat_id: int, quality: str):
chat = await self.get_chat(chat_id)
if quality == chat["quality"]:
return "quality_already_used"
await self.db.execute(
"update chat_db set quality = :quality where chat_id = :chat_id",
{"quality": quality, "chat_id": chat_id},
)
return "quality_changed"
async def set_admin_mode(self, chat_id: int, admin_mode: bool):
chat = await self.get_chat(chat_id)
if admin_mode == chat["admin_only"]:
return "admin_mode_already_used"
await self.db.execute(
"update chat_db set admin_only = :admin_only where chat_id = :chat_id",
{"admin_only": admin_mode, "chat_id": chat_id},
)
return "admin_mode_changed"
async def set_gcast_type(self, chat_id: int, gcast_type: str):
chat = await self.get_chat(chat_id)
if gcast_type == chat["gcast_type"]:
return "gcast_type_already_used"
await self.db.execute(
"update chat_db set gcast_type = :gcast_type where chat_id = :chat_id",
{"gcast_type": gcast_type, "chat_id": chat_id},
)
return "gcast_type_changed"
async def set_del_cmd_mode(self, chat_id: int, del_cmd_mode: bool):
chat = await self.get_chat(chat_id)
if del_cmd_mode == chat["del_cmd_mode"]:
return "del_cmd_mode_already_used"
await self.db.execute(
"update chat_db set del_cmd_mode = :del_cmd where chat_id = :chat_id",
{"del_cmd": del_cmd_mode, "chat_id": chat_id},
)
return "del_cmd_mode_changed"
async def set_player_mode(self, chat_id: int, player_mode: bool):
chat = await self.get_chat(chat_id)
if player_mode == chat["player_mode"]:
return "player_mode_already_used"
await self.db.execute(
"update chat_db set player_mode = :player_mode where chat_id = :chat_id",
{"player_mode": player_mode, "chat_id": chat_id},
)
return "player_mode_changed"
async def set_duration_limit(self, chat_id: int, duration_limit: int):
chat = await self.get_chat(chat_id)
if duration_limit == chat["duration_limit"]:
return "duration_limit_already_used"
await self.db.execute(
"update chat_db set duration_limit = :duration_limit where chat_id = :chat_id",
{"duration_limit": duration_limit, "chat_id": chat_id},
)
return "duration_limit_changed"
async def get_stats(self):
chats = list(await self.db.fetch_all("select * from chat_db"))
group = pm = 0
results = self._get_chats(chats)
for chat in results:
chat_id = str(chat["chat_id"])
if chat_id.startswith("-"):
group += 1
else:
pm += 1
return pm, group
chat_db = ChatDB()
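# Usage sketch (inside an async context; assumes InitDB wires self.db to a live database):
#
#   await chat_db.add_chat(-100123456789, lang="en")
#   settings = await chat_db.get_chat(-100123456789)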
|
StarcoderdataPython
|
6629955
|
<reponame>plocandido/docinfrati<gh_stars>100-1000
import logging
from typing import Callable, Dict
from lunr.exceptions import BaseLunrException
from lunr.token import Token
log = logging.getLogger(__name__)
class Pipeline:
"""lunr.Pipelines maintain a list of functions to be applied to all tokens
in documents entering the search index and queries run against the index.
"""
registered_functions: Dict[str, Callable] = {}
def __init__(self):
self._stack = []
def __len__(self):
return len(self._stack)
def __repr__(self):
return '<Pipeline stack="{}">'.format(",".join(fn.label for fn in self._stack))
# TODO: add iterator methods?
@classmethod
def register_function(cls, fn, label=None):
"""Register a function with the pipeline."""
label = label or fn.__name__
if label in cls.registered_functions:
log.warning("Overwriting existing registered function %s", label)
fn.label = label
cls.registered_functions[fn.label] = fn
@classmethod
def load(cls, serialised):
"""Loads a previously serialised pipeline."""
pipeline = cls()
for fn_name in serialised:
try:
fn = cls.registered_functions[fn_name]
except KeyError:
raise BaseLunrException(
"Cannot load unregistered function {}".format(fn_name)
)
else:
pipeline.add(fn)
return pipeline
def add(self, *args):
"""Adds new functions to the end of the pipeline.
Functions must accept three arguments:
- Token: A lunr.Token object which will be updated
- i: The index of the token in the set
- tokens: A list of tokens representing the set
"""
for fn in args:
self.warn_if_function_not_registered(fn)
self._stack.append(fn)
def warn_if_function_not_registered(self, fn):
try:
return fn.label in self.registered_functions
except AttributeError:
log.warning(
'Function "{}" is not registered with pipeline. '
"This may cause problems when serialising the index.".format(
getattr(fn, "label", fn)
)
)
def after(self, existing_fn, new_fn):
"""Adds a single function after a function that already exists in the
pipeline."""
self.warn_if_function_not_registered(new_fn)
try:
index = self._stack.index(existing_fn)
self._stack.insert(index + 1, new_fn)
except ValueError as e:
raise BaseLunrException("Cannot find existing_fn") from e
def before(self, existing_fn, new_fn):
"""Adds a single function before a function that already exists in the
pipeline.
"""
self.warn_if_function_not_registered(new_fn)
try:
index = self._stack.index(existing_fn)
self._stack.insert(index, new_fn)
except ValueError as e:
raise BaseLunrException("Cannot find existing_fn") from e
def remove(self, fn):
"""Removes a function from the pipeline."""
try:
self._stack.remove(fn)
except ValueError:
pass
def run(self, tokens):
"""Runs the current list of functions that make up the pipeline against
the passed tokens."""
for fn in self._stack:
results = []
for i, token in enumerate(tokens):
# JS ignores additional arguments to the functions but we
# force pipeline functions to declare (token, i, tokens)
# or *args
result = fn(token, i, tokens)
if not result:
continue
if isinstance(result, (list, tuple)): # simulate Array.concat
results.extend(result)
else:
results.append(result)
tokens = results
return tokens
def run_string(self, string, metadata=None):
"""Convenience method for passing a string through a pipeline and
getting strings out. This method takes care of wrapping the passed
string in a token and mapping the resulting tokens back to strings."""
token = Token(string, metadata)
return [str(tkn) for tkn in self.run([token])]
def reset(self):
self._stack = []
def serialize(self):
return [fn.label for fn in self._stack]
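# Usage sketch (illustrative): pipeline functions take (token, i, tokens) and return a token,
# a list of tokens, or a falsy value to drop the token.
#
#   def keep_all(token, i, tokens):
#       return token
#   Pipeline.register_function(keep_all, "keep_all")
#   p = Pipeline()
#   p.add(keep_all)
#   p.run_string("hello world")   # -> ["hello world"]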
|
StarcoderdataPython
|
6657362
|
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from justcause.learners import (
CausalForest,
DoubleRobustEstimator,
DragonNet,
PSWEstimator,
RLearner,
SLearner,
TLearner,
XLearner,
)
from justcause.metrics import pehe_score
def test_slearner(ihdp_data):
rep = ihdp_data[0]
x, t, y = rep.np.X, rep.np.t, rep.np.y
slearner = SLearner(LinearRegression())
slearner.fit(x, t, y)
pred = slearner.predict_ite(x, t, y)
assert len(pred) == len(t)
assert str(slearner) == "SLearner(learner=LinearRegression)"
true_ate = np.mean(rep["ite"].values)
ate = slearner.estimate_ate(x, t, y)
assert abs(ate - true_ate) < 0.2
def test_tlearner(ihdp_data):
"""Construct T-Learner in different ways and ensure it works"""
rep = ihdp_data[0]
x, t, y = rep.np.X, rep.np.t, rep.np.y
tlearner = TLearner(LinearRegression())
tlearner.fit(x, t, y)
pred = tlearner.predict_ite(x, t, y)
assert len(pred) == len(t)
assert (
str(tlearner) == "TLearner(control=LinearRegression, "
"treated=LinearRegression)"
)
true_ate = np.mean(rep["ite"].values)
ate = tlearner.estimate_ate(x, t, y)
assert abs(ate - true_ate) < 0.2
tlearner = TLearner(
learner_c=LinearRegression(), learner_t=RandomForestRegressor(random_state=42)
)
tlearner.fit(x, t, y)
pred = tlearner.predict_ite(x, t, y)
assert len(pred) == len(t)
assert (
str(tlearner) == "TLearner(control=LinearRegression, "
"treated=RandomForestRegressor)"
)
ate = tlearner.estimate_ate(x, t, y)
assert abs(ate - true_ate) < 0.2
tlearner = TLearner(LinearRegression())
tlearner.fit(x, t, y)
pred = tlearner.predict_ite(x, t, y)
assert len(pred) == len(t)
def test_rlearner(ihdp_data):
rep = ihdp_data[0]
x, t, y = rep.np.X, rep.np.t, rep.np.y
# with RF explicitly
rlearner = RLearner(RandomForestRegressor())
rlearner.fit(x, t, y)
pred = rlearner.predict_ite(x)
assert len(pred) == len(t)
# With default
rlearner = RLearner()
rlearner.fit(x, t, y)
pred = rlearner.predict_ite(x)
assert len(pred) == len(t)
assert (
str(rlearner) == "RLearner(outcome=LinearRegression, effect=LinearRegression)"
)
def test_xlearner(ihdp_data):
rep = ihdp_data[0]
x, t, y = rep.np.X, rep.np.t, rep.np.y
true_ite = rep["ite"].values
# With LinearRegression
xlearner = XLearner(LinearRegression())
xlearner.fit(x, t, y)
pred = xlearner.predict_ite(x, t, y)
assert len(pred) == len(t)
# With default
xlearner = XLearner(LinearRegression())
xlearner.fit(x, t, y)
pred = xlearner.predict_ite(x, t, y)
assert len(pred) == len(t)
assert abs(pehe_score(true_ite, pred) - 0.5) < 0.2
pred_ate = xlearner.estimate_ate(x, t, y)
true_ate = np.mean(rep["ite"].values)
assert abs(pred_ate - true_ate) < 0.2
def test_causalforest(ihdp_data, grf):
rep = ihdp_data[0]
x, t, y = rep.np.X, rep.np.t, rep.np.y
cf = CausalForest()
cf.fit(x, t, y)
pred_ate = cf.estimate_ate(x, t, y)
true_ate = np.mean(rep["ite"].values)
assert abs(pred_ate - true_ate) < 0.2
# Try passing keyword arguments to the R implementation
cf = CausalForest(num_trees=50, alpha=0.1, honesty=False)
cf.fit(x, t, y)
pred_ate = cf.estimate_ate(x, t, y)
true_ate = np.mean(rep["ite"].values)
assert abs(pred_ate - true_ate) < 0.2
def test_dre(ihdp_data):
rep = ihdp_data[0]
x, t, y = rep.np.X, rep.np.t, rep.np.y
dre = DoubleRobustEstimator(LogisticRegression(random_state=42))
ate = dre.estimate_ate(x, t, y)
true_ate = np.mean(rep["ite"].values)
assert abs(ate - true_ate) < 0.3
# With default learner
ate = dre.estimate_ate(x, t, y)
assert abs(ate - true_ate) < 0.4
def test_psw(ihdp_data):
rep = ihdp_data[0]
x, t, y = rep.np.X, rep.np.t, rep.np.y
psw = PSWEstimator(LogisticRegression(random_state=42))
ate = psw.estimate_ate(x, t, y)
true_ate = np.mean(rep["ite"].values)
assert abs(ate - true_ate) < 1
psw = PSWEstimator()
ate = psw.estimate_ate(x, t, y)
assert ate > 0
def test_dragonnet(ihdp_data):
rep = ihdp_data[0]
x, t, y = rep.np.X, rep.np.t, rep.np.y
dragon = DragonNet()
dragon.fit(x, t, y)
ate = np.mean(dragon.predict_ite(x, t, y))
true_ate = np.mean(rep["ite"].values)
assert abs(ate - true_ate) < 0.5
|
StarcoderdataPython
|
6686679
|
import rbnf.zero as ze
import sys, os
from rbnf.easy import build_parser
from Redy.Tools.PathLib import Path
pwd = Path(__file__).parent().__str__()
sys.path.append(pwd)
os.chdir(pwd)
def test_predicate():
ze_exp = ze.compile(
"""
[python] import predicate_helpers.[*]
lexer_helper := R'.'
a ::= (_{is_ok})+
b ::= (_+){not_ok}
""",
use='a')
assert len(ze_exp.match("123234").result.item) == 2
ze_exp = ze.compile(
"""
[python] import predicate_helpers.[*]
lexer_helper := R'.'
a ::= (_{is_ok})+
b ::= (_+){not_ok}
""",
use='b')
assert ze_exp.match("123234").result == None
print(ze_exp.dumps())
|
StarcoderdataPython
|
4871959
|
import numpy as np
import torch
import argparse
import json
import os
import traceback
from tqdm import tqdm
import gym
import pickle
from tensorboardX import SummaryWriter
# TODO: this is ugly as hell but python sucks sometimes, should try to put everything in packages?
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from .rl_model import Policy
from .agents import PPOAgent
from .runners import PPORunner
from model.envs.envs import BillardsEnv, AvoidanceTask, MaxDistanceTask, MinDistanceTask
from model.main import main as load_vin
def parse_args(args):
parser = argparse.ArgumentParser(description='RL')
parser.add_argument(
'--experiment_id',
help='name of the experiment')
parser.add_argument(
'--env', default='billards',
help='environment to use: billards')
parser.add_argument(
'--task', default='avoidance',
help='task to learn if training: avoidance | maxdist | mindist')
parser.add_argument(
'--seed', type=int, default=0,
help='random seed')
parser.add_argument(
'--lr', type=float, default=2e-4,
help='learning rate (default: 1e-4)')
parser.add_argument(
'--clip-param', type=float, default=0.1,
help='ppo clip parameter (default: 0.2)')
parser.add_argument(
'--num-steps', type=int, default=128,
help='number of forward steps in PPO (default: 128)')
parser.add_argument(
'--batch-size', type=int, default=32,
help='number of trajectories sampled per batch (default: 32)')
parser.add_argument(
'--num-env-steps', type=int, default=10000000,
help='number of total environment steps (default: 10000000)')
parser.add_argument(
'--num-ppo-mb', type=int, default=32,
help='number of batches for ppo (default: 32)')
parser.add_argument(
'--num-ppo-epochs', type=int, default=4,
help='number of epochs for ppo (default: 4)')
parser.add_argument(
'--save-interval', type=int, default=100,
help='save interval, one save per n batches (default: 100)')
parser.add_argument(
'--value-loss-coef', type=float, default=0.5,
help='value loss coefficient (default: 0.5)')
parser.add_argument(
'--entropy-coef', type=float, default=0.01,
help='entropy term coefficient (default: 0.01)')
parser.add_argument(
'--eps', type=float, default=1e-5,
help='Adam optimizer epsilon (default: 1e-5)')
parser.add_argument(
'--max-grad-norm', type=float, default=40,
help='max norm of gradients (default: 40)')
parser.add_argument(
'--summary-dir', type=str, default='./summary/',
help='directory to save agent tb summaries and args (default: ./summary/)')
parser.add_argument(
'--model-dir', type=str, default='./models/',
help='directory to save models (default: ./models/)')
parser.add_argument(
'--hidden-size', type=int, default=512,
help='Hidden-size in policy head')
parser.add_argument(
'--gym', action='store_true', default=False,
help='whether or not to use gym as environment')
parser.add_argument(
'--use-states', action='store_true', default=False,
help='use states instead of images when true')
parser.add_argument(
'--use-deep-layers', action='store_true', default=False,
help='use more linear layers in head')
parser.add_argument(
'--num-balls', type=int, default=3,
help='Number of balls to simulate in the environment (default: 3)')
parser.add_argument(
'--radius', type=float, default=1.,
help='Radius of each ball in the environment (default: 1.)')
parser.add_argument(
'--gran', type=int, default=2,
help='Granularity of the simulation in the environment (default: 2)')
parser.add_argument(
'--dt', type=float, default=1.,
        help='dt of the simulation computed in each time step. Lower dt leads to a lower perceived env speed (default: 1.)')
parser.add_argument(
'--action-force', type=float, default=.3,
help='action force of the task simulation (default: .3)')
parser.add_argument(
'--num-stacked-frames', type=int, default=4,
help='Number of frames stacked as inputs (default: 4)')
parser.add_argument(
'--use-grid', action='store_true', default=False,
help='whether or not to add grid information to visual input of network')
parser.add_argument(
'--use-pretrained-model', action='store_true', default=False,
help='use the pretrained model as world model'
)
args = parser.parse_args(args)
assert args.task in ['avoidance', 'maxdist', 'mindist']
assert not(args.gym and args.use_states)
assert not(args.gym and args.use_pretrained_model)
assert not(args.use_states and args.use_pretrained_model)
if args.gym:
pass
else:
assert args.env in ['billards']
return args
def load_and_encode(path, state_model, device):
with open(path, 'rb') as file:
data = pickle.load(file)
X = torch.tensor(data['X']).float().to(device)
actions = torch.tensor(data['action']).float().to(device)
encoded_state_data = torch.zeros(X.shape[0], X.shape[1] - 8, 3, 18)
all_appearances = torch.zeros(X.shape[0], X.shape[1] - 8, 3, 3)
for step in range(len(X[0]) - 8):
_, init_state, _ = state_model(X[:, step:step+8].permute(0, 1, 4, 3, 2), 0, action=actions[:, step:step+8],
pretrain=False, future=None)
red = torch.argmax(init_state['obj_appearances'][:, -1][:, :, 0], dim=1).view(-1).detach().cpu().numpy() # shape 1000, 3, 3
others = lambda i: list(set([0, 1, 2]) - set([i]))
idxs = [[i, *others(i)] for i in red]
states = init_state['z'][:, -1]
appearances = init_state['obj_appearances'][:, -1]
sorted_states = [state[idx] for idx, state in zip(idxs, states)]
sorted_apps = [state[idx] for idx, state in zip(idxs, appearances)]
sorted_states = torch.stack(sorted_states)
sorted_apps = torch.stack(sorted_apps)
encoded_state_data[:, step] = sorted_states
all_appearances[:, step] = sorted_apps
return encoded_state_data.view(-1, *encoded_state_data.shape[2:]), all_appearances.view(-1, *all_appearances.shape[2:])
def save_args(args, path):
with open(os.path.join(path,'args.json'), 'w') as fp:
print(f'Saved Args to {os.path.join(path,"args.json")}')
json.dump(vars(args), fp, sort_keys=True, indent=4)
def load_model():
user = 'user'
restore = '/home/{}/Documents/physics/rl/good_rl_run/'.format(user)
extras = {'nolog':True,
'traindata': '/home/{}/share/data/billards_w_actions_train_data.pkl'.format(user),
'testdata': '/home/{}/share/data/billards_w_actions_test_data.pkl'.format(user)}
trainer = load_vin(extras=extras, restore=restore)
model = trainer.stove
return model
def main(args):
args = parse_args(args)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.gym:
try:
env = gym.make(args.env)
gym_action_size = env.action_space.n
except:
print("You did not enter a valid gym control environment; environments must have discrete actions")
raise NotImplementedError
task = None
else:
greyscale = False
if args.env == 'billards':
env = BillardsEnv(n=args.num_balls, r=args.radius, granularity=args.gran, t=args.dt)
else:
raise NotImplementedError
if args.task == 'avoidance':
task = AvoidanceTask(env, num_stacked=args.num_stacked_frames, action_force=args.action_force, greyscale=greyscale)
elif args.task == 'maxdist':
task = MaxDistanceTask(env, num_stacked=args.num_stacked_frames, action_force=args.action_force, greyscale=greyscale)
elif args.task == 'mindist':
task = MinDistanceTask(env, num_stacked=args.num_stacked_frames, action_force=args.action_force, greyscale=greyscale)
else:
raise NotImplementedError
model_path = os.path.join(args.model_dir, args.experiment_id)
summary_path = os.path.join(args.summary_dir, args.experiment_id)
os.makedirs(model_path, exist_ok=True)
os.makedirs(summary_path, exist_ok=True)
save_args(args, summary_path)
if args.gym:
actor_critic = Policy(env.observation_space.shape[0], gym_action_size, hidden_size=32, base='control')
elif args.use_states:
actor_critic = Policy((env.get_state_shape()[0] * env.get_state_shape()[1]), task.get_action_space(), hidden_size=64, base='control')
elif args.use_pretrained_model:
actor_critic = Policy((env.get_state_shape()[0] * env.get_state_shape()[1]), task.get_action_space(), hidden_size=64, base='control')
else:
actor_critic = Policy(task.get_framebuffer_shape(), task.get_action_space(),
hidden_size=args.hidden_size, stacked_frames=args.num_stacked_frames,
use_grid=args.use_grid, use_deep_layers=args.use_deep_layers)
agent = PPOAgent(actor_critic, args.clip_param, args.num_ppo_epochs,
args.num_ppo_mb, args.value_loss_coef, args.entropy_coef,
args.lr, args.eps, args.max_grad_norm)
runner = PPORunner(env=env, task=task, device='cuda',
summary_path=summary_path,
agent=agent, actor_critic=actor_critic,
num_steps=args.num_steps, batch_size=args.batch_size, discount=0.95)
num_batches = int(args.num_env_steps) // args.num_steps // args.batch_size
try:
if args.gym:
run_method = runner.run_gym_batch
elif args.use_states:
run_method = runner.run_batch_states
elif args.use_pretrained_model:
run_method = runner.run_pretrained_batch
state_model = load_model()
else:
run_method = runner.run_batch
user = 'user'
for i in tqdm(range(num_batches)):
if run_method.__name__ == 'run_pretrained_batch':
path = '/home/{}/share/data/billards_w_actions_train_data.pkl'.format(user)
                encoded_state_data, appearances = load_and_encode(path, state_model, 'cuda')
                run_method(state_model, encoded_state_data, appearances)
else:
run_method()
if i % args.save_interval == 0:
torch.save(actor_critic, os.path.join(model_path, 'model' + str(i) + ".pt"))
except:
torch.save(actor_critic, os.path.join(model_path, 'model' + str(i) + ".pt"))
print(traceback.format_exc())
if not os.path.isfile(os.path.join(model_path, 'model' + str(i) + ".pt")):
torch.save(actor_critic, os.path.join(model_path, 'model' + str(i) + ".pt"))
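# Note: main() is defined above but this file shows no entry point; a typical
# command-line guard (an assumption, not part of the original script) would be:
if __name__ == '__main__':
    main(sys.argv[1:])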
|
StarcoderdataPython
|
1879553
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pdo.common.crypto as crypto
import logging
import sys
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARN)
logger = logging.getLogger()
logging.basicConfig(level=logging.WARN)
# TEST ECDSA
try:
esk = crypto.SIG_PrivateKey()
esk.Generate()
epk = esk.GetPublicKey()
except Exception as exc:
logger.error("ERROR: Signature Private and Public keys generation test failed: ", exc)
sys.exit(-1)
logger.debug("Signature Private and Public keys generation test successful!")
try:
eskString = esk.Serialize()
epkString = epk.Serialize()
hepkString = epk.SerializeXYToHex()
esk1 = crypto.SIG_PrivateKey(eskString)
epk1 = crypto.SIG_PublicKey(epkString)
eskString1 = esk1.Serialize()
epkString1 = epk1.Serialize()
esk2 = crypto.SIG_PrivateKey()
esk2.Generate()
epk2 = crypto.SIG_PublicKey(esk2)
eskString = esk.Serialize()
esk2.Deserialize(eskString1)
epk2.Deserialize(epkString1)
eskString2 = esk2.Serialize()
epkString2 = epk2.Serialize()
except Exception as exc:
logger.error("ERROR: Signature Private and Public keys serialize/deserialize test failed: ", exc)
sys.exit(-1)
logger.debug("Signature Private and Public keys serialize/deserialize test successful!")
try:
esk1.Deserialize(epkString1)
logger.error("ERROR: Signature invalid private key deserialize test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Signature invalid private key deserialize test successful!")
else:
logger.error("ERROR: Signature invalid private key deserialize test failed: ", exc)
sys.exit(-1)
try:
epk1.Deserialize(eskString1)
logger.error("ERROR: Signature invalid public key deserialize test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Signature invalid public key deserialize test successful!")
else:
logger.error("ERROR: Signature invalid public key deserialize test failed: ", exc)
sys.exit(-1)
try:
msg = b'A message!'
sig = esk.SignMessage(msg)
res = epk.VerifySignature(msg, sig)
except Exception as exc:
logger.error("ERROR: Signature creation and verification test failed: ", exc)
sys.exit(-1)
if (res == 1):
logger.debug("Signature creation and verification test successful!")
else:
logger.error("ERROR: Signature creation and verification test failed: signature does not verify.")
exit(-1)
try:
res = epk.VerifySignature(msg, bytes("invalid signature", 'ascii'))
except Exception as exc:
logger.error("ERROR: Invalid signature detection test failed: ", exc)
sys.exit(-1)
if (res != 1):
logger.debug("Invalid signature detection test successful!")
else:
logger.error("ERROR: Invalid signature detection test failed.")
exit(-1)
# TEST RSA
try:
rsk = crypto.PKENC_PrivateKey()
rsk.Generate()
rpk = crypto.PKENC_PublicKey(rsk)
except Exception as exc:
logger.error("ERROR: Asymmetric encryption Private and Public keys generation test failed: ", exc)
sys.exit(-1)
logger.debug("Asymmetric encryption Private and Public keys generation test successful!")
try:
rskString = rsk.Serialize()
rpkString = rpk.Serialize()
rsk1 = crypto.PKENC_PrivateKey(rskString)
rpk1 = crypto.PKENC_PublicKey(rpkString)
rskString1 = rsk1.Serialize()
rpkString1 = rpk1.Serialize()
rsk2 = crypto.PKENC_PrivateKey()
    rsk2.Generate()
rpk2 = crypto.PKENC_PublicKey(rsk2)
rsk2.Deserialize(rskString1)
rpk2.Deserialize(rpkString1)
rskString2 = rsk2.Serialize()
rpkString2 = rpk2.Serialize()
except Exception as exc:
logger.error("ERROR: Asymmetric encryption Private and Public keys serialize/deserialize test failed: ", exc)
sys.exit(-1)
try:
rsk1.Deserialize(rpkString1)
logger.error("error: Asymmetric encryption invalid private key deserialize test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Asymmetric encryption invalid private key deserialize test successful!")
else:
logger.error("error: Asymmetric encryption invalid private key deserialize test failed: ", exc)
sys.exit(-1)
try:
rpk1.Deserialize(rskString1)
logger.error("error: Asymmetric encryption invalid public key deserialize test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Asymmetric encryption invalid public key deserialize test successful!")
else:
logger.error("error: Asymmetric encryption invalid public key deserialize test failed: ", exc)
sys.exit(-1)
try:
ciphertext = rpk.EncryptMessage(msg)
plaintext = rsk.DecryptMessage(ciphertext)
except Exception as exc:
logger.error("ERROR: Asymmetric encryption/decryption test failed: ", exc)
sys.exit(-1)
if (bytearray(plaintext) == bytearray(msg)):
logger.debug("Asymmetric encryption/decryption test successful!")
else:
logger.error("ERROR: Asymmetric encryption/decryption failed.\n")
exit(-1)
#TEST AES-GCM
try:
iv = crypto.SKENC_GenerateIV()
except Exception as exc:
logger.error("ERROR: Symmetric encryption iv generation test failed: ", exc)
sys.exit(-1)
try:
key = crypto.SKENC_GenerateKey()
except Exception as exc:
logger.error("ERROR: Symmetric encryption key generation test failed: ", exc)
sys.exit(-1)
try:
crypto.SKENC_EncryptMessage(iv, None, msg)
logger.error("ERROR: Symmetric encryption invalid key detection test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Symmetric encryption invalid key detection test successful!")
else:
logger.error("ERROR: Symmetric encryption invalid key detection test failed: ", exc)
sys.exit(-1)
try:
crypto.SKENC_EncryptMessage(None, key, msg)
logger.error("ERROR: Symmetric encryption invalid iv detection test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Symmetric encryption invalid iv detection test successful!")
else:
logger.error("ERROR: Symmetric encryption invalid iv detection test failed: ", exc)
sys.exit(-1)
try:
ciphertext = crypto.SKENC_EncryptMessage(key, iv, msg)
    plaintext = crypto.SKENC_DecryptMessage(key, iv, ciphertext)
except Exception as exc:
logger.error("ERROR: Symmetric encryption test failed: ", exc)
sys.exit(-1)
if (bytearray(plaintext) == bytearray(msg)):
logger.debug("Symmetric encryption/decryption test successful!\n")
else:
logger.errpr("ERROR:Symmetric encryption/decryption test failed: decrypted text and plaintext mismatch.\n")
exit(-1)
c = list(ciphertext)
c[0] = (c[0] + 1) % 256  # Make sure it stays a byte, or SWIG might fail to find the correct C++ function below
ciphertext = tuple(c)
try:
crypto.SKENC_DecryptMessage(key, iv, ciphertext)
logger.error("ERROR: Symmetric decryption ciphertext tampering detection test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Symmetric decryption ciphertext tampering detection test successful!")
else:
logger.error("ERROR: Symmetric decryption ciphertext tampering detection test failed: ", exc)
sys.exit(-1)
try:
crypto.SKENC_DecryptMessage(iv, iv, ciphertext)
logger.error("ERROR: Symmetric decryption invalid key detection test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Symmetric decryption invalid key detection test successful!")
else:
logger.error("ERROR: Symmetric decryption invalid key detection test failed: ", exc)
sys.exit(-1)
try:
crypto.SKENC_DecryptMessage(plaintext, key, ciphertext)
logger.error("ERROR: Symmetric decryption invalid iv detection test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Symmetric decryption invalid iv detection test successful!")
else:
logger.error("ERROR: Symmetric decryption invalid iv detection test failed: ", exc)
sys.exit(-1)
try:
crypto.SKENC_EncryptMessage(None, ciphertext)
logger.error("ERROR: Symmetric encryption invalid key detection test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Symmetric encryption invalid key detection test successful!")
else:
logger.error("ERROR: Symmetric encryption invalid key detection test failed: ", exc)
sys.exit(-1)
try:
crypto.SKENC_EncryptMessage(None, key, ciphertext)
logger.error("ERROR: Symmetric encryption invalid iv detection test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Symmetric encryption invalid iv detection test successful!")
else:
logger.error("ERROR: Symmetric encryption invalid iv detection test failed: ", exc)
sys.exit(-1)
#Random IV
try:
ciphertext = crypto.SKENC_EncryptMessage(key, msg)
    plaintext = crypto.SKENC_DecryptMessage(key, ciphertext)
except Exception as exc:
logger.error("ERROR: Symmetric encryption (random IV) test failed: ", exc)
sys.exit(-1)
if (bytearray(plaintext) == bytearray(msg)):
logger.debug("Symmetric encryption (random IV)/decryption test successful!\n")
else:
logger.errpr("ERROR:Symmetric encryption (random IV)/decryption test failed: decrypted text and plaintext mismatch.\n")
exit(-1)
c = list(ciphertext)
c[0] = (c[0] + 1) % 256  # keep it a byte, as above
ciphertext = tuple(c)
try:
crypto.SKENC_DecryptMessage(key, ciphertext)
logger.error("ERROR: Symmetric decryption (random IV) ciphertext tampering detection test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Symmetric decryption (random IV) ciphertext tampering detection test successful!")
else:
logger.error("ERROR: Symmetric decryption (random IV) ciphertext tampering detection test failed: ", exc)
sys.exit(-1)
try:
crypto.SKENC_DecryptMessage(iv, ciphertext)
logger.error("ERROR: Symmetric decryption (random IV) invalid key detection test failed: not detected.")
sys.exit(-1)
except Exception as exc:
if (type(exc) == ValueError):
logger.debug("Symmetric decryption (random IV) invalid key detection test successful!")
else:
logger.error("ERROR: Symmetric decryption (random IV) invalid key detection test failed: ", exc)
sys.exit(-1)
try:
iv = crypto.SKENC_GenerateIV("A message")
except Exception as exc:
logger.error("ERROR: Symmetric encryption deterministic iv generation test failed: ", exc)
sys.exit(-1)
logger.debug("Symmetric encryption deterministic iv generation test successful!")
try:
rand = crypto.random_bit_string(16)
except Exception as exc:
logger.error("ERROR: Random number generation failed: ", exc)
sys.exit(-1)
logger.debug("Random number generation successful!")
hash = crypto.compute_message_hash(rand)
bhash = bytearray(hash)
b64hash = crypto.byte_array_to_base64(bhash)
logger.debug("Hash computed!")
crypto.base64_to_byte_array(b64hash)
logger.debug("SWIG CRYPTO_WRAPPER TEST SUCCESSFUL!")
sys.exit(0)
|
StarcoderdataPython
|
6439221
|
<filename>test/tests/name_mangling.py
# Simple test:
class MyClass(object):
__a = 1
print sorted(locals().items()) # This should contain the mangled name
print hasattr(MyClass, "__a")
print hasattr(MyClass, "_MyClass__a")
# Names in functions get mangled:
class MyClass(object):
def __init__(self):
# attributes get mangled:
self.__x = 1
def f(self):
print __g
__local = 1
print sorted(locals().keys()) # This should contain the mangled name
print __k
del __k # this should delete the mangled name
print sorted(locals().keys())
print self.__x
__g = 5
_MyClass__g = 6
try:
MyClass().f()
except NameError, e:
print e
print MyClass()._MyClass__x
# This includes function arguments!
class MyClass(object):
def f(self, __x):
print __x
def g(self, *__args, **__kw):
print __args, __kw
MyClass().f(5)
try:
MyClass().f(__x=5)
except TypeError, e:
print e
MyClass().f(_MyClass__x=6)
MyClass().g(5, a=5)
# Function names get mangled
class MyClass(object):
def __f(self):
pass
# But they still keep their old name for display:
print MyClass._MyClass__f.im_func.__name__
# And classdefs get mangled too I guess:
class MyClass(object):
class __InnerClass(object):
pass
# And imports do too??
class MyClass(object):
try:
import __foo
except ImportError, e:
print e
class MyClass(object):
try:
import __foo as __bar
except ImportError, e:
print e
class MyClass(object):
import sys as __bar
print __bar
class MyClass(object):
try:
from __foo import __bar
except ImportError, e:
print e
#TODO enable this once we support `import *` in functions
#class MyClass(object):
# try:
# from __foo import *
# except ImportError, e:
# print e
class MyClass(object):
try:
from sys import __bar
except ImportError, e:
print e
class MyClass(object):
try:
# Except if it's a dotted name:
import __foo.__bar
except ImportError, e:
print e.message
# names inside classes with mangled names don't get the mangled class name:
class MyClass(object):
class __InnerClass(object):
__inner_inner = "hi"
print sorted(locals().items())
print MyClass._MyClass__InnerClass._InnerClass__inner_inner
# This class gets accessed through the mangled name, but its stored name is still the original name.
print MyClass._MyClass__InnerClass
class MyClass(object):
def f(self):
self.__x = 1
def inner():
# Names inner functions also get mangled:
return self.__x
return inner
print MyClass().f()()
# Eval not supported:
"""
class MyClass(object):
def f(self):
# Sanity check:
y = 2
print eval("y")
__x = 1
try:
# Names do not get mangled inside of an eval:
print eval("__x")
except NameError, e:
print e
MyClass().f()
"""
# Things get mangled in different types of sub-scopes: lambdas, generator expressions, generators:
class MyClass(object):
def f(self):
__x = 2
def g():
yield __x
print list(g())
print list((__k * __x for __k in xrange(5)))
print map(lambda x: x ** __x, range(5))
print [x - __x for x in xrange(4)]
MyClass().f()
_MyClass__x = 0
class MyClass(object):
def f(self):
global __x
__x = 1
MyClass().f()
print "_MyClass__x:", _MyClass__x
# Random tests from looking at the behavior of _Py_Mangle:
# Leading underscores on the class name get stripped:
class _MyClass(object):
__a = 3
print _MyClass._MyClass__a
# But if the class is all underscores, it doesn't mangle anything
# (weird corner case but makes sense, since that new name would be hard
# to reference)
class ___(object):
__a = 2
print ___.__a
# this fails if the type analysis does not mangle the function name
class C():
def f():
def __f2():
print "hi"
if 1:
pass
f()
|
StarcoderdataPython
|
4949169
|
import re
from datetime import datetime
# function for parsing templates
def replace_var(text: str, variables: dict, row: dict):
for i in variables.keys():
text = re.sub(f"<{i}>", row[variables[i]], text)
return text
# string format of current time
def now():
return datetime.now().strftime("%d/%m/%Y, %H:%M:%S")
# default variables for gmail env
gmailvars = """\nSMTP_HOST='smtp.gmail.com'
SMTP_PORT='587'
SENDER_EMAIL='<EMAIL>'
SENDER_PASSWORD='<PASSWORD>'
"""
# template message
template_message = """Dear <var1>,
This is a test message. Please change this template according to your will before sending your <var2> mails.
Thank you
Regards,
<var3>
"""
|
StarcoderdataPython
|
4900422
|
<reponame>hughperkins/ShapeWorld<gh_stars>0
from random import choice, random
from shapeworld.captions import Attribute
from shapeworld.captioners import WorldCaptioner
class RegularAttributeCaptioner(WorldCaptioner):
def __init__(
self,
pragmatical_redundancy_rate=1.0,
pragmatical_tautology_rate=0.0,
logical_redundancy_rate=1.0,
logical_tautology_rate=0.0,
logical_contradiction_rate=0.0,
existing_attribute_rate=1.0
):
super(RegularAttributeCaptioner, self).__init__(
internal_captioners=(),
pragmatical_redundancy_rate=pragmatical_redundancy_rate,
pragmatical_tautology_rate=pragmatical_tautology_rate,
logical_redundancy_rate=logical_redundancy_rate,
logical_tautology_rate=logical_tautology_rate,
logical_contradiction_rate=logical_contradiction_rate
)
self.existing_attribute_rate = existing_attribute_rate
def set_realizer(self, realizer):
if not super(RegularAttributeCaptioner, self).set_realizer(realizer):
return False
self.shapes = list(realizer.attributes.get('shape', ()))
self.colors = list(realizer.attributes.get('color', ()))
self.textures = list(realizer.attributes.get('texture', ()))
assert self.shapes or self.colors or self.textures
return True
def rpn_length(self):
return 1
def rpn_symbols(self):
return super(RegularAttributeCaptioner, self).rpn_symbols() | \
{'{}-{}-{}'.format(Attribute.__name__, 'shape', value) for value in self.shapes} | \
{'{}-{}-{}'.format(Attribute.__name__, 'color', value) for value in self.colors} | \
{'{}-{}-{}'.format(Attribute.__name__, 'texture', value) for value in self.textures}
def sample_values(self, mode, predication):
if not super(RegularAttributeCaptioner, self).sample_values(mode=mode, predication=predication):
return False
attributes = list()
redundant_valid = self.logical_tautology and self.logical_contradition
if len(self.shapes) > 1 and (redundant_valid or not predication.redundant(predicate='shape')):
attributes.append('shape')
if len(self.colors) > 1 and (redundant_valid or not predication.redundant(predicate='color')):
attributes.append('color')
if len(self.textures) > 1 and (redundant_valid or not predication.redundant(predicate='texture')):
attributes.append('texture')
if len(attributes) == 0:
return False
self.attribute = choice(attributes)
if self.existing_attribute_rate == 0.0:
self.existing_attribute = False
elif self.existing_attribute_rate == 1.0:
self.existing_attribute = True
else:
self.existing_attribute = random() < self.existing_attribute_rate
predication.apply(predicate=self.attribute)
predication.block(predicate=self.attribute)
return True
def incorrect_possible(self):
return True
def model(self):
model = super(RegularAttributeCaptioner, self).model()
model.update(
attribute=self.attribute,
existing_attribute=self.existing_attribute
)
return model
def caption(self, predication, world):
if predication.num_agreeing == 0:
return None
values = list()
for entity in predication.agreeing:
if self.attribute == 'shape':
values.append(entity.shape.name)
elif self.attribute == 'color':
values.append(entity.color.name)
elif self.attribute == 'texture':
values.append(entity.texture.name)
value = choice(values)
if self.attribute == 'shape':
attribute = Attribute(predtype='shape', value=value)
elif self.attribute == 'color':
attribute = Attribute(predtype='color', value=value)
elif self.attribute == 'texture':
attribute = Attribute(predtype='texture', value=value)
if predication.contradictory(predicate=attribute):
assert False
elif not self.pragmatical_redundancy and predication.num_entities > 1 and predication.redundant(predicate=attribute):
assert False
return None
attribute.apply_to_predication(predication=predication)
return attribute
def incorrect(self, caption, predication, world):
if self.attribute == 'shape': # random (existing) shape
if self.existing_attribute:
values = list(set(entity.shape.name for entity in world.entities if entity.shape.name in self.shapes and entity.shape.name != caption.value))
if not self.existing_attribute or len(values) == 0:
values = self.shapes
elif self.attribute == 'color': # random (existing) color
if self.existing_attribute:
values = list(set(entity.color.name for entity in world.entities if entity.color.name in self.colors and entity.color.name != caption.value))
if not self.existing_attribute or len(values) == 0:
values = self.colors
elif self.attribute == 'texture': # random (existing) texture
if self.existing_attribute:
values = list(set(entity.texture.name for entity in world.entities if entity.texture.name in self.textures and entity.texture.name != caption.value))
if not self.existing_attribute or len(values) == 0:
values = self.textures
caption.value = choice(values)
caption.apply_to_predication(predication=predication)
return True
|
StarcoderdataPython
|
11291945
|
'''
Telnet control library for the DAS220/240 data recorder.
'''
import time
import sys
import telnetlib
import string
class telnet_das220_240(object):
'''
    Telnet client for the DAS220/240 data recorder.
'''
# Timeout on frame receive
TIMEOUT = 1
DEST = '192.168.0.115'
PORT = 23
tn = 0
PR_OUT_CMD = 0
PR_OUT = 1
def __init__(self,ip_addr):
'''
Constructor
'''
self.DEST = ip_addr
self.tn = telnetlib.Telnet(self.DEST, self.PORT)
print("Connected with : ")
self.get_id()
def close(self):
self.tn.close()
##################### Low level functions #####################
def print_dbg(self,text):
if self.PR_OUT_CMD == 1:
print(text)
def print(self,text):
if self.PR_OUT == 1:
print(text)
# Remove end of line chars to print
def extractCmd(self,cmd):
cmd = cmd.replace("\n", "")
cmd = cmd.replace("\r", "")
return cmd
# Send a frame and wait for response
def sendQuery(self,cmd):
cmd = cmd + "\n"
self.print_dbg(">> Send : " + self.extractCmd(cmd))
self.tn.write(cmd.encode('ascii'))
res = self.tn.read_until(b'\n', self.TIMEOUT).decode('ascii','ignore')
if len(res) == 0:
print("Send frame Timeout")
time.sleep(10)
else:
self.print_dbg("<< Rcv : " + res)
return res
# Send a frame and not wait for response
def sendFrame(self,cmd):
cmd = cmd + "\n"
self.print_dbg(">> Send : " + self.extractCmd(cmd))
self.tn.write(cmd.encode('ascii'))
##################### High level functions #####################
def get_id(self):
print(self.sendQuery("*IDN ?;"))
#Chan_number shall be A1/A2...K1/FA1...
def set_chan_valid(self,chan_number) :
self.sendFrame("VALID "+ chan_number + ",ON;")
    def all_chan_off(self):
self.sendFrame("VALID ALL,OFF;")
'''
    Automatically enables the channel and sets its measurement type and range.
    channel is the channel number (A1/A2, ...),
    mes shall be one of: VOLtage, SHUNT, PT100, PT1000, THErmo,
    RESistance, FREQ, PWM, COUNTer
'''
def set_chanmes(self,channel,mes,cal,zero,pos):
self.sendFrame("CHAN "+ channel + ";")
self.sendFrame("TYPE:" + mes + ";")
self.sendFrame("RANGE " + str(cal) + "," + str(zero) +","+ str(pos) + ";")
self.sendFrame("VALID "+ channel + ",ON;")
    # Read the measured value of a channel
def get_chanmes(self,channel):
#self.sendFrame("CHAN "+ channel + ";")
res = self.sendQuery("CHAN "+ channel + ";"+"CHAN ?;")
self.print(res)
res = res.split(";")
num=""
mul = 1
for i in res[1]:
if i == "°":
                # temperature reading: stop parsing at the degree symbol
break
elif i.isdecimal():
num = num + i
elif i == "." :
num = num + "."
            # Once a multiplier suffix is found, stop parsing
elif (i == "m" and mul != 0):
mul = 0.001
break
elif ((i == "K" or i == 'k')and mul != 0):
mul = 1000
break
elif (i == "M" and mul != 0):
mul = 1000000
break
return (float(num)*mul)
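# Hedged usage sketch: the IP address, channel name and range settings below are
# placeholders, and a reachable DAS220/240 on the network is required to run it.
if __name__ == "__main__":
    das = telnet_das220_240("192.168.0.115")
    # Configure channel A1 as a voltage measurement (illustrative range/zero/position values).
    das.set_chanmes("A1", "VOLtage", 10, 0, 5)
    print("A1 measurement:", das.get_chanmes("A1"))
    das.close()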
|
StarcoderdataPython
|
3314074
|
<reponame>yamamon75/PmagPy<filename>programs/forc_diagram.py
#!/usr/bin/env python
# --*-- coding:utf-8 --*--
'''
#=================================================
/This is for processing and plotting FORC diagrams,
/including the conventional and irregular FORC.
/author: Jiabo
/GFZ potsdam
#=================================================
'''
import sys
import os
import numpy as np
import itertools
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
import pandas as pd
from scipy.interpolate import griddata
import time
from pmagpy import pmagplotlib
from pmagpy import pmag
class Forc(object):
def __init__(self, irData=None, fileAdres=None, SF=None):
'''
#=================================================
/process the raw data
/do the fit
#=================================================
'''
self.rawData = dataLoad(fileAdres)
# self.matrix_z,self.x_range,self.y_range=dataLoad(fileAdres).initial()
if irData != None:
self.rawData = irData # dataLoad(fileAdres)
else:
self.rawData = dataLoad(fileAdres)
self.fit(SF=SF,
x_range=self.rawData.x_range,
y_range=self.rawData.y_range,
matrix_z=self.rawData.matrix_z)
def fit(self, SF, x_range, y_range, matrix_z):
'''
#=================================================
/the main fitting process
/xx,yy,zz = Hb,Ha,p
/p is the FORC distribution
/m0,n0 is the index of values on Ha = Hb
/then loop m0 and n0
/based on smooth factor(SF)
/select data grid from the matrix_z for curve fitting
#=================================================
'''
xx, yy, zz = [], [], []
m0, n0 = [], []
for m, n in itertools.product(np.arange(0, len(x_range), step=SF), np.arange(0, len(y_range), step=SF)):
if x_range[m] > y_range[n]: # Ha nearly equal Hb
m0.append(m)
n0.append(n)
aa, bb, cc = [], [], []
for m, n in zip(m0, n0):
s = 0
try:
grid_data = []
a_ = x_range[m+s]
b_ = y_range[n-s]
for i, j in itertools.product(np.arange(3*SF+1), np.arange(3*SF+1)):
try:
grid_data.append(
[x_range[m+s+i], y_range[n-s-j], matrix_z.item(n-s-j, m+s+i)])
except:
try:
for i, j in itertools.product(np.arange(3), np.arange(3)):
grid_data.append(
[x_range[m+i], y_range[n-j], matrix_z.item(n-j, m+i)])
except:
pass
# print(grid_data)
'''
#=================================================
/when SF = n
/data grid as (2*n+1)x(2*n+1)
/grid_list: convert grid to list
/every grid produce on FORC distritution p
/the poly fitting use d2_func
#=================================================
'''
x, y, z = grid_list(grid_data)
try:
p = d2_func(x, y, z)
# print(p)
xx.append((a_-b_)/2)
yy.append((a_+b_)/2)
zz.append(p)
except Exception as e:
# print(e)
pass
except:
pass
'''
#=================================================
/the data will be save as pandas dataframe
/all the data with nan values will be delete be dropna()
#=================================================
'''
# print(zz)
df = pd.DataFrame({'x': xx, 'y': yy, 'z': zz})
#df = df.replace(0,np.nan)
df = df.dropna()
'''
#=================================================
        /due to the gap near Bc = zero,
        /the Bi values where Bc < 0.03 are mirrored to -Bc
#=================================================
'''
df_negative = df[(df.x < 0.03)].copy()
df_negative.x = df_negative.x*-1
df = df.append(df_negative)
df = df.drop_duplicates(['x', 'y'])
df = df.sort_values('x')
# plt.scatter(df.x,df.y,c=df.z)
# plt.show()
'''
#=================================================
/reset the Bc and Bi range by X,Y
/use linear interpolate to obtain FORC distribution
#=================================================
'''
xrange = [0, int((np.max(df.x)+0.05)*10)/10]
yrange = [int((np.min(df.y)-0.05)*10)/10,
int((np.max(df.y)+0.05)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
self.yi, self.xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#self.xi,self.yi = np.mgrid[0:0.2:400j,-0.15:0.15:400j]
z = df.z/np.max(df.z)
z = np.asarray(z.tolist())
self.zi = griddata((df.x, df.y), z, (self.xi, self.yi), method='cubic')
def plot(self, save=False, fmt="svg"):
fig = plt.figure(figsize=(6, 5), facecolor='white')
fig.subplots_adjust(left=0.18, right=0.97,
bottom=0.18, top=0.9, wspace=0.5, hspace=0.5)
#ax = fig.add_subplot(1,1,1)
plt.contour(self.xi*1000, self.yi*1000, self.zi, 9,
colors='k', linewidths=0.5) # mt to T
# plt.pcolormesh(X,Y,Z_a,cmap=plt.get_cmap('rainbow'))#vmin=np.min(rho)-0.2)
plt.pcolormesh(self.xi*1000, self.yi*1000, self.zi,
cmap=plt.get_cmap('rainbow')) # vmin=np.min(rho)-0.2)
plt.colorbar()
# plt.xlim(0,0.15)
# plt.ylim(-0.1,0.1)
plt.xlabel('B$_{c}$ (mT)', fontsize=12)
plt.ylabel('B$_{i}$ (mT)', fontsize=12)
from pmagpy import pmagplotlib
if save:
pmagplotlib.save_plots({'forc': 1}, {'forc': 'forc.{}'.format(fmt)})
return
else:
pmagplotlib.draw_figs({'forc': 1})
res = pmagplotlib.save_or_quit()
if res == 'a':
pmagplotlib.save_plots({'forc': 1}, {'forc': 'forc.{}'.format(fmt)})
class dataLoad(object):
'''
#=================================================
/process the measured forc data.
/convert the raw data into matrix
/with x range and y range
/empty postion replaced with np.nan
#=================================================
'''
def __init__(self, fileAdres=None):
self.rawData(fileAdres)
def rawData(self, fileAdres=None):
        # determine how many header rows to skip before the data block
skiprows = None
skip_from = [b'Field', b'Moment']
with open(fileAdres, 'rb') as fr:
#f = fr.read()
for i, line in enumerate(fr, 1):
# print(line.split())
if skip_from == line.split():
skiprows = i+2
break
# else:
# print('file format wrong, cannot find the data row.')
skiprows = 34 if skiprows == None else skiprows
df = pd.read_csv(fileAdres, skiprows=skiprows, sep='\s+',
delimiter=',', names=['H', 'M'], skipfooter=1,
engine='python')
H = df.H # measured field
M = df.M # measured magnetic moment
'''
#=================================================
/datainterval_H/_M
/slice the measured data into pieces
/for every measured FORC
#=================================================
'''
dataInterval_H = []
dataInterval_M = []
# print(H)
cretia = df.H.mean() # edge of linear programing for selecting data
H0 = df.H.max() # the maximum field
self.x, self.y, self.z = [[], [], []]
for i in np.arange(1, len(H)):
dataInterval_H.append(H[i])
dataInterval_M.append(M[i])
            if abs(H[i]-H0) <= 0.001:  # when the field reaches the max, a new FORC begins
if len(dataInterval_H) >= 0 and len(dataInterval_H) <= 200:
# print(dataInterval_H)
Ha = dataInterval_H[0]
dataInterval_H.pop(-1)
dataInterval_M.pop(-1)
Hb = dataInterval_H[1:-1]
Hm = dataInterval_M[1:-1]
for t in np.arange(len(Hb)):
self.x.append(Hb[t])
self.y.append(Ha)
self.z.append(Hm[t])
# print(Ha)
dataInterval_H = []
dataInterval_M = []
self.rawdf = df
'''
#=================================================
transfer the data set to matrix as len(x)*len(y) with z value
/mesh up the rawdata
/select the data area by X,Y ranges
/obtain regular spaced data potins by np.linspace
/use interplote to caculate the Hm values
/loop Ha(Y),Hb(X)
/fill every position with Hm, else with np.nan
#=================================================
'''
self.z = self.z/np.max(self.z)
# print(int(np.min(self.x)*100)/100,np.max(self.x))
xrange = [int((np.min(self.x)-0.1)*10)/10,
int((np.max(self.x)+0.1)*10)/10]
yrange = [int((np.min(self.y)-0.1)*10)/10,
int((np.max(self.y)+0.1)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
yi, xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#X = np.linspace(-0.2,0.3,200)
#Y = np.linspace(-0.2,0.3,200)
#xi,yi = np.mgrid[-0.2:0.3:200j,-0.2:0.3:200j]
zi = griddata((self.x, self.y), self.z, (xi, yi),
method='linear') # !!! must linear
self.matrix_z = zi
self.x_range = X
self.y_range = Y
def d2_func(x, y, z):
'''
#=================================================
/poly fit for every SF grid data
#=================================================
'''
X, Y = np.meshgrid(x, y, copy=False)
X = X.flatten()
Y = Y.flatten()
A = np.array([np.ones(len(X)), X, X**2, Y, Y**2, X*Y]).T
Z = np.array(z)
B = Z.flatten()
# print(A.shape,B.shape)
coeff, r, rank, s = np.linalg.lstsq(A, B, rcond=None)
return -coeff[5]
def grid_list(data):
'''
#=================================================
/process the grid data
/convert to list data for poly fitting
#=================================================
'''
a = []
b = []
M = []
for i in data:
a.append(i[0]) # np.array([i[1] for i in data], dtype=np.float64)
b.append(i[1]) # np.array([i[0] for i in data], dtype=np.float64)
M.append(i[2]) # np.array([i[2] for i in data], dtype=np.float64)
a = np.array(a, dtype=np.float64).tolist()
b = np.array(b, dtype=np.float64).tolist()
M = np.array(M, dtype=np.float64).tolist()
a = list(set(a))
b = list(set(b))
return a, b, M
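# Minimal, self-contained sanity check for d2_func(), added for illustration only
# (the grid and values are assumptions, not part of the original pipeline): fitting
# z = x*y over a small grid gives a cross-term coefficient of 1, so d2_func
# should return approximately -1.
def _d2_func_example():
    x = [0.0, 1.0, 2.0]
    y = [0.0, 1.0, 2.0]
    xx, yy = np.meshgrid(x, y)
    z = (xx * yy).flatten().tolist()
    return d2_func(x, y, z)  # expected: about -1.0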
def param_argvs(inputs=None):
docm = '''
NAME
forc_diagram.py
DESCRIPTION
        This is for FORC diagrams, including conventional and irregular FORCs.
OPTIONS
-h prints help message and quits
-f input file name
-sf smooth factor
-fmt [svg,png,pdf,eps,jpg] specify format for image, default is svg
-sav save figure and quit
INPUT FILE:
the measured FORC data file must contain the line " Field Moment "
before the measured data.
SYNTAX
forc_diagram.py -f path_to_file/file.txt [command line options]
'''
fileAdres, SF = None, None
if '-h' in inputs:
print(docm)
sys.exit(0)
save = False
if '-sav' in inputs:
save = True
fmt = pmag.get_named_arg('-fmt', "svg")
fileAdres = pmag.get_named_arg('-f', reqd=True)
if not os.path.isfile(fileAdres):
        print('-f file does not exist')
return
SF = pmag.get_named_arg('-sf', reqd=True)
try:
        SF = int(SF)
except:
print('-sf has to be int')
return
return fileAdres, SF, save, fmt
def main():
#start_time = time.time()
fileAdres, SF, save, fmt = param_argvs(inputs=sys.argv)
if fileAdres != None:
try:
Forc(fileAdres=fileAdres, SF=SF).plot(save, fmt)
pass
except Exception as e:
print(e)
pass
else:
        print('Please include a filename and smooth factor, e.g.:\nforc_diagram.py -f /data_path/forc_file_name.txt -sf 5')
#end_time = time.time()
#print(end_time - start_time)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1753232
|
<filename>burog_auth/urls.py<gh_stars>0
from django.urls import path
from django.contrib.auth import views as auth_views
from .views import FormWizardView
from .forms import UserAuthForm
urlpatterns = [
path('register/', FormWizardView.as_view(), name='register'),
path(
'login/',
auth_views.LoginView.as_view(
form_class=UserAuthForm, template_name='burog_auth/login.html'
),
name='login',
),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
]
|
StarcoderdataPython
|
26468
|
from __future__ import division
import numpy as np
from sklearn.utils import shuffle
from sklearn.metrics import *
"""
Module with different fitness functions implemented to be used by the CRO algorithm.
The functions' only argument must be an individual (coral) and return its fitness, a number.
The fitness might require other arguments, in that case the partial function in python's functools module is a very good option
"""
def max_ones(coral):
"""
    Description: Returns the percentage of 1's in the coral. This function assumes 'coral'
    is a list; it could be made faster if it were a numpy array.
Input:
- coral
Output:
- fitness
"""
return 100*(sum(coral) / len(coral))
def feature_selection(coral, X, y, model,
get_prediction = lambda model, X: model.predict(X),
metric=roc_auc_score, random_seed=None):
"""
Description: Returns the fitness (given by metric) of the selected features given by coral,
when using Xt and yt for training the model clf
Input:
- coral : an individual
- X: Data input
- y: Data output
- model: instance of the model to be trained
- get_prediction: function that accepts the model and X and outputs the vector
that will be used in the metric (predictions, scores...)
- metric: metric that will be used as fitness
Output:
- fitness
"""
    # use the first 90% of the shuffled data for training, the rest for testing
offset = int(X.shape[0] * 0.9)
Xs, ys = shuffle(X, y, random_state=random_seed)
Xs = np.multiply(Xs, coral)
X_train, y_train = Xs[:offset], ys[:offset]
X_test, y_test = Xs[offset:], ys[offset:]
# train model
model.fit(X_train, y_train)
# Compute metric
y_pred = get_prediction(model, X_test)
fitness = metric(y_test, y_pred)
return fitness
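# Example sketch (not part of the library) of the functools.partial pattern the
# module docstring suggests: the dataset and classifier below are arbitrary
# choices used only to show how a one-argument fitness function is built.
if __name__ == "__main__":
    from functools import partial
    from sklearn.datasets import load_breast_cancer
    from sklearn.linear_model import LogisticRegression

    X, y = load_breast_cancer(return_X_y=True)
    fitness = partial(feature_selection, X=X, y=y,
                      model=LogisticRegression(max_iter=1000),
                      random_seed=0)
    coral = [1] * X.shape[1]  # a coral is a 0/1 mask over the features
    print("max_ones fitness:", max_ones(coral))
    print("feature_selection fitness (ROC AUC):", fitness(coral))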
|
StarcoderdataPython
|
1891617
|
"""@package plot_run_stats
Functions to compare the objective function and the norm of grad L between analyses.
"""
#import matplotlib.pyplot as plt
from .mpl_import import *
import pandas as pd
import numpy as np
def multi_plot_obj_grad_lag(fun_log_files, plot_names=None, file_names=None, legend_both=False, log_both=False):
""" Plots the value of the objective function and norm of the grad Lagrangian.
:param list fun_log_files: [str] (m, ) Paths to the optimization log files.
:param list plot_names: (m, ) Labels for the plot legends, if None then no legend is added to the figures.
:param list file_names: (2, ) Full file path to save the figures, if None then doesn't save and just displays.
:param bool legend_both: If False only puts a legend on the objective function plot, True legends both plots.
:param bool log_both: If False uses log scale for the gradient plot, True uses log scale for both plots.
:return list: (2, ) Figure handles for the objective function, norm grad Lagrangian plots.
- The log files should be comma delimited and must contain the columns: iteration, function, norm_grad_Lagr
- Only up to 4 log files are supported at the moment (i.e., m <= 4)
- Your local plt.rcParams will govern much of the overall appearance of the plots
- The file names are for the objective function, and the norm grad Lagrangian plots, respectively
"""
# Line styles
ls = ['-', '--', '-.', ':']
color = '0.15'
# Set-up plot axes
obj_fun_fig, obj_fun_ax = plt.subplots()
obj_fun_ax.set_xlabel('Iteration')
obj_fun_ax.set_ylabel(r'$f(\mathbf{x})$')
if log_both:
obj_fun_ax.set_yscale('log')
norm_grad_fig, norm_grad_ax = plt.subplots()
norm_grad_ax.set_xlabel('Iteration')
norm_grad_ax.set_ylabel(r'$\Vert \nabla\!_x \,L \Vert$')
norm_grad_ax.set_yscale('log')
# Plot each of the files
for i, f in enumerate(fun_log_files):
data = pd.read_csv(f, skipinitialspace=True)
if plot_names is None:
obj_fun_ax.plot(data['iteration'], data['function'], color, ls=ls[i])
norm_grad_ax.plot(data['iteration'], data['norm_grad_Lagr'], color, ls=ls[i])
else:
obj_fun_ax.plot(data['iteration'], data['function'], color, ls=ls[i], label=plot_names[i])
norm_grad_ax.plot(data['iteration'], data['norm_grad_Lagr'], color, ls=ls[i], label=plot_names[i])
# Finalize the plots
if plot_names is not None:
obj_fun_ax.legend(loc='upper right')
if legend_both:
norm_grad_ax.legend(loc='upper right')
obj_fun_fig.tight_layout()
norm_grad_fig.tight_layout()
# Show or save the figures
if file_names is None:
plt.show()
else:
obj_fun_fig.savefig(file_names[0])
norm_grad_fig.savefig(file_names[1])
return [obj_fun_fig, norm_grad_fig]
def multi_plot_x_values(x_log_files, plot_names=None, file_name=None, model_type='VC'):
""" Plots the values of the optimization variables.
:param list x_log_files: [str] (m, ) Paths to the optimization log files for the variable values.
:param list plot_names: (m, ) Labels for the plot legends, if None then no legend is added to the figures.
:param str file_name: Full file path to save the figure, if None then doesn't save and just displays.
:param str model_type: 'VC' for Voce-Chaboche model, 'UVC' for the updated Voce-Chaboche model.
:return plt.figure: Figure handle for plot.
- The log files should be space delimited and follow the convention of parameters for the VC/UVC models
- Only up to 4 log files are supported at the moment (i.e., m <= 4)
- Your local plt.rcParams will govern much of the overall appearance of the plots
"""
# Load a single value to get the number of backstresses
x_test = np.loadtxt(x_log_files[0])[-1]
if model_type == 'VC':
n_back = (len(x_test) - 4) // 2
basic_x_rows = 2
# Parameter Name List
y_names = [r'$E$ [MPa]', r'$\sigma_{y,0}$ [MPa]', r'$Q_{\infty}$ [MPa]', r'$b$']
else:
n_back = (len(x_test) - 6) // 2
basic_x_rows = 3
# Parameter Name List
y_names = [r'$E$ [MPa]', r'$\sigma_{y,0}$ [MPa]', r'$Q_{\infty}$ [MPa]', r'$b$', r'$D_{\infty}$ [MPa]', r'$a$']
for i in range(n_back):
y_names.append(r'$C_{0}$ [MPa]'.format(i + 1))
y_names.append(r'$\gamma_{0}$'.format(i + 1))
# Line styles
ls = ['-', '--', '-.', ':']
color = '0.15'
# Set-up plot axes
fig_h = 1.5 * (basic_x_rows + n_back)
fig = plt.figure(figsize=(6., fig_h))
axes = []
for i, _ in enumerate(y_names):
axes.append(plt.subplot(basic_x_rows + n_back, 2, i + 1))
# Plot each of the files
for op_j, f in enumerate(x_log_files):
param_vals = np.loadtxt(f)
iterations = np.arange(1, len(param_vals) + 1)
for i, ax in enumerate(axes):
if plot_names is None:
                ax.plot(iterations, param_vals[:, i], color, ls=ls[op_j])
else:
ax.plot(iterations, param_vals[:, i], color, ls=ls[op_j], label=plot_names[op_j])
ax.set_ylabel(y_names[i])
# Finalize the plots
if plot_names is not None:
axes[0].legend(loc='best')
axes[-2].set_xlabel('Iteration')
axes[-1].set_xlabel('Iteration')
fig.tight_layout()
# Show or save the figures
if file_name is None:
plt.show()
else:
fig.savefig(file_name)
return fig
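# Hedged usage sketch; the log-file names and labels below are placeholders for
# whatever the optimization runs actually produced (CSV files with the columns
# iteration, function and norm_grad_Lagr).
if __name__ == "__main__":
    multi_plot_obj_grad_lag(
        fun_log_files=['opt_run_1.csv', 'opt_run_2.csv'],
        plot_names=['Run 1', 'Run 2'],
        file_names=['objective_fun.pdf', 'norm_grad_lagr.pdf'])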
|
StarcoderdataPython
|
3269034
|
from flask import Flask
from flask_bootstrap import Bootstrap
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_moment import Moment
import os
app = Flask(__name__)
app.config.from_object(Config)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
login = LoginManager(app)
login.login_view = 'login'
login.login_message_category = 'info'
moment = Moment(app)
uploads_folder = os.path.join(app.root_path, 'static', 'uploads')
if not os.path.exists(uploads_folder):
    os.mkdir(uploads_folder)
print('upload folder created')
from app import views_main, views_admin, models, errors
|
StarcoderdataPython
|
6410627
|
<reponame>chenxuanshu/sayhello
'''
@Author: <NAME>
@Date: 2020-05-23 09:05:32
@LastEditTime: 2020-05-23 09:05:33
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: \sayhello\errors.py
'''
|
StarcoderdataPython
|
5044733
|
<gh_stars>1-10
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util.typedispatch import *
from language.python import ast
import collections
class DFS(object):
def __init__(self, pre):
self.pre = pre
self.visited = set()
def visit(self, node, force=False):
if isinstance(node, ast.Code):
if node in self.visited:
return
else:
self.visited.add(node)
self.pre(node)
# Don't recurse on leaf nodes
if isinstance(node, ast.leafTypes): return
# Recurse
# HACK we must analyze the code inside MakeFunction
doForce = isinstance(node, ast.MakeFunction)
if force or doForce:
node.visitChildrenForcedArgs(self.visit, doForce)
else:
node.visitChildren(self.visit)
def process(self, node):
# Force the traversal of the entry point.
self.visit(node, force=True)
class DefUseVisitor(TypeDispatcher):
def __init__(self):
TypeDispatcher.__init__(self)
self.lcldef = collections.defaultdict(list)
self.lcluse = collections.defaultdict(list)
self.globaldef = collections.defaultdict(list)
self.globaluse = collections.defaultdict(list)
self.celldef = collections.defaultdict(list)
self.celluse = collections.defaultdict(list)
def define(self, location, lcl):
if isinstance(lcl, ast.Local):
self.lcldef[lcl].append(location)
elif isinstance(lcl, ast.DoNotCare):
pass
elif isinstance(lcl, ast.Cell):
self.celldef[lcl].append(location)
else:
assert False, (location, repr(lcl))
def use(self, location, lcl):
if isinstance(lcl, ast.Local):
self.lcluse[lcl].append(location)
elif isinstance(lcl, ast.DoNotCare):
pass
elif isinstance(lcl, ast.Cell):
self.celluse[lcl].append(location)
elif lcl==None:
pass
else:
pass #assert False, (location, lcl)
def defineGlobal(self, location, gname):
assert isinstance(gname, ast.Existing)
name = gname.constantValue()
assert isinstance(name, str)
self.globaldef[name].append(location)
def useGlobal(self, location, gname):
assert isinstance(gname, ast.Existing)
name = gname.constantValue()
assert isinstance(name, str)
self.globaluse[name].append(location)
@dispatch(ast.Delete)
def visitDelete(self, node):
self.use(node, node.lcl)
@dispatch(ast.GetGlobal)
def visitGetGlobal(self, node):
# TODO use internal self? Difficult, as defuse may analyize multiple code in a single pass
self.useGlobal(node, node.name)
@dispatch(ast.SetGlobal)
def visitSetGlobal(self, node):
# TODO use internal self? Difficult, as defuse may analyize multiple code in a single pass
self.defineGlobal(node, node.name)
self.use(node, node.value)
@dispatch(ast.DeleteGlobal)
def visitDeleteGlobal(self, node):
# TODO use internal self?
self.defineGlobal(node, node.name)
@dispatch(ast.CodeParameters)
def visitCodeParameters(self, node):
if node.selfparam:
self.define(node, node.selfparam)
for param in node.params:
self.define(node, param)
if node.vparam:
self.define(node, node.vparam)
if node.kparam:
self.define(node, node.kparam)
@dispatch(ast.UnaryPrefixOp, ast.Not, ast.Yield, ast.GetIter, ast.ConvertToBool)
def visitUnary(self, node):
self.use(node, node.expr)
@dispatch(ast.BinaryOp, ast.Is)
def visitBinaryOp(self, node):
self.use(node, node.left)
self.use(node, node.right)
@dispatch(ast.Assert)
def visitAssert(self, node):
self.use(node, node.test)
self.use(node, node.message)
def handleArgs(self, node):
for arg in node.args:
self.use(node, arg)
for name, arg in node.kwds:
self.use(node, arg)
if node.vargs:
self.use(node, node.vargs)
if node.kargs:
self.use(node, node.kargs)
@dispatch(ast.Call)
def visitCall(self, node):
self.use(node, node.expr)
self.handleArgs(node)
@dispatch(ast.DirectCall)
def visitDirectCall(self, node):
if node.selfarg:
self.use(node, node.selfarg)
self.handleArgs(node)
@dispatch(ast.MethodCall)
def visitMethodCall(self, node):
self.use(node, node.expr)
self.use(node, node.name)
self.handleArgs(node)
@dispatch(ast.BuildTuple, ast.BuildList)
def visitBuildContainer(self, node):
for arg in node.args:
self.use(node, arg)
@dispatch(ast.GetCell)
def visitGetCell(self, node):
self.use(node, node.cell)
@dispatch(ast.GetCellDeref)
def visitGetCellDeref(self, node):
self.use(node, node.cell)
@dispatch(ast.SetCellDeref)
def visitSetCellDeref(self, node):
self.define(node, node.cell)
self.use(node, node.value)
@dispatch(ast.SetAttr)
def visitSetAttr(self, node):
self.use(node, node.value)
self.use(node, node.expr)
self.use(node, node.name)
@dispatch(ast.GetAttr)
def visitGetAttr(self, node):
self.use(node, node.expr)
self.use(node, node.name)
@dispatch(ast.DeleteAttr)
def visitDeleteAttr(self, node):
self.use(node, node.expr)
self.use(node, node.name)
@dispatch(ast.BuildSlice)
def visitBuildSlice(self, node):
if node.start: self.use(node, node.start)
if node.stop: self.use(node, node.stop)
if node.step: self.use(node, node.step)
@dispatch(ast.GetSlice)
def visitGetSlice(self, node):
self.use(node, node.expr)
if node.start: self.use(node, node.start)
if node.stop: self.use(node, node.stop)
if node.step: self.use(node, node.step)
@dispatch(ast.SetSlice)
def visitSetSlice(self, node):
self.use(node, node.value)
self.use(node, node.expr)
if node.start: self.use(node, node.start)
if node.stop: self.use(node, node.stop)
if node.step: self.use(node, node.step)
@dispatch(ast.DeleteSlice)
def visitDeleteSlice(self, node):
self.use(node, node.expr)
if node.start: self.use(node, node.start)
if node.stop: self.use(node, node.stop)
if node.step: self.use(node, node.step)
@dispatch(ast.Switch)
def visitSwitch(self, node):
assert len(self.lcluse[node.condition]) == 0
@dispatch(ast.Condition)
def visitCondition(self, node):
self.use(node, node.conditional)
@dispatch(ast.TypeSwitchCase)
def visitTypeSwitchCase(self, node):
self.define(node, node.expr)
@dispatch(ast.TypeSwitch)
def visitTypeSwitch(self, node):
self.use(node, node.conditional)
@dispatch(ast.ExceptionHandler)
def visitExceptionHandler(self, node):
if node.type: self.use(node, node.type)
if node.value: self.define(node, node.value)
@dispatch(ast.Break, ast.Continue, ast.For, ast.While, ast.TryExceptFinally,
ast.ShortCircutAnd, ast.ShortCircutOr,
ast.Local, ast.DoNotCare, ast.Existing, ast.Cell, ast.Import, ast.Suite,
ast.Code, ast.BuildMap,
ast.leafTypes, list, tuple)
def visitLeaf(self, node):
pass
@dispatch(ast.GetSubscript)
def visitGetSubscript(self, node):
self.use(node, node.expr)
self.use(node, node.subscript)
@dispatch(ast.SetSubscript)
def visitSetSubscript(self, node):
self.use(node, node.value)
self.use(node, node.expr)
self.use(node, node.subscript)
@dispatch(ast.DeleteSubscript)
def visitDeleteSubscript(self, node):
self.use(node, node.expr)
self.use(node, node.subscript)
@dispatch(ast.Return)
def visitReturn(self, node):
for expr in node.exprs:
if not isinstance(expr, ast.Existing):
self.use(node, expr)
@dispatch(ast.InputBlock)
def visitInputBlock(self, node):
for input in node.inputs:
self.define(node, input.lcl)
@dispatch(ast.OutputBlock)
def visitOutputBlock(self, node):
for output in node.outputs:
if not isinstance(output.expr, ast.Existing):
self.use(node, output.expr)
@dispatch(ast.Input, ast.Output, ast.IOName)
def visitOutput(self, node):
pass
@dispatch(ast.Raise)
def visitRaise(self, node):
if node.exception: self.use(node, node.exception)
if node.parameter: self.use(node, node.parameter)
if node.traceback: self.use(node, node.traceback)
@dispatch(ast.Assign)
def visitAssign(self, node):
for lcl in node.lcls:
self.define(node, lcl)
if isinstance(node.expr, ast.Local):
self.use(node, node.expr)
@dispatch(ast.UnpackSequence)
def visitUnpackSequence(self, node):
for lcl in node.targets:
self.define(node, lcl)
self.use(node, node.expr)
@dispatch(ast.Discard)
def visitDiscard(self, node):
if isinstance(node.expr, ast.Local):
# Sure, this node will be killed later, but until then it counts as a use.
self.use(node, node.expr)
@dispatch(ast.Print)
def visitPrint(self, node):
if node.target: self.use(node, node.target)
if node.expr: self.use(node, node.expr)
@dispatch(ast.MakeFunction)
def visitMakeFunction(self, node):
#self.use(node, node.code)
for default in node.defaults:
self.use(node, default)
for cell in node.cells:
self.use(node, cell)
@dispatch(ast.Allocate)
def visitAllocate(self, node):
self.use(node, node.expr)
@dispatch(ast.Load)
def visitLoad(self, node):
self.use(node, node.expr)
self.use(node, node.name)
@dispatch(ast.Store)
def visitStore(self, node):
self.use(node, node.expr)
self.use(node, node.name)
self.use(node, node.value)
@dispatch(ast.Check)
def visitCheck(self, node):
self.use(node, node.expr)
self.use(node, node.name)
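# Added note: evaluateCode runs the def/use visitor over a code object via a DFS pass
# and returns the collected (local defs, local uses) and (global defs, global uses)
# tables, so callers can query definition/use information without re-walking the AST.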
def evaluateCode(compiler, code):
duv = DefUseVisitor()
visitor = DFS(duv)
visitor.process(code)
return (duv.lcldef, duv.lcluse), (duv.globaldef, duv.globaluse)
|
StarcoderdataPython
|
1740493
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Decode the trained multi-task CTC outputs (TIMIT corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, abspath
import sys
import tensorflow as tf
import yaml
import argparse
sys.path.append(abspath('../../../'))
from experiments.timit.data.load_dataset_multitask_ctc import Dataset
from models.ctc.multitask_ctc import MultitaskCTC
from utils.io.labels.character import Idx2char
from utils.io.labels.phone import Idx2phone
from utils.io.labels.sparsetensor import sparsetensor2list
from utils.evaluation.edit_distance import wer_align
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=-1,
help='the epoch to restore')
parser.add_argument('--model_path', type=str,
help='path to the model to evaluate')
parser.add_argument('--beam_width', type=int, default=20,
                    help='beam_width (int, optional): beam width for beam search.' +
                    ' 1 disables beam search, which means greedy decoding.')
parser.add_argument('--eval_batch_size', type=int, default=1,
                    help='the size of the mini-batch used in evaluation')
def do_decode(model, params, epoch, beam_width, eval_batch_size):
"""Decode the Multi-task CTC outputs.
Args:
model: the model to restore
params (dict): A dictionary of parameters
epoch (int): the epoch to restore
        beam_width (int): beam width for beam search.
            1 disables beam search, which means greedy decoding.
        eval_batch_size (int): the size of the mini-batch used in evaluation
"""
# Load dataset
test_data = Dataset(
data_type='test', label_type_main=params['label_type_main'],
label_type_sub=params['label_type_sub'],
batch_size=eval_batch_size, splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
shuffle=False, progressbar=True)
# Define placeholders
model.create_placeholders()
# Add to the graph each operation (including model definition)
_, logits_main, logits_sub = model.compute_loss(
model.inputs_pl_list[0],
model.labels_pl_list[0],
model.labels_sub_pl_list[0],
model.inputs_seq_len_pl_list[0],
model.keep_prob_pl_list[0])
decode_op_main, decode_op_sub = model.decoder(
logits_main, logits_sub,
model.inputs_seq_len_pl_list[0],
beam_width=beam_width)
# Create a saver for writing training checkpoints
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(model.save_path)
# If check point exists
if ckpt:
# Use last saved model
model_path = ckpt.model_checkpoint_path
if epoch != -1:
model_path = model_path.split('/')[:-1]
model_path = '/'.join(model_path) + '/model.ckpt-' + str(epoch)
saver.restore(sess, model_path)
print("Model restored: " + model_path)
else:
            raise ValueError('There are no checkpoints.')
# Visualize
decode(session=sess,
decode_op_main=decode_op_main,
decode_op_sub=decode_op_sub,
model=model,
dataset=test_data,
label_type_main=params['label_type_main'],
label_type_sub=params['label_type_sub'],
is_test=True,
save_path=None)
# save_path=model.save_path)
def decode(session, decode_op_main, decode_op_sub, model,
dataset, label_type_main, label_type_sub,
is_test=True, save_path=None):
"""Visualize label outputs of Multi-task CTC model.
Args:
session: session of training model
decode_op_main: operation for decoding in the main task
decode_op_sub: operation for decoding in the sub task
model: the model to evaluate
dataset: An instance of a `Dataset` class
label_type_main (string): character or character_capital_divide
label_type_sub (string): phone39 or phone48 or phone61
        is_test (bool, optional): if True, ground-truth labels are given as
            transcription strings rather than index sequences
save_path (string, optional): path to save decoding results
"""
idx2char = Idx2char(
map_file_path='../metrics/mapping_files/' + label_type_main + '.txt')
idx2phone = Idx2phone(
map_file_path='../metrics/mapping_files/' + label_type_sub + '.txt')
if save_path is not None:
sys.stdout = open(join(model.model_dir, 'decode.txt'), 'w')
for data, is_new_epoch in dataset:
# Create feed dictionary for next mini batch
inputs, labels_true_char, labels_true_phone, inputs_seq_len, input_names = data
feed_dict = {
model.inputs_pl_list[0]: inputs[0],
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.keep_prob_pl_list[0]: 1.0
}
batch_size = inputs[0].shape[0]
labels_pred_char_st, labels_pred_phone_st = session.run(
[decode_op_main, decode_op_sub], feed_dict=feed_dict)
        try:
            labels_pred_char = sparsetensor2list(
                labels_pred_char_st, batch_size=batch_size)
        except Exception:
            # no output
            labels_pred_char = ['']
        try:
            labels_pred_phone = sparsetensor2list(
                labels_pred_phone_st, batch_size=batch_size)
        except Exception:
            # no output
            labels_pred_phone = ['']
for i_batch in range(batch_size):
print('----- wav: %s -----' % input_names[0][i_batch])
if is_test:
str_true_char = labels_true_char[0][i_batch][0].replace(
'_', ' ')
str_true_phone = labels_true_phone[0][i_batch][0]
else:
str_true_char = idx2char(labels_true_char[0][i_batch])
str_true_phone = idx2phone(labels_true_phone[0][i_batch])
str_pred_char = idx2char(labels_pred_char[i_batch])
str_pred_phone = idx2phone(labels_pred_phone[i_batch])
print('Ref (char): %s' % str_true_char)
print('Hyp (char): %s' % str_pred_char)
print('Ref (phone): %s' % str_true_phone)
print('Hyp (phone): %s' % str_pred_phone)
if is_new_epoch:
break
def main():
args = parser.parse_args()
# Load config file
with open(join(args.model_path, 'config.yml'), "r") as f:
        config = yaml.safe_load(f)
params = config['param']
    # Number of output classes, excluding the blank label
if params['label_type_main'] == 'character':
params['num_classes_main'] = 28
elif params['label_type_main'] == 'character_capital_divide':
params['num_classes_main'] = 72
if params['label_type_sub'] == 'phone61':
params['num_classes_sub'] = 61
elif params['label_type_sub'] == 'phone48':
params['num_classes_sub'] = 48
elif params['label_type_sub'] == 'phone39':
params['num_classes_sub'] = 39
# Model setting
model = MultitaskCTC(
encoder_type=params['encoder_type'],
input_size=params['input_size'] * params['num_stack'],
num_units=params['num_units'],
num_layers_main=params['num_layers_main'],
num_layers_sub=params['num_layers_sub'],
num_classes_main=params['num_classes_main'],
num_classes_sub=params['num_classes_sub'],
main_task_weight=params['main_task_weight'],
lstm_impl=params['lstm_impl'],
use_peephole=params['use_peephole'],
parameter_init=params['weight_init'],
clip_grad_norm=params['clip_grad_norm'],
clip_activation=params['clip_activation'],
num_proj=params['num_proj'],
weight_decay=params['weight_decay'])
model.save_path = args.model_path
do_decode(model=model, params=params,
epoch=args.epoch, beam_width=args.beam_width,
eval_batch_size=args.eval_batch_size)
if __name__ == '__main__':
main()
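# Example invocation (illustrative paths and script name; with --epoch -1 the latest
# checkpoint found under --model_path is restored):
#   python decode_multitask_ctc.py --model_path ./results/ctc_model --beam_width 20 --eval_batch_size 1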
|
StarcoderdataPython
|
8055388
|
<filename>test/xlog-py/lsn_gap.test.py<gh_stars>1-10
import os
import yaml
#
# gh-167: Replica can't find next xlog file if there is a gap in LSN
#
server.stop()
server.deploy()
# Create wal#1
server.admin("space = box.schema.space.create('test')")
server.admin("index = box.space.test:create_index('primary')")
server.admin("box.space.test:insert{1, 'first tuple'}")
server.admin("box.space.test:insert{2, 'second tuple'}")
lsn = int(yaml.safe_load(server.admin("box.info.server.lsn", silent=True))[0])
path = os.path.join(server.vardir, server.name)
wal = os.path.join(path, str(lsn).zfill(20) + ".xlog")
server.stop()
server.start()
server.admin("box.space.test:insert{3, 'third tuple'}")
server.stop()
server.start()
server.admin("box.space.test:insert{4, 'fourth tuple'}")
server.stop()
# Remove xlog with {3, 'third tuple'}
os.unlink(wal)
server.start()
line = "ignoring a gap in LSN"
print("check log line for '%s'" % line)
print()
if server.logfile_pos.seek_once(line) >= 0:
    print("'%s' exists in server log" % line)
    print()
# missing tuple from removed xlog
server.admin("box.space.test:select{}")
|
StarcoderdataPython
|
6695155
|
# -*- coding: utf-8 -*-
"""
Django settings for leprikonweb project.
"""
from django.utils.translation import ugettext_lazy as _
from cms_site.settings import *
ADMINS = (('<NAME>', '<EMAIL>'),)
MANAGERS = ADMINS
SERVER_EMAIL = 'Leprikón @ {} <<EMAIL>>'.format(os.uname()[1])
GANALYTICS_TRACKING_CODE = 'UA-78897621-1'
CONTAINER_TEMPLATES = {
'collapse': {
'label': _('collapse'),
},
}
|
StarcoderdataPython
|
1702721
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
from hdrh.histogram import HdrHistogram
import seaborn as sns
import pandas
import os.path
from enum import Enum
import matplotlib as mpl
from typing import *
import argparse
parser = argparse.ArgumentParser(description='Generate latency curve.')
parser.add_argument('runid', type=str)
parser.add_argument('--invocations', type=int, default=40)
args = parser.parse_args()
INVOCATIONS = args.invocations
RUNID = args.runid
assert os.path.isdir(
f'/root/bench/results/{RUNID}'), f'Incorrect runid: {RUNID}'
HFAC = 1319
HEAP = {
'lusearch': 70,
'cassandra': 347,
'h2': 1572,
'tomcat': 94,
}
DACAPO = 'dacapochopin-b00bfa9'
DATA = {
'G1': RUNID + '/{bench}.{hfac}.{heap}.jdk-lxr.g1.common.hs.latency.{dacapo}',
'Shen.': RUNID + '/{bench}.{hfac}.{heap}.jdk-lxr.shenandoah.common.hs.latency.{dacapo}',
'LXR': RUNID + '/{bench}.{hfac}.{heap}.jdk-lxr.ix.common.tph.trace2-5.srv-128.srvw.lfb-32.latency.{dacapo}',
'ZGC': RUNID + '/{bench}.{hfac}.{heap}.jdk-lxr.z.common.hs.latency.{dacapo}',
}
MAX_INVOCATIONS = max(INVOCATIONS, 40)
MIN_LATENCY_USEC = 1
MAX_LATENCY_USEC = 1000 * 1000 # 1 sec
LATENCY_SIGNIFICANT_DIGITS = 5
LABEL_FONT_SIZE = 60
LEGEND_FONT_SIZE = 60
TICK_FONT_SIZE = 50
SAVE_FILE = 'jpg'
# SAVE_FILE = 'pdf'
def load_data(invocation: int, folder: str):
path = os.path.realpath(os.path.expanduser(
'{}.{}/dacapo-latency-usec-metered.csv'.format(folder, invocation)))
if not os.path.isfile(path):
return None
df = pandas.read_csv(path, names=["start", "end"])
try:
df["latency"] = df["end"] - df["start"]
except Exception as e:
print(path)
raise e
return df
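# Added note: load_data_and_plot builds one HDR histogram per invocation and per GC,
# then plots latency against 1/(1 - percentile); with the log-scaled x axis below this
# spaces the tail percentiles (99, 99.9, 99.99, ...) evenly across the figure.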
def load_data_and_plot(bench, data: Optional[Dict[str, Union[str, List[str]]]] = None, invocations=MAX_INVOCATIONS, save=SAVE_FILE, legend: Union[str, bool] = True, max_percentile='99.999'):
assert bench in HEAP
print(f'[{bench}] Loading...')
histograms = {}
# Clean up inputs
if data is None:
data = {k: v for k, v in DATA.items()}
for gc in data.keys():
if isinstance(data[gc], str):
data[gc] = [data[gc]]
data[gc] = [
f'/root/bench/results/{x}'.format(
runid=RUNID, bench=bench, hfac=HFAC, heap=HEAP[bench], dacapo=DACAPO)
for x in data[gc]
]
data: Dict[str, List[str]]
# Load data
for gc, logs in data.items():
histograms[gc] = []
for folder in logs:
for i in range(invocations):
loaded_data = load_data(i, folder)
if loaded_data is None:
continue
histogram = HdrHistogram(
MIN_LATENCY_USEC, MAX_LATENCY_USEC, LATENCY_SIGNIFICANT_DIGITS)
latencies = loaded_data["latency"]
for l in latencies:
histogram.record_value(l)
histograms[gc].append(histogram)
if len(histograms[gc]) == 0:
histogram = HdrHistogram(
MIN_LATENCY_USEC, MAX_LATENCY_USEC, LATENCY_SIGNIFICANT_DIGITS)
histogram.record_value(0)
histograms[gc].append(histogram)
# Process data
print(f'[{bench}] Processing...')
percentile_list = []
for gc, hists in histograms.items():
for j, histogram in enumerate(hists):
for i in histogram.get_percentile_iterator(5):
percentile_list.append({"GC": gc, "inv": j, "value": i.value_iterated_to,
"percentile": i.percentile_level_iterated_to / 100})
percentile_df = pandas.DataFrame(percentile_list)
percentile_df["other"] = 1 / (1 - percentile_df["percentile"])
# Plot curves
print(f'[{bench}] Plotting...')
fig, ax = plt.subplots(1, 1, figsize=(16, 12))
# fig.suptitle(f'{bench} {latency_type} latency')
sns.color_palette()
# colors = ['green', 'blue', 'orange', 'red'][:len(gcs)]
# print(f'{gcs} {colors}')
sns.lineplot(data=percentile_df, x="other", y="value", hue="GC")
# sns.lineplot(data=percentile_df, x="other", y="value", hue="GC")
ax.set_xscale('log')
ax.set_xlabel('Percentile', fontsize=LABEL_FONT_SIZE, labelpad=12)
ax.set_ylabel('Latency (msec)', fontsize=LABEL_FONT_SIZE, labelpad=12)
labels = ['0', '90', '99', '99.9', '99.99', '99.999', '99.9999']
ax.set_xticks([1, 10, 100, 1000, 10000, 100000, 1000000]
[:labels.index(max_percentile) + 1])
ax.set_xticklabels(labels[:labels.index(
max_percentile) + 1], fontsize=TICK_FONT_SIZE)
# ax.set_xticks([1, 10, 100, 1000, 10000, 100000])
# ax.set_xticklabels(['0', '90', '99', '99.9', '99.99', '99.999'], fontsize=TICK_FONT_SIZE)
plt.yticks(fontsize=TICK_FONT_SIZE)
    ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(
        lambda x, pos: f'{int(x / 1000)}'))
handles, labels = plt.gca().get_legend_handles_labels()
order = [0, 1, 3, 2]
if legend == False:
plt.legend([], [], frameon=False)
elif legend == True:
plt.legend([handles[i] for i in order], [labels[i]
for i in order], fontsize=LEGEND_FONT_SIZE)
else:
plt.legend([handles[i] for i in order], [labels[i]
for i in order], fontsize=LEGEND_FONT_SIZE, loc=legend)
plt.tight_layout()
if save is not None:
print(f'[{bench}] Save to latency-{bench}.{save}')
plt.savefig(f'latency-{bench}.{save}', bbox_inches='tight')
load_data_and_plot(bench='lusearch', legend='upper left')
load_data_and_plot(bench='cassandra', legend='upper left')
load_data_and_plot(bench='h2', legend='upper left', max_percentile='99.99')
load_data_and_plot(bench='tomcat', legend='lower right')
|
StarcoderdataPython
|
234914
|
# -*- coding: utf-8 -*-
"""
author : <NAME> (email: <EMAIL>)
visit: (https://jfayaz.github.io)
"""
from urllib.request import urlopen
import pandas as pd
import numpy as np
import json
def url_resp_values_deag(url_final):
#deagg capture responses
# Opening url
#print(url_final)
deag_response = urlopen(url_final)
# Converting response to str format
response_1 = deag_response.read()
deag_response.close()
return response_1
def url_deag_process(lm,sfmt,sfmt_2):
### ---------- HAZARD CURVES ---------- ###
Deag_data_avaliable = 'No'
    lm['vs30'] = int(lm['vs30'])
k,urls = checking_deag_urls(lm,sfmt,sfmt_2)
if k == 0:
Deag_data_avaliable = 'No'
print('\nNo Response from USGS for Deaggregation')
print('\nUSGS Server Busy! No Response from USGS. Please try again after sometime.')
return Deag_data_avaliable,0
else:
params_deag = lm.apply(lambda x: sfmt(**x), 1)
for i,row in enumerate(params_deag.values):
url_deag = urls + row
response_deag = url_resp_values_deag(url_deag)
data = json.loads(response_deag)
if data['status'] == 'success':
Deag_data_avaliable = 'Yes'
return Deag_data_avaliable,data
else:
print('\nNo Response from USGS for Deaggregation')
print('\nUSGS Server Busy! No Response from USGS. Please try again after sometime.')
return Deag_data_avaliable,0
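# Added note: checking_deag_urls probes the two USGS hosts with both query formats
# (sfmt and sfmt_2) and returns the index and host of the first combination whose
# JSON response reports status 'success'; (0, first host) means every probe failed.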
def checking_deag_urls(lm,sfmt,sfmt_2):
url_responses = {}
data = pd.DataFrame()
url_head = ["https://earthquake.usgs.gov/nshmp-haz-ws/deagg/","https://prod01-earthquake.cr.usgs.gov/nshmp-haz-ws/deagg/"]
url_tail_1 = list(lm.apply(lambda x: sfmt(**x), 1))
url_tail_2 = list(lm.apply(lambda x: sfmt_2(**x), 1))
urls = {1:url_head[0]+url_tail_1[0],2:url_head[0]+url_tail_2[0],3:url_head[1]+url_tail_1[0],4:url_head[1]+url_tail_2[0]}
for i in range(1,5):
data = pd.DataFrame()
#print("\n\n Checking deaggregation URL:", i)
#print(urls[i])
df = url_resp_values_deag(urls[i])
data = json.loads(df)
#print("\n Response from URL:", data['status'])
url_responses.update({i:data['status']})
    for k, v in url_responses.items():
        if "success" == v and k in (1,3):
            return k,url_head[0]
        elif "success" == v and k in (2,4):
            return k,url_head[1]
    return 0,url_head[0]
|
StarcoderdataPython
|
366127
|
import sys
import time
import numpy as np
import cv2
print(sys.executable)
print(sys.version)
print(cv2.__version__)
def evaluate_threshold(path, threshold):
    cap = cv2.VideoCapture(path)
timeP = time.time()
diff_sum = 0
if cap.isOpened():
ret, img = cap.read()
while ret:
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh_image = cv2.threshold(gray_image, threshold, 255, cv2.THRESH_BINARY)
M1 = np.sum(thresh_image) / 255
M0 = np.size(thresh_image) - M1
diff_sum += abs(M0 - M1)
ret, img = cap.read()
print(f'thresh {threshold}: {diff_sum}, elapsed time: {time.time() - timeP}s')
return diff_sum
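# Added note: evaluate_section does a coarse-to-fine search over [init, end]: it samples
# div+1 evenly spaced thresholds, caches each score in value_list, takes the first local
# minimum of the imbalance |M0 - M1|, and recurses on the bracketing sub-interval until
# the sampled grid covers every integer threshold in the bracket.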
def evaluate_section(path, value_list = [-1] * 256, init = 0, end = 255, div = 4):
div = min(div, end - init)
thresh_list = [0] * (div + 1)
eval_list = [0] * (div + 1)
for index in range(div + 1):
threshold = init + ((end - init) * index // div)
thresh_list[index] = threshold
if value_list[threshold] < 0:
value_list[threshold] = evaluate_threshold(path, threshold)
eval_list[index] = value_list[threshold]
for index in range(div + 1):
if index == div:
index_min = index
break
if eval_list[index + 1] > eval_list[index]:
index_min = index
break
if div == (end - init):
return thresh_list[index_min], eval_list[index_min]
else:
return evaluate_section(path, value_list, thresh_list[max(0, index_min - 1)], thresh_list[min(div, index_min + 1)], div)
video_file = "./data/butterflies.mp4"
cap = cv2.VideoCapture(video_file)
print(f'frame size: {cap.get(cv2.CAP_PROP_FRAME_WIDTH)} by {cap.get(cv2.CAP_PROP_FRAME_HEIGHT)}')
print(f'frame count: {cap.get(cv2.CAP_PROP_FRAME_COUNT)}')
print(f'pixel count: {cap.get(cv2.CAP_PROP_FRAME_WIDTH) * cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * cap.get(cv2.CAP_PROP_FRAME_COUNT) / 1000000}M')
timeI = time.time()
thresh_opt, diff_opt = evaluate_section(video_file)
print(f'total elapsed time: {time.time() - timeI}s')
print(f'optimal threshold: {thresh_opt} at diff_sum {diff_opt}')
|
StarcoderdataPython
|
1816468
|
<reponame>Insurance-Metrics-Measure-Advisory/watchman-data-connector<gh_stars>100-1000
# from japanese_address import parse
def __parse(address):
pass
# return parse(address)
def run(value):
pass
# return parse(value)
|
StarcoderdataPython
|
170494
|
<gh_stars>0
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import InputRequired
class CommentForm(FlaskForm):
comment = TextAreaField('Your comment here', validators=[InputRequired()])
submit = SubmitField('Post')
class BlogForm(FlaskForm):
title = StringField('Blog title', validators=[InputRequired()])
content = TextAreaField('Say something', validators=[InputRequired()])
submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.', validators=[InputRequired()])
submit = SubmitField('Submit')
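# Usage sketch (added for illustration; assumes a Flask app with SECRET_KEY configured
# and a template that renders the form with its CSRF token):
#
#   form = CommentForm()
#   if form.validate_on_submit():
#       text = form.comment.data  # validated, non-empty comment body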
|
StarcoderdataPython
|
3274327
|
<gh_stars>0
from __future__ import annotations
import typing
class Activity:
def __init__(self, start_time: float, finish_time: float, profit: float) -> None:
self.start_time = start_time
self.finish_time = finish_time
self.profit = profit
def __iter__(self) -> typing.Iterator[float]:
for item in (self.start_time, self.finish_time):
yield item
def previous(activities: list[Activity], index: int) -> int:
activity = activities[index]
previous_index = 0
for _index in range(index):
finish_time = activities[_index].finish_time
if finish_time < activity.start_time and finish_time > activities[previous_index].finish_time:
previous_index = _index
return previous_index
def maximum_profit(activities: list[Activity], index: int = None) -> float:
if index is None:
index = len(activities) - 1
if index == 0:
return 0
profit = max(maximum_profit(activities, index - 1), activities[index].profit + maximum_profit(activities, previous(activities, index)))
return profit
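# Added note: maximum_profit implements the weighted interval scheduling recurrence
# OPT(i) = max(OPT(i-1), profit_i + OPT(p(i))), where p(i) (computed by previous())
# is the latest-finishing activity that ends strictly before activity i starts. The
# code assumes the activities are sorted by finish time, with index 0 acting as a
# zero-profit sentinel.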
def maximum_profit_memoised(activities: list[Activity], index: int = None, profits: typing.Optional[list] = None) -> float:
    if index is None:
        index = len(activities) - 1
    if profits is None:
        # memo table created once at the top-level call and shared by the recursion
        profits = [None for _ in range(index + 1)]
    if index == 0:
        return 0
    if profits[index] is not None:
        return profits[index]
    profits[index] = max(
        maximum_profit_memoised(activities, index - 1, profits),
        activities[index].profit + maximum_profit_memoised(activities, previous(activities, index), profits),
    )
    return profits[index]
def maximum_profit_dynamic(activities: list[Activity]) -> float:
profits = [None for _ in range(len(activities))]
profits[0] = 0
for index, element in enumerate(activities):
if index > 0:
profits[index] = max(profits[index - 1], element.profit + profits[previous(activities, index)])
return profits[-1]
def weighted_activity_selection(activities: list[Activity]) -> set[Activity]:
selected = set()
index = len(activities) - 1
while index > 0:
if maximum_profit(activities, index) > maximum_profit(activities, index - 1):
selected.add(activities[index])
index = previous(activities, index)
else:
index -= 1
return selected
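# Minimal usage sketch (added for illustration): activities sorted by finish time,
# with a zero-profit sentinel at index 0 as the recurrences above expect.
if __name__ == '__main__':
    sample = [
        Activity(0, 0, 0),        # sentinel, never selected
        Activity(1, 2, 50),
        Activity(3, 5, 20),
        Activity(6, 19, 100),
        Activity(2, 100, 200),
    ]
    print(maximum_profit(sample))            # -> 200
    print(maximum_profit_memoised(sample))   # -> 200
    print(maximum_profit_dynamic(sample))    # -> 200
    print({(a.start_time, a.finish_time) for a in weighted_activity_selection(sample)})  # -> {(2, 100)}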
|
StarcoderdataPython
|
3218055
|
r"""
Points of Topological Manifolds
The class :class:`ManifoldPoint` implements points of a
topological manifold.
A :class:`ManifoldPoint` object can have coordinates in
various charts defined on the manifold. Two points are declared
equal if they have the same coordinates in the same chart.
AUTHORS:
- <NAME>, <NAME> (2013-2015) : initial version
REFERENCES:
- [Lee2011]_
- [Lee2013]_
EXAMPLES:
Defining a point in `\RR^3` by its spherical coordinates::
sage: M = Manifold(3, 'R^3', structure='topological')
sage: U = M.open_subset('U') # the domain of spherical coordinates
sage: c_spher.<r,th,ph> = U.chart(r'r:(0,+oo) th:(0,pi):\theta ph:(0,2*pi):periodic:\phi')
We construct the point in the coordinates in the default chart of ``U``
(``c_spher``)::
sage: p = U((1, pi/2, pi), name='P')
sage: p
Point P on the 3-dimensional topological manifold R^3
sage: latex(p)
P
sage: p in U
True
sage: p.parent()
Open subset U of the 3-dimensional topological manifold R^3
sage: c_spher(p)
(1, 1/2*pi, pi)
sage: p.coordinates(c_spher) # equivalent to above
(1, 1/2*pi, pi)
Computing the coordinates of ``p`` in a new chart::
sage: c_cart.<x,y,z> = U.chart() # Cartesian coordinates on U
sage: spher_to_cart = c_spher.transition_map(c_cart,
....: [r*sin(th)*cos(ph), r*sin(th)*sin(ph), r*cos(th)])
sage: c_cart(p) # evaluate P's Cartesian coordinates
(-1, 0, 0)
Points can be compared::
sage: p1 = U((1, pi/2, pi))
sage: p1 == p
True
sage: q = U((2, pi/2, pi))
sage: q == p
False
even if they were initially not defined within the same coordinate chart::
sage: p2 = U((-1,0,0), chart=c_cart)
sage: p2 == p
True
The `2\pi`-periodicity of the `\phi` coordinate is also taken into account
for the comparison::
sage: p3 = U((1, pi/2, 5*pi))
sage: p3 == p
True
sage: p4 = U((1, pi/2, -pi))
sage: p4 == p
True
"""
#*****************************************************************************
# Copyright (C) 2015 <NAME> <<EMAIL>>
# Copyright (C) 2015 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.element import Element
from sage.misc.decorators import options
from sage.symbolic.expression import Expression
from sage.rings.integer_ring import ZZ
class ManifoldPoint(Element):
r"""
Point of a topological manifold.
This is a Sage *element* class, the corresponding *parent* class
being :class:`~sage.manifolds.manifold.TopologicalManifold`
or :class:`~sage.manifolds.subset.ManifoldSubset`.
INPUT:
- ``parent`` -- the manifold subset to which the point belongs
- ``coords`` -- (default: ``None``) the point coordinates (as a tuple
or a list) in the chart ``chart``
- ``chart`` -- (default: ``None``) chart in which the coordinates are
given; if ``None``, the coordinates are assumed to refer to the
default chart of ``parent``
- ``name`` -- (default: ``None``) name given to the point
- ``latex_name`` -- (default: ``None``) LaTeX symbol to denote the point;
if ``None``, the LaTeX symbol is set to ``name``
- ``check_coords`` -- (default: ``True``) determines whether ``coords``
are valid coordinates for the chart ``chart``; for symbolic
coordinates, it is recommended to set ``check_coords`` to ``False``
EXAMPLES:
A point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: c_xy.<x,y> = M.chart()
sage: (a, b) = var('a b') # generic coordinates for the point
sage: p = M.point((a, b), name='P'); p
Point P on the 2-dimensional topological manifold M
sage: p.coordinates() # coordinates of P in the subset's default chart
(a, b)
Since points are Sage *elements*, the *parent* of which being the
subset on which they are defined, it is equivalent to write::
sage: p = M((a, b), name='P'); p
Point P on the 2-dimensional topological manifold M
A point is an element of the manifold subset in which it has
been defined::
sage: p in M
True
sage: p.parent()
2-dimensional topological manifold M
sage: U = M.open_subset('U', coord_def={c_xy: x>0})
sage: q = U.point((2,1), name='q')
sage: q.parent()
Open subset U of the 2-dimensional topological manifold M
sage: q in U
True
sage: q in M
True
By default, the LaTeX symbol of the point is deduced from its name::
sage: latex(p)
P
But it can be set to any value::
sage: p = M.point((a, b), name='P', latex_name=r'\mathcal{P}')
sage: latex(p)
\mathcal{P}
Points can be drawn in 2D or 3D graphics thanks to the
method :meth:`plot`.
"""
def __init__(self, parent, coords=None, chart=None, name=None,
latex_name=None, check_coords=True):
r"""
Construct a manifold point.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,3), name='p'); p
Point p on the 2-dimensional topological manifold M
sage: TestSuite(p).run()
sage: U = M.open_subset('U', coord_def={X: x<0})
sage: q = U((-1,2), name='q'); q
Point q on the 2-dimensional topological manifold M
sage: TestSuite(q).run()
"""
if parent.is_empty():
raise TypeError(f'cannot define a point on the {parent} because it has been declared empty')
Element.__init__(self, parent)
parent._has_defined_points = True
self._manifold = parent.manifold() # a useful shortcut
self._coordinates = {} # dictionary of the point coordinates in various
# charts, with the charts as keys
if coords is not None:
if len(coords) != parent.manifold().dimension():
raise ValueError("the number of coordinates must be equal " +
"to the manifold's dimension")
from sage.manifolds.manifold import TopologicalManifold
if chart is None:
chart = parent._def_chart
elif isinstance(parent, TopologicalManifold):
if chart not in parent._atlas:
                    raise ValueError("the {} has not been ".format(chart) +
                                     "defined on the {}".format(parent))
if check_coords:
if not chart.valid_coordinates(*coords):
raise ValueError("the coordinates {}".format(coords) +
" are not valid on the {}".format(chart))
for schart in chart._supercharts:
self._coordinates[schart] = tuple(coords)
for schart in chart._subcharts:
if schart != chart:
if schart.valid_coordinates(*coords):
self._coordinates[schart] = tuple(coords)
self._name = name
if latex_name is None:
self._latex_name = self._name
else:
self._latex_name = latex_name
def _repr_(self):
r"""
Return a string representation of the point.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3))
sage: p._repr_()
'Point on the 2-dimensional topological manifold M'
sage: p = M((2,-3), name='p')
sage: p._repr_()
'Point p on the 2-dimensional topological manifold M'
sage: repr(p) # indirect doctest
'Point p on the 2-dimensional topological manifold M'
"""
description = "Point"
if self._name is not None:
description += " " + self._name
description += " on the {}".format(self._manifold)
return description
def _latex_(self):
r"""
Return a LaTeX representation of the point.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3))
sage: p._latex_()
'\\mbox{Point on the 2-dimensional topological manifold M}'
sage: p = M((2,-3), name='p')
sage: p._latex_()
'p'
sage: p = M((2,-3), name='p', latex_name=r'\mathcal{P}')
sage: p._latex_()
'\\mathcal{P}'
sage: latex(p) # indirect doctest
\mathcal{P}
"""
if self._latex_name is None:
return r'\mbox{' + str(self) + r'}'
return self._latex_name
def coordinates(self, chart=None, old_chart=None):
r"""
Return the point coordinates in the specified chart.
If these coordinates are not already known, they are computed from
known ones by means of change-of-chart formulas.
An equivalent way to get the coordinates of a point is to let the
chart acting on the point, i.e. if ``X`` is a chart and ``p`` a
point, one has ``p.coordinates(chart=X) == X(p)``.
INPUT:
- ``chart`` -- (default: ``None``) chart in which the coordinates
are given; if none are provided, the coordinates are assumed to
refer to the subset's default chart
- ``old_chart`` -- (default: ``None``) chart from which the
coordinates in ``chart`` are to be computed; if ``None``, a chart
in which the point's coordinates are already known will be picked,
privileging the subset's default chart
EXAMPLES:
Spherical coordinates of a point on `\RR^3`::
sage: M = Manifold(3, 'M', structure='topological')
sage: c_spher.<r,th,ph> = M.chart(r'r:(0,+oo) th:(0,pi):\theta ph:(0,2*pi):\phi') # spherical coordinates
sage: p = M.point((1, pi/2, pi))
sage: p.coordinates() # coordinates in the manifold's default chart
(1, 1/2*pi, pi)
Since the default chart of ``M`` is ``c_spher``, it is equivalent to
write::
sage: p.coordinates(c_spher)
(1, 1/2*pi, pi)
An alternative way to get the coordinates is to let the chart act
on the point (from the very definition of a chart)::
sage: c_spher(p)
(1, 1/2*pi, pi)
A shortcut for ``coordinates`` is ``coord``::
sage: p.coord()
(1, 1/2*pi, pi)
Computing the Cartesian coordinates from the spherical ones::
sage: c_cart.<x,y,z> = M.chart() # Cartesian coordinates
sage: c_spher.transition_map(c_cart, [r*sin(th)*cos(ph),
....: r*sin(th)*sin(ph), r*cos(th)])
Change of coordinates from Chart (M, (r, th, ph)) to Chart (M, (x, y, z))
The computation is performed by means of the above change
of coordinates::
sage: p.coord(c_cart)
(-1, 0, 0)
sage: p.coord(c_cart) == c_cart(p)
True
Coordinates of a point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: c_xy.<x,y> = M.chart()
sage: (a, b) = var('a b') # generic coordinates for the point
sage: P = M.point((a, b), name='P')
Coordinates of ``P`` in the manifold's default chart::
sage: P.coord()
(a, b)
Coordinates of ``P`` in a new chart::
sage: c_uv.<u,v> = M.chart()
sage: ch_xy_uv = c_xy.transition_map(c_uv, [x-y, x+y])
sage: P.coord(c_uv)
(a - b, a + b)
Coordinates of ``P`` in a third chart::
sage: c_wz.<w,z> = M.chart()
sage: ch_uv_wz = c_uv.transition_map(c_wz, [u^3, v^3])
sage: P.coord(c_wz, old_chart=c_uv)
(a^3 - 3*a^2*b + 3*a*b^2 - b^3, a^3 + 3*a^2*b + 3*a*b^2 + b^3)
Actually, in the present case, it is not necessary to specify
        ``old_chart=c_uv``. Note that the first command erases all
the coordinates except those in the chart ``c_uv``::
sage: P.set_coord((a-b, a+b), c_uv)
sage: P._coordinates
{Chart (M, (u, v)): (a - b, a + b)}
sage: P.coord(c_wz)
(a^3 - 3*a^2*b + 3*a*b^2 - b^3, a^3 + 3*a^2*b + 3*a*b^2 + b^3)
sage: P._coordinates # random (dictionary output)
{Chart (M, (u, v)): (a - b, a + b),
Chart (M, (w, z)): (a^3 - 3*a^2*b + 3*a*b^2 - b^3,
a^3 + 3*a^2*b + 3*a*b^2 + b^3)}
"""
if chart is None:
dom = self.parent()
chart = dom._def_chart
def_chart = chart
else:
dom = chart.domain()
def_chart = dom._def_chart
if self not in dom:
raise ValueError("the point does not belong to the domain " +
"of {}".format(chart))
if chart not in self._coordinates:
# Check whether chart corresponds to a superchart of a chart
# in which the coordinates are known:
for ochart in self._coordinates:
if chart in ochart._supercharts or chart in ochart._subcharts:
self._coordinates[chart] = self._coordinates[ochart]
return self._coordinates[chart]
# If this point is reached, some change of coordinates must be
# performed
if old_chart is not None:
s_old_chart = old_chart
s_chart = chart
else:
# A chart must be found as a starting point of the computation
# The domain's default chart is privileged:
if (def_chart in self._coordinates
and (def_chart, chart) in dom._coord_changes):
old_chart = def_chart
s_old_chart = def_chart
s_chart = chart
else:
for ochart in self._coordinates:
for subchart in ochart._subcharts:
if (subchart, chart) in dom._coord_changes:
old_chart = ochart
s_old_chart = subchart
s_chart = chart
break
if old_chart is not None:
break
if old_chart is None:
# Some search involving the subcharts of chart is
# performed:
for schart in chart._subcharts:
for ochart in self._coordinates:
for subchart in ochart._subcharts:
if (subchart, schart) in dom._coord_changes:
old_chart = ochart
s_old_chart = subchart
s_chart = schart
break
if old_chart is not None:
break
if old_chart is not None:
break
if old_chart is None:
raise ValueError("the coordinates of {}".format(self) +
" in the {}".format(chart) + " cannot be computed " +
"by means of known changes of charts.")
else:
chcoord = dom._coord_changes[(s_old_chart, s_chart)]
self._coordinates[chart] = chcoord(*self._coordinates[old_chart])
return self._coordinates[chart]
coord = coordinates
def set_coordinates(self, coords, chart=None):
r"""
Sets the point coordinates in the specified chart.
Coordinates with respect to other charts are deleted, in order to
avoid any inconsistency. To keep them, use the method :meth:`add_coord`
instead.
INPUT:
- ``coords`` -- the point coordinates (as a tuple or a list)
- ``chart`` -- (default: ``None``) chart in which the coordinates
are given; if none are provided, the coordinates are assumed to
refer to the subset's default chart
EXAMPLES:
Setting coordinates to a point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M.point()
We set the coordinates in the manifold's default chart::
sage: p.set_coordinates((2,-3))
sage: p.coordinates()
(2, -3)
sage: X(p)
(2, -3)
A shortcut for ``set_coordinates`` is ``set_coord``::
sage: p.set_coord((2,-3))
sage: p.coord()
(2, -3)
Let us introduce a second chart on the manifold::
sage: Y.<u,v> = M.chart()
sage: X_to_Y = X.transition_map(Y, [x+y, x-y])
If we set the coordinates of ``p`` in chart ``Y``, those in chart ``X``
are lost::
sage: Y(p)
(-1, 5)
sage: p.set_coord(Y(p), chart=Y)
sage: p._coordinates
{Chart (M, (u, v)): (-1, 5)}
"""
self._coordinates.clear()
self.add_coord(coords, chart)
set_coord = set_coordinates
def add_coordinates(self, coords, chart=None):
r"""
Adds some coordinates in the specified chart.
The previous coordinates with respect to other charts are kept. To
clear them, use :meth:`set_coord` instead.
INPUT:
- ``coords`` -- the point coordinates (as a tuple or a list)
- ``chart`` -- (default: ``None``) chart in which the coordinates
are given; if none are provided, the coordinates are assumed to
refer to the subset's default chart
.. WARNING::
If the point has already coordinates in other charts, it
is the user's responsibility to make sure that the coordinates
to be added are consistent with them.
EXAMPLES:
Setting coordinates to a point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M.point()
We give the point some coordinates in the manifold's default chart::
sage: p.add_coordinates((2,-3))
sage: p.coordinates()
(2, -3)
sage: X(p)
(2, -3)
A shortcut for ``add_coordinates`` is ``add_coord``::
sage: p.add_coord((2,-3))
sage: p.coord()
(2, -3)
Let us introduce a second chart on the manifold::
sage: Y.<u,v> = M.chart()
sage: X_to_Y = X.transition_map(Y, [x+y, x-y])
If we add coordinates for ``p`` in chart ``Y``, those in chart ``X``
are kept::
sage: p.add_coordinates((-1,5), chart=Y)
sage: p._coordinates # random (dictionary output)
{Chart (M, (u, v)): (-1, 5), Chart (M, (x, y)): (2, -3)}
On the contrary, with the method :meth:`set_coordinates`, the
coordinates in charts different from ``Y`` would be lost::
sage: p.set_coordinates((-1,5), chart=Y)
sage: p._coordinates
{Chart (M, (u, v)): (-1, 5)}
"""
if len(coords) != self.parent().manifold()._dim:
raise ValueError("the number of coordinates must be equal to " +
"the manifold's dimension.")
if chart is None:
chart = self.parent()._def_chart
else:
if chart not in self.parent()._atlas:
raise ValueError("the {}".format(chart) + " has not been " +
"defined on the {}".format(self.parent()))
self._coordinates[chart] = coords
add_coord = add_coordinates
def __eq__(self, other):
r"""
Compares the current point with another one.
EXAMPLES:
Comparison with coordinates in the same chart::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3), chart=X)
sage: q = M((2,-3), chart=X)
sage: p == q
True
sage: q = M((-2,-3), chart=X)
sage: p == q
False
Comparison with coordinates of other in a subchart::
sage: U = M.open_subset('U', coord_def={X: x>0})
sage: XU = X.restrict(U)
sage: q = U((2,-3), chart=XU)
sage: p == q and q == p
True
sage: q = U((1,-3), chart=XU)
sage: p == q or q == p
False
Comparison requiring a change of chart::
sage: Y.<u,v> = U.chart()
sage: XU_to_Y = XU.transition_map(Y, (ln(x), x+y))
sage: XU_to_Y.inverse()(u,v)
(e^u, v - e^u)
sage: q = U((ln(2),-1), chart=Y)
sage: p == q and q == p
True
sage: q = U((ln(3),1), chart=Y)
sage: p == q or q == p
False
Comparison with periodic coordinates::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart('x y:period=2')
sage: p = M((0,1))
sage: q = M((0,3))
sage: p == q and q == p
True
sage: q = M((0,2))
sage: p == q or q == p
False
sage: Y.<u,v> = M.chart('u:(0,2*pi):periodic v')
sage: p = M((0,1), chart=Y)
sage: q = M((-4*pi,1), chart=Y)
sage: p == q and q == p
True
sage: q = M((3*pi,1), chart=Y)
sage: p == q or q == p
False
"""
if other is self:
return True
if not isinstance(other, ManifoldPoint):
return False
if other.parent().manifold() != self.parent().manifold():
return False
# Search for a common chart to compare the coordinates
common_chart = None
# the subset's default chart is privileged:
# FIXME: Make this a better test
if hasattr(self.parent(), '_def_chart'): # self.parent() is open
def_chart = self.parent()._def_chart
else:
def_chart = self.parent().manifold()._def_chart
if def_chart in self._coordinates and def_chart in other._coordinates:
common_chart = def_chart
else:
for chart in self._coordinates:
if chart in other._coordinates:
common_chart = chart
break
if common_chart is None:
# A common chart is searched via a coordinate transformation,
# privileging the default chart
if def_chart in self._coordinates:
try:
other.coordinates(def_chart)
common_chart = def_chart
except ValueError:
pass
if common_chart is None:
if def_chart in other._coordinates:
try:
self.coordinates(def_chart)
common_chart = def_chart
except ValueError:
pass
if common_chart is None:
# At this stage, a common chart is searched via a coordinate
# transformation from any chart
for chart in self._coordinates:
try:
other.coordinates(chart)
common_chart = chart
break
except ValueError:
pass
else:
# Attempt a coordinate transformation in the reverse way:
for chart in other._coordinates:
try:
self.coordinates(chart)
common_chart = chart
break
except ValueError:
pass
if common_chart is None:
return False
#!# Another option would be:
# raise ValueError("no common chart has been found to compare " +
# "{} and {}".format(self, other))
periods = common_chart.periods()
for ind, (xs, xo) in enumerate(zip(self._coordinates[common_chart],
other._coordinates[common_chart])):
diff = xs - xo
period = periods[ind]
if period is not None:
if not (diff/period in ZZ):
return False
else:
if isinstance(diff, Expression) and not diff.is_trivial_zero():
return False
elif not (diff == 0):
return False
return True
def __ne__(self, other):
r"""
Non-equality operator.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3), chart=X)
sage: q = M((0,1), chart=X)
sage: p != q
True
sage: p != M((2,-3), chart=X)
False
"""
return not (self == other)
def __hash__(self):
r"""
Return the hash of ``self``.
This hash function is set to constant on a given manifold, to fulfill
Python's credo::
p == q ==> hash(p) == hash(q)
This is necessary since ``p`` and ``q`` may be created in
different coordinate systems and nevertheless be equal.
.. TODO::
Find a better hash function.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3), chart=X)
sage: hash(p) == hash(M)
True
"""
return hash(self.parent().manifold())
@options(size=10, color='black', label_color=None, fontsize=10, label_offset=0.1)
def plot(self, chart=None, ambient_coords=None, mapping=None,
label=None, parameters=None, **kwds):
r"""
For real manifolds, plot ``self`` in a Cartesian graph based
on the coordinates of some ambient chart.
The point is drawn in terms of two (2D graphics) or three (3D graphics)
coordinates of a given chart, called hereafter the *ambient chart*.
The domain of the ambient chart must contain the point, or its image
by a continuous manifold map `\Phi`.
INPUT:
- ``chart`` -- (default: ``None``) the ambient chart (see above); if
``None``, the ambient chart is set the default chart of
``self.parent()``
- ``ambient_coords`` -- (default: ``None``) tuple containing the 2
or 3 coordinates of the ambient chart in terms of which the plot
is performed; if ``None``, all the coordinates of the ambient
chart are considered
- ``mapping`` -- (default: ``None``)
:class:`~sage.manifolds.continuous_map.ContinuousMap`; continuous
manifold map `\Phi` providing the link between the current point
`p` and the ambient chart ``chart``: the domain of ``chart`` must
contain `\Phi(p)`; if ``None``, the identity map is assumed
- ``label`` -- (default: ``None``) label printed next to the point;
if ``None``, the point's name is used
- ``parameters`` -- (default: ``None``) dictionary giving the numerical
values of the parameters that may appear in the point coordinates
- ``size`` -- (default: 10) size of the point once drawn as a small
disk or sphere
- ``color`` -- (default: ``'black'``) color of the point
- ``label_color`` -- (default: ``None``) color to print the label;
if ``None``, the value of ``color`` is used
- ``fontsize`` -- (default: 10) size of the font used to print the
label
- ``label_offset`` -- (default: 0.1) determines the separation between
the point and its label
OUTPUT:
- a graphic object, either an instance of
:class:`~sage.plot.graphics.Graphics` for a 2D plot (i.e. based on
2 coordinates of the ambient chart) or an instance of
:class:`~sage.plot.plot3d.base.Graphics3d` for a 3D plot (i.e.
based on 3 coordinates of the ambient chart)
EXAMPLES:
Drawing a point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M.point((1,3), name='p')
sage: g = p.plot(X)
sage: print(g)
Graphics object consisting of 2 graphics primitives
sage: gX = X.plot(max_range=4) # plot of the coordinate grid
sage: g + gX # display of the point atop the coordinate grid
Graphics object consisting of 20 graphics primitives
.. PLOT::
M = Manifold(2, 'M', structure='topological')
X = M.chart('x y'); x,y = X[:]
p = M.point((1,3), name='p')
g = p.plot(X)
gX = X.plot(max_range=4)
sphinx_plot(g+gX)
Actually, since ``X`` is the default chart of the open set in which
``p`` has been defined, it can be skipped in the arguments of
``plot``::
sage: g = p.plot()
sage: g + gX
Graphics object consisting of 20 graphics primitives
Call with some options::
sage: g = p.plot(chart=X, size=40, color='green', label='$P$',
....: label_color='blue', fontsize=20, label_offset=0.3)
sage: g + gX
Graphics object consisting of 20 graphics primitives
.. PLOT::
M = Manifold(2, 'M', structure='topological')
X = M.chart('x y'); x,y = X[:]
p = M.point((1,3), name='p')
g = p.plot(chart=X, size=40, color='green', label='$P$', \
label_color='blue', fontsize=20, label_offset=0.3)
gX = X.plot(max_range=4)
sphinx_plot(g+gX)
Use of the ``parameters`` option to set a numerical value of some
symbolic variable::
sage: a = var('a')
sage: q = M.point((a,2*a), name='q')
sage: gq = q.plot(parameters={a:-2}, label_offset=0.2)
sage: g + gX + gq
Graphics object consisting of 22 graphics primitives
.. PLOT::
M = Manifold(2, 'M', structure='topological')
X = M.chart('x y'); x,y = X[:]
p = M.point((1,3), name='p')
g = p.plot(chart=X, size=40, color='green', label='$P$', \
label_color='blue', fontsize=20, label_offset=0.3)
var('a')
q = M.point((a,2*a), name='q')
gq = q.plot(parameters={a:-2}, label_offset=0.2)
gX = X.plot(max_range=4)
sphinx_plot(g+gX+gq)
The numerical value is used only for the plot::
sage: q.coord()
(a, 2*a)
Drawing a point on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', structure='topological')
sage: X.<x,y,z> = M.chart()
sage: p = M.point((2,1,3), name='p')
sage: g = p.plot()
sage: print(g)
Graphics3d Object
sage: gX = X.plot(number_values=5) # coordinate mesh cube
sage: g + gX # display of the point atop the coordinate mesh
Graphics3d Object
Call with some options::
sage: g = p.plot(chart=X, size=40, color='green', label='P_1',
....: label_color='blue', fontsize=20, label_offset=0.3)
sage: g + gX
Graphics3d Object
An example of plot via a mapping: plot of a point on a 2-sphere viewed
in the 3-dimensional space ``M``::
sage: S2 = Manifold(2, 'S^2', structure='topological')
sage: U = S2.open_subset('U') # the open set covered by spherical coord.
sage: XS.<th,ph> = U.chart(r'th:(0,pi):\theta ph:(0,2*pi):\phi')
sage: p = U.point((pi/4, pi/8), name='p')
sage: F = S2.continuous_map(M, {(XS, X): [sin(th)*cos(ph),
....: sin(th)*sin(ph), cos(th)]}, name='F')
sage: F.display()
F: S^2 → M
on U: (th, ph) ↦ (x, y, z) = (cos(ph)*sin(th), sin(ph)*sin(th), cos(th))
sage: g = p.plot(chart=X, mapping=F)
sage: gS2 = XS.plot(chart=X, mapping=F, number_values=9)
sage: g + gS2
Graphics3d Object
Use of the option ``ambient_coords`` for plots on a 4-dimensional
manifold::
sage: M = Manifold(4, 'M', structure='topological')
sage: X.<t,x,y,z> = M.chart()
sage: p = M.point((1,2,3,4), name='p')
sage: g = p.plot(X, ambient_coords=(t,x,y), label_offset=0.4) # the coordinate z is skipped
sage: gX = X.plot(X, ambient_coords=(t,x,y), number_values=5) # long time
sage: g + gX # 3D plot # long time
Graphics3d Object
sage: g = p.plot(X, ambient_coords=(t,y,z), label_offset=0.4) # the coordinate x is skipped
sage: gX = X.plot(X, ambient_coords=(t,y,z), number_values=5) # long time
sage: g + gX # 3D plot # long time
Graphics3d Object
sage: g = p.plot(X, ambient_coords=(y,z), label_offset=0.4) # the coordinates t and x are skipped
sage: gX = X.plot(X, ambient_coords=(y,z))
sage: g + gX # 2D plot
Graphics object consisting of 20 graphics primitives
.. PLOT::
M = Manifold(4, 'M', structure='topological')
X = M.chart('t x y z'); t,x,y,z = X[:]
p = M.point((1,2,3,4), name='p')
g = p.plot(X, ambient_coords=(y,z), label_offset=0.4)
gX = X.plot(X, ambient_coords=(y,z))
sphinx_plot(g+gX)
"""
from sage.plot.point import point2d
from sage.plot.text import text
from sage.plot.graphics import Graphics
from sage.plot.plot3d.shapes2 import point3d, text3d
from sage.manifolds.chart import Chart
if self._manifold.base_field_type() != 'real':
raise NotImplementedError('plot of points on manifolds over fields different'
' from the real field is not implemented')
# The ambient chart:
if chart is None:
chart = self.parent().default_chart()
elif not isinstance(chart, Chart):
raise TypeError("the argument 'chart' must be a coordinate chart")
# The effective point to be plotted:
if mapping is None:
eff_point = self
else:
eff_point = mapping(self)
# The coordinates of the ambient chart used for the plot:
if ambient_coords is None:
ambient_coords = chart[:]
elif not isinstance(ambient_coords, tuple):
ambient_coords = tuple(ambient_coords)
nca = len(ambient_coords)
if nca != 2 and nca != 3:
raise TypeError("invalid number of ambient coordinates: {}".format(nca))
# Extract the kwds options
size = kwds['size']
color = kwds['color']
label_color = kwds['label_color']
fontsize = kwds['fontsize']
label_offset = kwds['label_offset']
# The point coordinates:
coords = eff_point.coord(chart)
xx = chart[:]
xp = [coords[xx.index(c)] for c in ambient_coords]
if parameters is not None:
xps = [coord.substitute(parameters) for coord in xp]
xp = xps
xlab = [coord + label_offset for coord in xp]
if label_color is None:
label_color = color
resu = Graphics()
if nca == 2:
if label is None:
label = r'$' + self._latex_name + r'$'
resu += (point2d(xp, color=color, size=size) +
text(label, xlab, fontsize=fontsize, color=label_color))
else:
if label is None:
label = self._name
resu += (point3d(xp, color=color, size=size) +
text3d(label, xlab, fontsize=fontsize, color=label_color))
return resu
|
StarcoderdataPython
|
3232591
|
#!/usr/bin/env python
import sys, re, csv
import pprint as pp
import pandas as pd
import argparse
from datetime import date
from go_utils import gaf, obo
import timeit
parser = argparse.ArgumentParser(description='Convert the RBH output to GAF files')
parser.add_argument("-i","--input", help="Input file with RBH GO output", required=True)
parser.add_argument("-o","--obo", help="OBO file from Gene Ontology", required=True)
parser.add_argument("-g","--gaf", help="output GAF file to write the annotations to", required=True)
parser.add_argument("-d","--dataset", help="dataset name recorded in the assigned_by column of the GAF", required=True)
args = parser.parse_args()
def print_elasped(start_time):
elapsed = timeit.default_timer() - start_time
sys.stderr.write("Time taken - %s\n" % elapsed)
return(timeit.default_timer())
#num_cores = int(args.num_threads)
start_time = timeit.default_timer()
sys.stderr.write("Importing the GO obo file\n")
input_obo = obo()
input_obo.read_file(args.obo)
start_time = print_elasped(start_time)
sys.stderr.write("Converting the obo to get the aspects for all GO terms\n")
aspects = {"biological_process":"P","molecular_function":"F","cellular_component":"C"}
go_aspect = {term['id']:aspects[term['namespace']] for term in input_obo.terms}
start_time = print_elasped(start_time)
sys.stderr.write("Reading RBH results\n")
df = pd.read_csv(args.input,index_col=None,sep="\t",header=0)
df.columns = ["gene","arab_gene","go","aspect", "ev_code", "rel_type"]
df["gene"] = df["gene"].replace({r'_P[0-9]+':"",r'FGP':'FG'},regex=True)
start_time = print_elasped(start_time)
in_gaf = gaf()
sys.stderr.write("Converting RBH results into a GAF\n")
in_gaf.init_annotations(df)
in_gaf.add_aspect(input_obo)
in_gaf.add_col_all("db","maize-GAMER")
in_gaf.add_col_all("qualifier","")
in_gaf.add_col_all("db_reference","MG:0000")
in_gaf.add_col_all("evidence_code","IEA")
in_gaf.add_col_all("with","0")
in_gaf.add_col_all("db_object_name","")
in_gaf.add_col_all("db_object_synonym","")
in_gaf.add_col_all("db_object_type","gene")
in_gaf.add_col_all("taxon","taxon:4577")
d = date.today()
in_gaf.add_col_all("date",d.strftime("%m%d%Y"))
in_gaf.add_col_all("assigned_by",args.dataset.upper())
in_gaf.add_col_all("annotation_extension","")
in_gaf.add_col_all("gene_product_form_id","")
in_gaf.reorder_cols()
#tmp_fields = list(in_gaf.gaf_2_x_fields)
'''
# def add_to_gaf(row):
# row_dict = row
# aspect = ""
# if row_dict["go"] in go_aspect:
# aspect = go_aspect[row_dict["go"]]
# else:
# aspect = "N"
# tmp_annot_data = ["Maize-GAME",row_dict["gene"],row_dict["gene"],"",row_dict["go"],"MG:0000","IEA",str(row_dict["score"]),aspect,"","","protein","taxon:4577","20161105","FANN-GO","",""]
# tmp_annot = dict(zip(in_gaf.gaf_2_x_fields,tmp_annot_data))
# in_gaf.add_annotation(tmp_annot)
#
# df.apply(add_to_gaf,axis=1)
# for row in df.iterrows():
# #sys.stderr.write("%s\n" % (row[0]+1))
# row_dict = row[1]
# aspect = ""
# if row_dict["go"] in go_aspect:
# aspect = go_aspect[row_dict["go"]]
# else:
# aspect = "N"
# tmp_annot_data = ["Maize-GAME",row_dict["gene"],row_dict["gene"],"",row_dict["go"],"MG:0000","IEA",str(row_dict["score"]),aspect,"","","protein","taxon:4577","20161105","FANN-GO","",""]
# tmp_annot = dict(zip(in_gaf.gaf_2_x_fields,tmp_annot_data))
# in_gaf.add_annotation(tmp_annot)
# if (row[0]+1) % 100 == 0:
# sys.stderr.write("Processed %s lines\n" % (row[0]+1))
in_gaf.annotations["with"] = pd.to_numeric(in_gaf.annotations["with"])
'''
start_time = print_elasped(start_time)
sys.stderr.write("Starting to write output files\n")
'''
for i in xrange(0,100,5):
th = float(i)/100
sys.stderr.write("Processing threshold - %s\n" % (th))
#tmp_df = gaf_df[gaf_df["with"]>=th]
outfile_th = args.gaf.split(".")
outfile_th[0] = outfile_th[0]+"-%s" % (th)
outfile_th = ".".join(outfile_th)
in_gaf.write_gaf(outfile_th,"with",th)
'''
in_gaf.write_gaf(args.gaf)
start_time = print_elasped(start_time)
sys.stderr.write("Process is completed\n")
|
StarcoderdataPython
|
9767246
|
'''
basic authentication (username, password)
no database systems, users defined by python scripts
'''
from flask import render_template, request, redirect, abort, flash, url_for
from flask_login import login_user, LoginManager, current_user, logout_user, login_required
from werkzeug.security import generate_password_hash, check_password_hash
from jinja2.exceptions import TemplateNotFound
from vicore import BaseArch, AppArchExt
class AuthUser:
'''A basic user authentication account following flask-login'''
def __init__(self, name, password):
self.set_password(password)
self.name = name
self.is_active = True
self.is_anonymous = False
def get_id(self):
return str(self.id)
    def set_password(self, password, method='pbkdf2:<PASSWORD>', saltlen=16):
self.passhash=generate_password_hash(password, method=method, salt_length=saltlen)
def check_password(self, password):
return check_password_hash(self.passhash, password)
'''
basic.Arch
templates: login, profile, unauth
reroutes: login, logout
'''
class Arch(BaseArch):
def __init__(self, templates = {}, reroutes = {}, reroutes_kwarg = {}, rex_callback = {}, url_prefix = None):
'''
initialize the architecture for the vial
        templates is a dictionary mapping given routes to the user-specified templates rendered on them
reroutes is a dictionary that reroutes the user after certain actions on given routes
'''
super().__init__('viauth', templates, reroutes, reroutes_kwarg, rex_callback, url_prefix)
self.__userdict = {} # only for basic
self._default_tp('login', 'login.html')
self._default_tp('profile', 'profile.html')
self._default_tp('unauth','unauth.html')
self._default_rt('login', 'viauth.profile')
self._default_rt('logout','viauth.login')
def update_users(self, ulist):
'''
        a primitive way of updating users. This is non-dynamic: once init_app or
        generate_apparch is called, the user list is static.
'''
for i, u in enumerate(ulist):
u.id = i
self.__userdict[u.name] = u
def _find_byid(self, uid):
for u in self.__userdict.values():
if uid == u.get_id():
return u
def init_app(self, app):
apparch = self.generate_apparch()
apparch.ext.init_app(app)
app.register_blueprint(apparch.bp)
@app.teardown_appcontext
def shutdown_session(exception=None):
pass
def __unauth(self):
try:
tpl = render_template(self._templ['unauth'])
return tpl, 401
except TemplateNotFound:
return 'login required. please login at %s' % url_for('viauth.login', _external=True), 401
def generate_apparch(self):
bp = self.generate_blueprint()
lman = self.generate_lman()
return AppArchExt(bp, lman)
def generate_lman(self):
lman = LoginManager()
@lman.unauthorized_handler
def unauth():
return self.__unauth()
@lman.user_loader
        def loader(uid):
            u = self._find_byid(uid)
            if u is None:
                return None  # unknown uid: let flask-login treat the session as logged out
            u.is_authenticated = True
            return u
return lman
def generate_blueprint(self):
bp = self._init_bp()
@bp.route('/login', methods=['GET','POST'])
def login():
rscode = 200
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
if not username or not password:
abort(400)
if username in self.__userdict and\
self.__userdict[username].check_password(password):
login_user(self.__userdict[username])
self.ok('login', 'login success')
return self._reroute('login')
self.err('login', 'invalid credentials')
rscode = 401
return render_template(self._templ['login']), rscode
@bp.route('/profile')
@login_required
def profile():
return render_template(self._templ['profile'])
@bp.route('/logout')
def logout():
logout_user()
self.ok('logout', 'logout success')
return self._reroute('logout')
return bp
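# Minimal wiring sketch (added for illustration only, hence commented out). It assumes
# the login/profile/unauth templates exist in the host app's template folder and that
# the '<PASSWORD>' placeholder in AuthUser.set_password stands for a real werkzeug
# digest name such as 'pbkdf2:sha256'.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.secret_key = 'change-me'       # flask-login needs a secret key for its session cookie
#   arch = Arch()
#   arch.update_users([AuthUser('alice', 'example-password')])
#   arch.init_app(app)                 # registers /login, /profile and /logout
#   app.run(debug=True)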
|
StarcoderdataPython
|
3543420
|
<filename>evalml/data_checks/data_checks.py
import inspect
from evalml.data_checks import DataCheck
from evalml.exceptions import DataCheckInitError
from evalml.utils import infer_feature_types
def _has_defaults_for_all_args(init):
"""Tests whether the init method has defaults for all arguments."""
signature = inspect.getfullargspec(init)
n_default_args = 0 if not signature.defaults else len(signature.defaults)
n_args = len(signature.args) - 1 if 'self' in signature.args else len(signature.args)
return n_args == n_default_args
class DataChecks:
"""A collection of data checks."""
@staticmethod
def _validate_data_checks(data_check_classes, params):
"""Inits a DataChecks instance from a list of DataCheck classes and corresponding params."""
if not isinstance(data_check_classes, list):
raise ValueError(f"Parameter data_checks must be a list. Received {type(data_check_classes).__name__}.")
if not all(inspect.isclass(check) and issubclass(check, DataCheck) for check in data_check_classes):
raise ValueError("All elements of parameter data_checks must be an instance of DataCheck "
"or a DataCheck class with any desired parameters specified in the "
"data_check_params dictionary.")
params = params or dict()
if not isinstance(params, dict):
raise ValueError(f"Params must be a dictionary. Received {params}")
in_params = set(params.keys())
in_classes = set([c.name for c in data_check_classes])
name_to_class = {c.name: c for c in data_check_classes}
extraneous = in_params.difference(in_classes)
missing = in_classes.difference(in_params)
for extraneous_class in extraneous:
raise DataCheckInitError(
f"Class {extraneous_class} was provided in params dictionary but it does not match any name "
"in the data_check_classes list. Make sure every key of the params dictionary matches the name"
"attribute of a corresponding DataCheck class.")
for missing_class_name in missing:
if not _has_defaults_for_all_args(name_to_class[missing_class_name]):
raise DataCheckInitError(
f"Class {missing_class_name} was provided in the data_checks_classes list but it does not have "
"an entry in the parameters dictionary.")
@staticmethod
def _init_data_checks(data_check_classes, params):
data_check_instances = []
for data_check_class in data_check_classes:
class_params = params.get(data_check_class.name, {})
if not isinstance(class_params, dict):
raise DataCheckInitError(
f"Parameters for {data_check_class.name} were not in a dictionary. Received {class_params}.")
try:
data_check_instances.append(data_check_class(**class_params))
except TypeError as e:
raise DataCheckInitError(
f"Encountered the following error while initializing {data_check_class.name}: {e}")
return data_check_instances
def __init__(self, data_checks=None, data_check_params=None):
"""
A collection of data checks.
Arguments:
            data_checks (list(DataCheck)): List of DataCheck classes or instances
            data_check_params (dict): Parameters for the data checks, keyed by DataCheck name
"""
data_check_params = data_check_params or dict()
self._validate_data_checks(data_checks, data_check_params)
data_check_instances = self._init_data_checks(data_checks, data_check_params)
self.data_checks = data_check_instances
def validate(self, X, y=None):
"""
Inspects and validates the input data against data checks and returns a list of warnings and errors if applicable.
Arguments:
X (ww.DataTable, pd.DataFrame, np.ndarray): The input data of shape [n_samples, n_features]
y (ww.DataColumn, pd.Series, np.ndarray): The target data of length [n_samples]
Returns:
dict: Dictionary containing DataCheckMessage objects
"""
messages = {
"warnings": [],
"errors": []
}
X = infer_feature_types(X)
if y is not None:
y = infer_feature_types(y)
for data_check in self.data_checks:
messages_new = data_check.validate(X, y)
messages["warnings"].extend(messages_new["warnings"])
messages["errors"].extend(messages_new["errors"])
return messages
class AutoMLDataChecks(DataChecks):
def __init__(self, data_check_instances):
if not all(isinstance(check, DataCheck) for check in data_check_instances):
raise ValueError("All elements of parameter data_checks must be an instance of DataCheck.")
self.data_checks = data_check_instances
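# Usage sketch (illustrative, not part of evalml). The toy check below is made up
# to show the interface DataChecks expects from a DataCheck subclass; real checks
# live elsewhere in evalml.data_checks. It assumes DataCheck exposes the class
# name via its `name` attribute, as relied on by _validate_data_checks above.
if __name__ == "__main__":
    import pandas as pd

    class AlwaysWarnCheck(DataCheck):
        """Toy data check that always emits a single warning (illustration only)."""
        def validate(self, X, y=None):
            return {"warnings": ["AlwaysWarnCheck: example warning"], "errors": []}

    checks = DataChecks(data_checks=[AlwaysWarnCheck])
    X = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, None]})
    print(checks.validate(X))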
|
StarcoderdataPython
|
11229192
|
<filename>scripts/oov-clustering/compare-references.py
#!/usr/bin/env python
import argparse
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('ref1')
parser.add_argument('ref2')
args = parser.parse_args()
nb_00 = 0
nb_01 = 0
nb_10 = 0
nb_11 = 0
with open(args.ref1) as f1, open(args.ref2) as f2:
for l1, l2 in zip(f1, f2):
fields_1 = [int(float(r)) for r in l1.split()]
fields_2 = [int(float(r)) for r in l2.split()]
for r1, r2 in zip(fields_1, fields_2):
if r1 == 0 and r2 == 0:
nb_00 += 1
elif r1 == 0 and r2 == 1:
nb_01 += 1
elif r1 == 1 and r2 == 0:
nb_10 += 1
elif r1 == 1 and r2 == 1:
nb_11 += 1
nb_total = nb_00 + nb_01 + nb_10 + nb_11
print("{:.2f} {:.2f}".format(100.0*nb_00/nb_total, 100.0*nb_01/nb_total))
print("{:.2f} {:.2f}".format(100.0*nb_10/nb_total, 100.0*nb_11/nb_total))
|
StarcoderdataPython
|
4906041
|
<filename>datasets/hollywood2.py
import json
import csv
from pathlib import Path
import torch
import torch.utils.data as data
import numpy as np
import math
from .loader import VideoLoader
def get_class_labels(data, data_name, root_path):
class_labels_map = {}
f = open(root_path / 'list.txt', 'r')
i = 0
while True:
tmp = f.readline()
if not tmp: break
class_labels_map[tmp[:-1]] = i
i += 1
return class_labels_map
# root_path == opt.video_path
def get_database(data, subset, root_path, annotation_path, video_path_formatter, data_name):
video_ids = []
video_paths = []
annotations = []
segments = []
#
with open(str(annotation_path) + '/holly_'+ subset +'Set.csv', newline='') as csvfile:
train_reader = csv.reader(csvfile, delimiter=',')
for row in train_reader:
# key
video_ids.append(row[0]) # only file name
video_paths.append(root_path / 'cut' / row[0])
annotations.append(row[1])
if subset == 'inference':
segments.append([int(row[2]), int(row[3])])
else:
segments.append(int(row[2]))
return video_ids, video_paths, annotations, segments
class Hollywood2(data.Dataset):
def __init__(self,
root_path,
annotation_path,
subset,
data_name=None,
spatial_transform=None,
temporal_transform=None,
target_transform=None,
video_loader=None,
video_path_formatter=(lambda root_path, label, video_id:
root_path / label / video_id),
image_name_formatter=lambda x: f'image_{x:05d}.jpg',
target_type = 'label'):
self.subset = subset
self.target_type = target_type
self.data_name = data_name
self.data, self.class_names = self.__make_dataset(
root_path, annotation_path, subset, video_path_formatter)
self.spatial_transform = spatial_transform
self.temporal_transform = temporal_transform
self.target_transform = target_transform
if video_loader is None:
self.loader = VideoLoader(image_name_formatter)
else:
self.loader = video_loader
def __make_dataset(self, root_path, annotation_path, subset,
video_path_formatter):
        # if 'mit', 'data' doesn't exist
data = None
video_ids, video_paths, annotations, segments = get_database(
data, subset, root_path, annotation_path, video_path_formatter, self.data_name)
# redefine 'get_class_labels' for mit
class_to_idx = get_class_labels(data, self.data_name, root_path)
idx_to_class = {}
for name, label in class_to_idx.items():
idx_to_class[label] = name
n_videos = len(video_ids)
dataset = []
for i in range(n_videos):
if i % (n_videos // 5) == 0:
print('dataset loading [{}/{}]'.format(i, len(video_ids)))
label = annotations[i]
# inference
            if subset == 'inference':
label_list = label.split('|')
label_id = [class_to_idx[i] for i in label_list]
segment = segments[i]
tt = np.linspace(segment[0], segment[1]-1, 30) #
frame_indices = [math.floor(i) for i in tt]
# train/validation
else:
label_id = class_to_idx[label]
segment = [0, segments[i]-1]
if segment[1] == 1:
continue
frame_indices = list(range(0, segments[i]))
video_path = video_paths[i]
if not video_path.exists():
continue
sample = {
'video': video_path,
'segment': segment,
'frame_indices': frame_indices,
'video_id': video_ids[i],
'label': label_id
}
dataset.append(sample)
return dataset, idx_to_class
def __loading(self, path, frame_indices):
clip = self.loader(path, frame_indices)
if self.spatial_transform is not None:
self.spatial_transform.randomize_parameters()
clip = [self.spatial_transform(img) for img in clip]
clip = torch.stack(clip, 0).permute(1, 0, 2, 3)
return clip
def __getitem__(self, index):
path = self.data[index]['video']
# change target when inference
target = self.data[index]['label']
frame_indices = self.data[index]['frame_indices']
if self.temporal_transform is not None:
frame_indices = self.temporal_transform(frame_indices)
clip = self.__loading(path, frame_indices)
if self.target_transform is not None:
target = self.target_transform(target)
return clip, target
def __len__(self):
return len(self.data)
#------------------------- For validation and Inference
import json
import copy
import functools
import torch
from torch.utils.data.dataloader import default_collate
# for inference
def collate_fn(batch):
batch_clips, batch_targets = zip(*batch)
#batch_clips = [clip for multi_clips in batch_clips for clip in multi_clips]
#batch_targets = [
# target for multi_targets in batch_targets for target in multi_targets
#]
#print(len(batch_clips), batch_clips[0].shape)
target_element = batch_targets[0]
if isinstance(target_element, int) or isinstance(target_element, str):
return default_collate(batch_clips), default_collate(batch_targets)
else:
return default_collate(batch_clips), batch_targets
def collate_fn_val(batch):
batch_clips, batch_targets = zip(*batch)
batch_clips = [clip for multi_clips in batch_clips for clip in multi_clips]
batch_targets = [
target for multi_targets in batch_targets for target in multi_targets
]
target_element = batch_targets[0]
if isinstance(target_element, int) or isinstance(target_element, str):
return default_collate(batch_clips), default_collate(batch_targets)
else:
return default_collate(batch_clips), batch_targets
class Hollywood2MultiClips(Hollywood2):
def __loading__one(self, path, frame_indices):
clip = self.loader(path, frame_indices)
if self.spatial_transform is not None:
self.spatial_transform.randomize_parameters()
clip = [self.spatial_transform(img) for img in clip]
clip = torch.stack(clip, 0).permute(1, 0, 2, 3)
return clip
def __loading(self, path, video_frame_indices):
clips = []
segments = []
for clip_frame_indices in video_frame_indices:
clip = self.loader(path, clip_frame_indices)
if self.spatial_transform is not None:
self.spatial_transform.randomize_parameters()
clip = [self.spatial_transform(img) for img in clip]
clips.append(torch.stack(clip, 0).permute(1, 0, 2, 3))
segments.append(
[min(clip_frame_indices),
max(clip_frame_indices) + 1])
return clips, segments
def __getitem__(self, index):
path = self.data[index]['video']
video_frame_indices = self.data[index]['frame_indices']
if self.subset=='inference':
if self.temporal_transform is not None:
video_frame_indices = self.temporal_transform(video_frame_indices)
clips = self.__loading__one(path, video_frame_indices)
else:
if self.temporal_transform is not None:
video_frame_indices = self.temporal_transform(video_frame_indices)
clips, segments = self.__loading(path, video_frame_indices)
if isinstance(self.target_type, list):
targets = [self.data[index][t] for t in self.target_type]
else:
targets = self.data[index][self.target_type]
if 'segment' in self.target_type:
            if isinstance(self.target_type, list):
                segment_index = self.target_type.index('segment')
                base_target = targets  # keep the video-level target before building per-clip copies
                targets = []
                for s in segments:
                    targets.append(copy.deepcopy(base_target))
                    targets[-1][segment_index] = s
else:
targets = segments
        # if inference, no need to expand to segments size
elif self.subset == 'inference':
pass
else:
targets = [targets for _ in range(len(segments))]
return clips, targets
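# Usage sketch (illustrative, not part of the original module). The directory
# layout below is an assumption: root_path is expected to contain 'list.txt' and a
# 'cut/' folder with the videos, and annotation_path a 'holly_trainSet.csv' file,
# matching what get_class_labels/get_database read above.
if __name__ == '__main__':
    dataset = Hollywood2(
        root_path=Path('/data/hollywood2'),
        annotation_path=Path('/data/hollywood2/annotations'),
        subset='train')
    loader = data.DataLoader(dataset, batch_size=1, shuffle=True)
    clips, targets = next(iter(loader))  # larger batches need a temporal_transform that equalizes clip length
    print(clips.shape, targets)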
|
StarcoderdataPython
|
1774495
|
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, jsonify
from werkzeug.exceptions import HTTPException
from werkzeug.exceptions import default_exceptions
from api import commands, user
from api.extensions import bcrypt, db, migrate
from api.settings import ProdConfig
from api.user import jwt_extension
# Views
from api.dummy.view import DummyView
from api.user.views import UsersView
def create_app(config_object=ProdConfig):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_views(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
db.init_app(app)
jwt_extension.init_app(app)
migrate.init_app(app, db)
return None
def register_views(app):
"""Register Flask blueprints."""
DummyView.register(app)
UsersView.register(app)
return None
# TODO: better errorhandler logic !
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
response = jsonify(message=str(error))
if isinstance(error, HTTPException):
response.status_code = error.code
else:
response.status_code = 500
return response
for code in default_exceptions.keys():
app.errorhandler(code)(render_error)
app.errorhandler(Exception)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': user.models.User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
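# Usage sketch (illustrative, not part of the original module): build the app with
# the factory above and run the development server.
if __name__ == "__main__":
    app = create_app()
    app.run(debug=True)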
|
StarcoderdataPython
|
6444196
|
<reponame>cjwatson/flask-storm<filename>tests/test_utils_colored.py
import pytest
import sys
from flask_storm.utils import colored, has_color_support
from mock import Mock, patch
fg42 = "\x1b[38;5;42m"
bg69 = "\x1b[48;5;69m"
bold = "\x1b[1m"
underline = "\x1b[4m"
reset = "\x1b[0m"
def j(*args):
return "".join(args)
def test_reset():
assert len(colored("")) == 0
assert colored("", bold=True).endswith("\x1b[0m")
def test_foreground_color():
assert colored("foo", color=42) == j(fg42, "foo", reset)
def test_background_color():
assert colored("foo", background=69) == j(bg69, "foo", reset)
def test_bold():
assert colored("foo", bold=True) == j(bold, "foo", reset)
def test_underline():
assert colored("foo", underline=True) == j(underline, "foo", reset)
def test_prefix_order():
ansi = colored("foo", color=42, background=69, bold=True, underline=True)
real = j(fg42, bg69, bold, underline, "foo", reset)
assert ansi == real
def test_has_color_support():
tty_file = Mock()
tty_file.isatty.return_value = True
file = Mock()
file.isatty.return_value = False
with patch("sys.platform", "Pocket PC"):
assert not has_color_support(tty_file)
# This will check against STDOUT, but will always fail because of the
# Pocket PC platform
assert not has_color_support()
with patch("sys.platform", "darwin"):
assert has_color_support(tty_file)
assert not has_color_support(file)
with patch("sys.platform", "linux2"):
assert has_color_support(tty_file)
assert not has_color_support(file)
with patch("sys.platform", "win32"):
assert not has_color_support(tty_file)
with patch.dict("os.environ", {"ANSICON": ""}):
assert has_color_support(tty_file)
|
StarcoderdataPython
|
1794283
|
<filename>functions_legacy/RawMigrationDb2AggrRiskDrivers.py
import numpy as np
from numpy import array, unique, zeros, sort, where, argsort, r_, ones
from numpy import sum as npsum
from datetime import datetime
def RawMigrationDb2AggrRiskDrivers(db,t_start,t_end):
# This function processes the raw database of credit migrations to extract
# the aggregate risk drivers.
# INPUTS
# db :[struct] raw database
# t_start :[string] time window's starting date
# t_end :[string] time window's ending date
# OPS
# dates :[vector] vector of dates corresponding to migrations
# N :[cell] N{t}[i] is the number of obligors with rating i at time dates[t]
# n :[cell] n{t}(i,j) is the cumulative number of transitions between ratings i and j up to time dates[t]
# m :[cell] m{t}(i,j) is the number of transitions occured at time dates[t] between ratings i and j
# n_tot :[vector] n[t] is the total number of transitions up to time dates[t]
# fin_rat :[cell] contains the issuers (first row) with their corresponding final ratings (second row)
## Code
ratings_str = db.ratings
rr_ = len(ratings_str)
db.data[1] = list(map(lambda x: datetime.strptime(x, '%d-%b-%Y'), db.data[1]))
idx_dates = (db.data[1] >= t_start) & (db.data[1] <= t_end)
data_raw = db.data[:,idx_dates]# dataset inside time-window
## Transform "cell" dataset into "double"
# issuers
issuers_raw = data_raw[0]
issuers_d = array(list(map(float,issuers_raw)))
issuers = unique(issuers_d)
s_ = len(issuers)
# dates
dates_raw = data_raw[1]
dates_d = dates_raw
dates = unique(dates_d)
t_ = len(dates)
# ratings
ratings_raw = data_raw[2,:]
ratings_d = zeros((1,len(ratings_raw)),dtype=int)
for r in range(rr_):
idx = ratings_str[r]==ratings_raw
ratings_d[0,idx] = r+1
data_d = r_[issuers_d[np.newaxis,...], dates_d[np.newaxis,...], ratings_d]# dataset in "double" format
## Process data
matrix = np.NaN*ones((s_,t_),dtype=int)
for s in range(s_):
idx_issuer = data_d[0]==issuers[s]
data_temp = data_d[:,idx_issuer]
dates_temp = data_temp[1]
dates_temp,idx_t = sort(dates_temp), argsort(dates_temp)
data_temp = data_temp[:,idx_t]
if len(dates_temp)==1:
idx_1 = where(dates==dates_temp)[0][0]
matrix[s,idx_1:] = data_temp[2]
else:
idx_2 = where(dates==dates_temp[-1])[0][0]
matrix[s,idx_2:] = data_temp[2,-1]
for t in range(1,len(dates_temp)):
idx_1 = where(dates==dates_temp[-t-1])[0][0]
matrix[s,idx_1:idx_2] = data_temp[2,-t-1]
idx_2 = idx_1
## Compute aggregate risk drivers
m = zeros((t_,rr_,rr_))
n = zeros((t_,rr_,rr_))
n_tot = zeros((1,t_))
N = zeros((t_,rr_))
for t in range(t_):
for i in range(rr_):
N[t,i] = npsum(matrix[:,t]==i+1)
if t>0:
for j in range(rr_):
if i!=j:
# number of transitions realized at time t between ratings i and j
m[t,i,j] = npsum((matrix[:,t-1]==i+1)*(matrix[:,t]==j+1))
if t>0:
# number of transitions, between ratings i and j, realized up to time t
n[t] = n[t-1]+m[t]
# total number of transitions up to time t
n_tot[0,t] = npsum(n[t])
## Final ratings
issuers_raw_ = unique(issuers_raw)
fin_rat = {1:zeros(s_),2:{}}
for s in range(s_):
fin_rat[1][s] = issuers_raw_[s]
fin_rat[2][s] = ratings_str[int(matrix[s,-1])-1]
return dates, N, n, m, n_tot, fin_rat
|
StarcoderdataPython
|
5023598
|
<reponame>01coders/50-Days-Of-Code<filename>python_AbhiMHolla/day 02.py<gh_stars>0
# Arithmetic operations
#basic Arithmetic operators part 1
x = 5
y = 3
z=15
print(x + y)
print(z-y)
print(x*y)
print(z/y)
print(z%x)
#comparison operators
a = 15
b = 23
print(a == b) # returns False because 15 is not equal to 23
print(a != b) # returns True because 15 is not equal to 23
print(a>b) # returns False because 15 is not greater than 23
print(a<b) # returns True because 15 is less than to 23
print(a>=b) # returns False because 15 is not greater than 23
print(a<=b) # returns True because 15 is less than to 23
|
StarcoderdataPython
|
6660099
|
<reponame>spbrogan/edk2-pytool-extensions<filename>edk2toolext/capsule/pyopenssl_signer.py
# @file pyopenssl_signer.py
# This module contains the abstracted signing interface for pyopenssl. This interface
# abstraction takes in the signature_options and signer_options dictionaries that are
# used by capsule_tool and capsule_helper.
#
##
# Copyright (C) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import logging
from OpenSSL import crypto
def sign(data, signature_options, signer_options):
'''
    primary signing interface. Takes in the signature_options and signer_options
dictionaries that are used by capsule_tool and capsule_helper
'''
# NOTE: Currently, we only support the necessary algorithms for capsules.
if signature_options['sign_alg'] != 'pkcs12':
raise ValueError(f"Unsupported signature algorithm: {signature_options['sign_alg']}")
if signature_options['hash_alg'] != 'sha256':
raise ValueError(f"Unsupported hashing algorithm: {signature_options['hash_alg']}")
logging.debug("Executing PKCS1 Signing")
# If a key file is provided, use it for signing.
if 'key_file' in signer_options:
with open(signer_options['key_file'], 'rb') as key_file:
signer_options['key_data'] = key_file.read()
# TODO: Figure out OIDs.
# TODO: Figure out EKU.
pkcs12 = crypto.load_pkcs12(signer_options['key_data'])
return crypto.sign(pkcs12.get_privatekey(), data, signature_options['hash_alg'])
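# Usage sketch (illustrative, not part of the module): sign a payload with a
# PKCS#12 bundle from disk. The file path and payload bytes are assumptions; the
# option keys ('sign_alg', 'hash_alg', 'key_file') are the ones read above.
if __name__ == '__main__':
    signature_options = {'sign_alg': 'pkcs12', 'hash_alg': 'sha256'}
    signer_options = {'key_file': 'signer.pfx'}  # hypothetical PKCS#12 certificate bundle
    signature = sign(b'capsule payload bytes', signature_options, signer_options)
    print(f"produced {len(signature)} signature bytes")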
|
StarcoderdataPython
|
12805476
|
<reponame>apgupta3091/CSE389Project<filename>Pr0j3ct/server.py
# server.py
# implements HTTP Server class
from Pr0j3ct.requests import RequestProcessor
from Pr0j3ct.logging import Logger
from Pr0j3ct.scheduler import Scheduler
import os
import ssl
import socket
class Server:
def __init__(self, rootDirectory, port, indexFile="index.html", enableSSL=False):
# check arguments
if not os.path.exists(rootDirectory):
raise ValueError("rootDirectory: {} not found".format(rootDirectory))
self.rootDirectory = os.path.abspath(rootDirectory)
if port > 65535 or port < 0:
raise ValueError("port: {} should be in [0,65535]".format(port))
self.port = port
if not os.path.isfile(os.path.join(self.rootDirectory, indexFile)):
raise ValueError("indexFile: {} is not found under {}".format(indexFile, self.rootDirectory))
self.indexFile = indexFile
# initialize variables
self.scheduler = Scheduler()
self.logger = Logger(self.__class__.__name__)
# log information
self.logger.info("Server port: {}".format(self.port))
self.logger.info("Server document root: {}".format(self.rootDirectory))
# try to load SSL certificate
self.SSL_cert_file = os.path.join("certificates", "signed.crt")
self.SSL_key_file = os.path.join("certificates", "signed.private.key")
if os.path.isfile(self.SSL_cert_file) and os.path.isfile(self.SSL_key_file) and enableSSL:
self.SSL_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.SSL_context.load_cert_chain(self.SSL_cert_file, self.SSL_key_file)
self.SSL_enabled = True
self.logger.info("Server SSL enabled")
else:
self.SSL_context = None
self.SSL_cert_file = ""
self.SSL_key_file = ""
            self.SSL_enabled = False
def start(self):
# create server socket
try:
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(("", self.port))
serversocket.listen(5)
serversocket.settimeout(1)
except socket.error as e:
self.logger.error("{}".format(e))
return
self.logger.info("Server started")
# start listening
try:
while True:
try:
clientsocket, clientaddress = serversocket.accept()
clientaddress = "{}:{}".format(clientaddress[0], clientaddress[1])
self.logger.info("Client connected: {}".format(clientaddress))
if self.SSL_enabled:
clientsocket = self.SSL_context.wrap_socket(clientsocket, server_side=True)
processor = RequestProcessor(self.rootDirectory, self.indexFile, clientsocket, clientaddress)
self.scheduler.add(processor)
except socket.timeout: pass
except ssl.SSLError as e:
# ignore HTTP request error in HTTPS mode
self.logger.error(e)
except KeyboardInterrupt:
# on keyboard interrupt, close server and all running sub-threads
self.logger.info("Server stopped")
self.scheduler.shutdown()
self.logger.close()
serversocket.close()
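# Usage sketch (illustrative, not part of the module): serve the 'wwwroot'
# directory on port 8080. The directory name is an assumption; it must contain the
# index file checked for in __init__, and SSL stays off unless the certificate
# files referenced above are present.
if __name__ == "__main__":
    server = Server("wwwroot", 8080, indexFile="index.html", enableSSL=False)
    server.start()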
|
StarcoderdataPython
|
4936184
|
from django.apps import AppConfig
class SavConfig(AppConfig):
name = 'SAV'
|
StarcoderdataPython
|
370100
|
import sys, os, timeit
moddir = os.path.join( os.path.dirname( __file__ ), '..' )
sys.path = [moddir] + sys.path
import pytest
from utils import Approx
from dynconfig.read import *
from dynconfig.parsers import *
def test_simple():
data = '''
var1 = $(1|int)
var2 = some string
var3 = $(3|int)
var4 = $({var3} + m.pi + 2)
var5 = $({var4} + 2.0)
nest1/var1 = $(11|int)
nest1/var2 = $({var3} + 12)
nest1/var3 = $({var1} + 12)
nest1/var4 = $({var3} + 12)
nest1/var5 = $({../nest1/var3} + 12)
nest1/list1/0 = $(01|int)
nest1/list1/1 = $({0})
nest1/list1/2 = $(03|int)
nest1/nest2/var1 = $(111)
nest1/nest2/var2 = $(112)
nest1/nest2/var3 = $({var1})
nest1/nest2/var4 = $({/var1})
nest1/nest2/var5 = $({/nest1/var1})
'''
data = readConfig( data , parser=lambda x : keyval.load(x,separator='/') )
assert data['var1'] == 1
assert data['var2'] == 'some string'
assert data['var3'] == 3
assert data['var4'] == Approx(3 + 3.14159 + 2 )
assert data['var5'] == Approx(3 + 3.14159 + 2 + 2.0 )
assert data['nest1']['var1'] == 11
assert data['nest1']['var2'] == 11 + 12 + 12
assert data['nest1']['var3'] == 11 + 12
assert data['nest1']['var4'] == 11 + 12 + 12
assert data['nest1']['var5'] == 11 + 12 + 12
assert data['nest1']['list1']['0'] == 1
assert data['nest1']['list1']['1'] == 1
assert data['nest1']['list1']['2'] == 3
assert data['nest1']['nest2']['var1'] == 111
assert data['nest1']['nest2']['var2'] == 112
assert data['nest1']['nest2']['var3'] == 111
assert data['nest1']['nest2']['var4'] == 1
assert data['nest1']['nest2']['var5'] == 11
|
StarcoderdataPython
|
1603042
|
import datetime
import io
import logging
import os
import tempfile
import uuid
import zipfile
from typing import *
import pandas as pd
import requests
from Modules.ExposureNotification.SourceRegionsProviders.base import BaseSourceRegionsProvider
from Modules.ExposureNotification import exposure_notification_exceptions
from Modules.ExposureNotification.ProtocolBuffers.ExposureNotification import TemporaryExposureKeyExport_pb2
maximum_key_rolling_period = 24 * 60 # 24h in 10 min intervals
maximum_key_rolling_period_in_seconds = 24 * 60 * 60
_protobuf_export_file_name = "export.bin"
class BaseBackendClient:
def __init__(
self, *, backend_identifier: str, server_endpoint_url: str = None,
source_regions_provider=None):
self.backend_identifier = backend_identifier
self.server_endpoint_url = server_endpoint_url
self._source_regions_provider: BaseSourceRegionsProvider = source_regions_provider
# Exposure Keys
def generate_exposure_keys_export_endpoints_with_parameters(self, **kwargs) -> List[dict]:
        raise NotImplementedError()
def download_exposure_keys_with_parameters(
self, **kwargs) -> pd.DataFrame:
endpoints_with_parameters = \
self.generate_exposure_keys_export_endpoints_with_parameters(**kwargs)
exposure_keys_df = pd.DataFrame()
for endpoint_with_parameters in endpoints_with_parameters:
try:
endpoint = endpoint_with_parameters.pop("endpoint")
endpoint_exposure_keys_df = self._download_exposure_keys_from_endpoint_with_parameters(
endpoint=endpoint, parameters=endpoint_with_parameters, **kwargs)
exposure_keys_df = exposure_keys_df.append(endpoint_exposure_keys_df)
except exposure_notification_exceptions.NoKeysFoundException as e:
logging.warning(repr(e))
if not exposure_keys_df.empty:
valid_generation_dates = self.get_generation_dates_from_parameters(
as_string=True, **kwargs)
exposure_keys_df = exposure_keys_df[
exposure_keys_df.generation_date_string.isin(valid_generation_dates)]
return exposure_keys_df
def _download_exposure_keys_from_endpoint_with_parameters(
self, endpoint, parameters=None, save_raw_zip_path_prefix=None, **_kwargs) -> pd.DataFrame:
if parameters is None:
parameters = dict()
parameters = parameters.copy()
parameters.update(dict(
backend_identifier=self.backend_identifier,
server_endpoint_url=self.server_endpoint_url))
logging.info(f"Downloading TEKs from '{endpoint}' (parameters: {parameters})...")
no_keys_found_exception = \
exposure_notification_exceptions.NoKeysFoundException(
f"No exposure keys found on endpoint '{endpoint}' (parameters: {parameters}).")
request_response = self.send_get_request(url=endpoint)
try:
request_response.raise_for_status()
except requests.exceptions.HTTPError as e:
if request_response.status_code == 404:
raise no_keys_found_exception
else:
raise e
file_bytes = request_response.content
if save_raw_zip_path_prefix:
endpoint_identifier_components = \
[self.backend_identifier] + parameters["endpoint_identifier_components"]
endpoint_identifier_components = "-".join(map(str, endpoint_identifier_components))
raw_zip_path = self.backend_identifier + f"/Current/TEKs-{endpoint_identifier_components}.zip"
raw_zip_path = save_raw_zip_path_prefix + raw_zip_path
os.makedirs(os.path.dirname(raw_zip_path), exist_ok=True)
with open(raw_zip_path, "wb") as f:
f.write(file_bytes)
if len(file_bytes) == 0:
raise no_keys_found_exception
return self._parse_exposure_keys_export_file(file_bytes=file_bytes)
def _parse_exposure_keys_export_file(self, *, file_bytes: bytes = None) -> pd.DataFrame:
exposure_keys = []
g = self.load_object_from_signed_and_compressed_protobuf(
file_bytes=file_bytes,
protobuf_class=TemporaryExposureKeyExport_pb2.TemporaryExposureKeyExport,
remove_prefix=16)
start_timestamp = str(g.start_timestamp)
end_timestamp = str(g.end_timestamp)
region = str(g.region).upper()
verification_key_version = str(g.signature_infos[0].verification_key_version).upper()
verification_key_id = str(g.signature_infos[0].verification_key_id).upper()
signature_algorithm = str(g.signature_infos[0].signature_algorithm).upper()
logging_details = []
logging_details.append(f"Exposure Key File")
logging_details.append(f"Timestamp Range: {start_timestamp} -> {end_timestamp}")
logging_details.append(f"Region: {region}")
logging_details.append("")
logging_details.append("Signature Details:")
logging_details.append(f"Verification Key Version: {verification_key_version}")
logging_details.append(f"Verification Key ID: {verification_key_id}")
logging_details.append(f"Signature Algorithm: {signature_algorithm}")
logging_details.append("")
logging_details.append(f"Exposure Keys ({len(g.keys)}):")
for key in g.keys:
key_rolling_start_timestamp = \
key.rolling_start_interval_number * 10 * 60
key_rolling_period_in_seconds = key.rolling_period * 10 * 60
if key_rolling_period_in_seconds > maximum_key_rolling_period_in_seconds:
raise Exception(
f"Invalid key 'key_rolling_period': "
f"{key_rolling_period_in_seconds}s "
f"(expected: <={maximum_key_rolling_period_in_seconds}s)")
generation_datetime = \
datetime.datetime.utcfromtimestamp(key_rolling_start_timestamp)
generation_date_string = generation_datetime.strftime("%Y-%m-%d")
key_uuid = uuid.UUID(bytes=key.key_data)
exposure_keys.append(dict(
generation_datetime=generation_datetime,
generation_date_string=generation_date_string,
region=str(g.region).upper(),
verification_key_version=str(g.signature_infos[0].verification_key_version).upper(),
verification_key_id=str(g.signature_infos[0].verification_key_id).upper(),
signature_algorithm=str(g.signature_infos[0].signature_algorithm).upper(),
key_data=key_uuid,
rolling_start_interval_number=key.rolling_start_interval_number,
rolling_period=key.rolling_period,
transmission_risk_level=key.transmission_risk_level,
))
logging_details.append(
f"{key_uuid} (rolling start interval number: {key_rolling_start_timestamp}, "
f"rolling period: {key_rolling_period_in_seconds}s, "
f"report type: {key.report_type})")
logging_details = "\n".join(logging_details)
logging.info(logging_details)
exposure_keys_df = pd.DataFrame.from_records(exposure_keys)
exposure_keys_df["backend_identifier"] = self.backend_identifier
return exposure_keys_df
@staticmethod
def load_object_from_signed_and_compressed_protobuf(file_bytes: bytes, protobuf_class, remove_prefix=None):
with io.BytesIO(file_bytes) as f:
with zipfile.ZipFile(f, "r") as z:
temporary_directory = tempfile.gettempdir()
temporary_directory_uuid = str(uuid.uuid4()).upper()
temporary_directory = os.path.join(temporary_directory, temporary_directory_uuid)
z.extractall(temporary_directory)
protobuf_file_path = \
os.path.join(temporary_directory, _protobuf_export_file_name)
with open(protobuf_file_path, "rb") as p:
g = protobuf_class()
if remove_prefix:
p.read(remove_prefix)
g.ParseFromString(p.read())
return g
@staticmethod
def send_get_request(url, **kwargs):
logging.info(f"Sending GET request to endpoint '{url}'…")
return requests.get(url, **kwargs)
@staticmethod
def get_generation_dates_from_parameters(
*, generation_dates: List[datetime.datetime] = None,
generation_days: int = None, as_string: bool = None, **_kwargs):
if isinstance(generation_dates, datetime.datetime):
generation_dates = [generation_dates]
else:
if generation_days is None:
generation_days = 14
now_datetime = datetime.datetime.utcnow()
generation_dates = [now_datetime - datetime.timedelta(days=i) for i in range(generation_days)]
if as_string:
generation_dates = list(map(lambda x: x.strftime("%Y-%m-%d"), generation_dates))
return generation_dates
# Source Regions
@property
def _default_backend_identifier_based_source_regions(self) -> Set[str]:
default_source_region = self.backend_identifier.split("-")[0].split("@")[0]
return {default_source_region}
def source_regions_for_date(self, date=None) -> Set[str]:
if self._source_regions_provider:
if date is None:
date = datetime.datetime.utcnow()
return self._source_regions_provider.source_regions_for_date(date=date)
else:
source_regions = self._default_backend_identifier_based_source_regions
return set(source_regions)
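# Usage sketch (illustrative): BaseBackendClient leaves endpoint generation to
# subclasses, so a concrete client only needs to return the endpoint dictionaries.
# Everything below describes a hypothetical backend, not any real national API;
# the URL layout is an assumption.
class ExampleBackendClient(BaseBackendClient):
    def generate_exposure_keys_export_endpoints_with_parameters(self, **kwargs) -> List[dict]:
        generation_dates = self.get_generation_dates_from_parameters(as_string=True, **kwargs)
        return [
            dict(
                endpoint=f"{self.server_endpoint_url}/dailykeys/{date_string}",
                endpoint_identifier_components=[date_string])
            for date_string in generation_dates
        ]
# client = ExampleBackendClient(
#     backend_identifier="xx", server_endpoint_url="https://keys.example.invalid")
# teks_df = client.download_exposure_keys_with_parameters(generation_days=3)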
|
StarcoderdataPython
|
6647163
|
<gh_stars>0
import sqlite3
# open connection to new db file
CONN = sqlite3.connect('demo_data.sqlite3')
# create table
cursor = CONN.cursor()
create_table = 'CREATE TABLE demo (s varchar(30), x int, y int);'
cursor.execute(create_table)
cursor.close()
CONN.commit()
# add data to table
cursor2 = CONN.cursor()
insert_data = 'INSERT INTO demo (s, x, y) VALUES '
# data from table in markdown
data = [
('g', 3, 9),
('v', 5, 7),
('f', 8, 7)
]
for datum in data:
cursor2.execute(insert_data + str(datum))
cursor2.close()
CONN.commit()
# query data in table
cursor3 = CONN.cursor()
rows = cursor3.execute(
'SELECT COUNT(*) FROM demo;'
).fetchall()
x_y = cursor3.execute(
'SELECT COUNT(*) FROM demo WHERE x >= 5 AND y >= 5;'
).fetchall()
y_values = cursor3.execute(
'SELECT COUNT (DISTINCT y) FROM demo;'
).fetchall()
print('Number of Rows: ', rows[0])
print('Rows where X and Y are >= 5', x_y[0])
print('Unique values in column Y: ', y_values[0])
cursor3.close()
CONN.commit()
|
StarcoderdataPython
|
11220582
|
from estimators.bandits import base
from typing import Optional
class Estimator(base.Estimator):
weighted_examples_count: float
weighted_reward: float
def __init__(self):
self.weighted_examples_count = 0
self.weighted_reward = 0
def add_example(self, p_log: float, r: float, p_pred: float, count: float = 1.0) -> None:
w = p_pred / p_log
self.weighted_examples_count += w * count
self.weighted_reward += r * w * count
def get(self) -> Optional[float]:
return self.weighted_reward / self.weighted_examples_count if self.weighted_examples_count != 0 else None
def __add__(self, other: 'Estimator') -> 'Estimator':
result = Estimator()
result.weighted_examples_count = self.weighted_examples_count + other.weighted_examples_count
result.weighted_reward = self.weighted_reward + other.weighted_reward
return result
|
StarcoderdataPython
|
12818534
|
import unittest
import numpy as np
from rlcard.agents.human_agents.wizard_human_agent import _print_state, _print_action
# from rlcard.agents.human_agents.wizard_ms_trickpred_human_agent import _print_state, _print_action as _print_state02, _print_action02
# from rlcard.agents.human_agents.wizard_s_trickpred_human_agent import _print_state, _print_action as _print_state03, _print_action03
class TestLeducHuman(unittest.TestCase):
def test_print_state(self):
raw_state = {'hand': ['r-2','b-wizard',"y-3", 'b-3', 'b-6'],
'current_player': 0,
'color_to_play':"r",
'legal_actions': ['y-3'],
'played_cards': [ 'r-1', 'r-3','y-2'],
'played_cards_in_trick':['y-2'],
'num_cards_left': 4,
'actions_in_trick_left':1,
'player_without_color':np.zeros((2,4)),
                'wizard_played': True,
'trick_scores': [0,1],
}
action_record = []
_print_state(raw_state, action_record)
def test_print_action(self):
_print_action('r-8')
if __name__ == '__main__':
unittest.main(exit=False)
|
StarcoderdataPython
|
4825462
|
<reponame>aronwoost/sublime-expand-region
import unittest
from expand_to_word import *
class WordTest(unittest.TestCase):
@classmethod
def setUpClass(self):
with open ("test/snippets/word_01.txt", "r") as myfile:
self.string1 = myfile.read()
with open ("test/snippets/word_02.txt", "r") as myfile:
self.string2 = myfile.read()
with open ("test/snippets/word_03.txt", "r") as myfile:
# decode utf8 unicode
self.string3 = myfile.read().decode("utf8")
def test_word_with_whitespaces_around (self):
result = expand_to_word(" hello ", 3, 3);
self.assertEqual(result["start"], 1)
self.assertEqual(result["end"], 6)
self.assertEqual(result["string"], "hello")
def test_find_word_with_dot_before (self):
result = expand_to_word("foo.bar", 5, 5);
self.assertEqual(result["start"], 4)
self.assertEqual(result["end"], 7)
self.assertEqual(result["string"], "bar")
def test_find_word_when_string_is_only_the_word (self):
result = expand_to_word("bar", 1, 1);
self.assertEqual(result["start"], 0)
self.assertEqual(result["end"], 3)
self.assertEqual(result["string"], "bar")
def test_find_word_when_parts_of_the_word_are_already_selected (self):
result = expand_to_word("hello", 1, 4);
self.assertEqual(result["start"], 0)
self.assertEqual(result["end"], 5)
self.assertEqual(result["string"], "hello")
def test_dont_find_word1 (self):
result = expand_to_word(self.string1, 1, 10);
self.assertEqual(result, None)
def test_dont_find_word2 (self):
result = expand_to_word(" ee ee", 2, 5);
self.assertEqual(result, None)
def test_dont_find_word3_and_dont_hang (self):
result = expand_to_word("aaa", 0, 3);
self.assertEqual(result, None)
def test_dont_expand_to_linebreak (self):
result = expand_to_word(self.string2, 0, 0);
self.assertEqual(result, None)
def test_special_chars1(self):
result = expand_to_word(self.string3, 15, 15)
self.assertEqual(result["start"], 13)
self.assertEqual(result["end"], 24)
def test_special_chars2(self):
result = expand_to_word(self.string3, 57, 57)
self.assertEqual(result["start"], 57)
self.assertEqual(result["end"], 64)
def test_special_chars3(self):
result = expand_to_word(self.string3, 75, 77)
self.assertEqual(result["start"], 75)
self.assertEqual(result["end"], 85)
def test_special_chars4(self):
result = expand_to_word(self.string3, 89, 89)
self.assertEqual(result["start"], 86)
self.assertEqual(result["end"], 89)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1823264
|
# encoding: utf-8
from pypi_server.handlers.pypi.simple.packages import PackagesHandler
from pypi_server.handlers.pypi.simple.files import VersionsHandler
|
StarcoderdataPython
|
1654260
|
# -*- coding: utf-8 -*-
###############################################################################
#
# SearchByKeyword
# Searches movie reviews by keyword and various filter parameters.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SearchByKeyword(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SearchByKeyword Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SearchByKeyword, self).__init__(temboo_session, '/Library/NYTimes/MovieReviews/SearchByKeyword')
def new_input_set(self):
return SearchByKeywordInputSet()
def _make_result_set(self, result, path):
return SearchByKeywordResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SearchByKeywordChoreographyExecution(session, exec_id, path)
class SearchByKeywordInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SearchByKeyword
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((optional, string) The API Key provided by NY Times.)
"""
super(SearchByKeywordInputSet, self)._set_input('APIKey', value)
def set_CriticsPick(self, value):
"""
Set the value of the CriticsPick input for this Choreo. ((optional, string) Set this parameter to Y to limt the results to NYT Critics' Picks. To get only those movies that have not been highlighted by Times critics, specify N.)
"""
super(SearchByKeywordInputSet, self)._set_input('CriticsPick', value)
def set_DVD(self, value):
"""
Set the value of the DVD input for this Choreo. ((optional, string) Set this parameter to Y to limit the results to movies that have been released on DVD. To get only those movies that have not been released on DVD, specify N.)
"""
super(SearchByKeywordInputSet, self)._set_input('DVD', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) The number of results to return.)
"""
super(SearchByKeywordInputSet, self)._set_input('Limit', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) A numeric value indicating the starting point of the result set. This can be used in combination with the Limit input to page through results.)
"""
super(SearchByKeywordInputSet, self)._set_input('Offset', value)
def set_OpeningDate(self, value):
"""
Set the value of the OpeningDate input for this Choreo. ((optional, date) Limits by date or range of dates. The opening-date is the date the movie's opening date in the New York region. Format YYYY-MM-DD. Separate ranges with semicolons.)
"""
super(SearchByKeywordInputSet, self)._set_input('OpeningDate', value)
def set_Order(self, value):
"""
Set the value of the Order input for this Choreo. ((optional, string) Sets the sort order of the results. Accepted values are: by-title, by-publication-date, by-opening-date, by-dvd-release-date.)
"""
super(SearchByKeywordInputSet, self)._set_input('Order', value)
def set_PublicationDate(self, value):
"""
        Set the value of the PublicationDate input for this Choreo. ((optional, date) Limits by date or range of dates. The publication-date is the date the review was first published in The Times. Format YYYY-MM-DD. Separate ranges with semicolons.)
"""
super(SearchByKeywordInputSet, self)._set_input('PublicationDate', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((conditional, string) A string of search keywords. Matches movie titles and indexed terms.)
"""
super(SearchByKeywordInputSet, self)._set_input('Query', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(SearchByKeywordInputSet, self)._set_input('ResponseFormat', value)
def set_Reviewer(self, value):
"""
Set the value of the Reviewer input for this Choreo. ((optional, string) Limits results to reviews by a specific critic. Reviewer names should be hyphenated or concatenated with dots (i.e manohla.dargis).)
"""
super(SearchByKeywordInputSet, self)._set_input('Reviewer', value)
def set_ThousandBest(self, value):
"""
Set the value of the ThousandBest input for this Choreo. ((optional, string) Set this parameter to Y to limit the results to movies on the Times list of The Best 1,000 Movies Ever Made. To get only those movies that are not on the list, specify N.)
"""
super(SearchByKeywordInputSet, self)._set_input('ThousandBest', value)
class SearchByKeywordResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SearchByKeyword Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from the NY Times API.)
"""
return self._output.get('Response', None)
class SearchByKeywordChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SearchByKeywordResultSet(response, path)
|
StarcoderdataPython
|
378620
|
<reponame>mpesavento/arg-mine
import os
import json
from arg_mine import PROJECT_DIR
def load_json_fixture(fixture_filename):
"""Get test fixture data from a JSON filename"""
# import pkg_resources
# json_path = pkg_resources.resource_filename("tests.fixtures", fixture_filename)
json_path = os.path.join(PROJECT_DIR, "tests", "fixtures", fixture_filename)
with open(json_path, "r") as f:
json_blob = json.load(f)
return json_blob
def _drop_keys(my_dict, ignored_keys):
for k in ignored_keys:
if k in my_dict:
del my_dict[k]
return my_dict
def save_json_request_fixture(
fixture_filename,
payload: dict,
response: dict,
status_code: int = 200,
drop_keys: list = None,
):
"""
Save a requests.post.json return as a test fixture
Parameters
----------
fixture_filename : str
saved to tests/fixtures/
payload
response
status_code
drop_keys : list of keys in payload to NOT save in the output
Returns
-------
"""
import pkg_resources
json_path = pkg_resources.resource_filename("tests.fixtures", fixture_filename)
# pop any keywords we don't want to save
if drop_keys:
payload = _drop_keys(payload, drop_keys)
test_data = {
"payload": payload,
"response": response,
"status_code": status_code,
}
with open(json_path, "w") as f:
json.dump(test_data, f, indent=2)
|
StarcoderdataPython
|
3594349
|
<filename>py_types/runtime/__init__.py
"""Runtime checks and decorators to ensure correctness of functions.
Includes schema tools and runtime type checks."""
from .schema import (
schema,
SchemaOr,
SchemaError
)
from .typecheck import typecheck
|
StarcoderdataPython
|
276253
|
import rasterio
import numpy as np
import dask.array as da
from dask.base import tokenize
from rasterio.windows import Window
def read_raster(image_path, bands=None, masked=False, block_size=1):
"""
Read all or some band_ids from raster
Arguments:
image_path {string} -- image_path to raster file
Keyword Arguments:
bands {int, iterable(int)} -- bands number or iterable of band_ids.
When passing None, it reads all band_ids (default: {None})
masked {bool} -- If `True`, returns masked array masking `nodata` values
(default: {False})
block_size {int} -- block size multiplier (default: {1})
Returns:
dask.array.Array -- a Dask array
"""
import pathlib
def read_window(raster_path, window, band_ids=None, if_masked=False):
with rasterio.open(raster_path) as src_path:
return src_path.read(
indexes=band_ids, window=window, masked=if_masked
)
def resize_window(window, blk_size):
return Window(
col_off=window.col_off * blk_size,
row_off=window.row_off * blk_size,
width=window.width * blk_size,
height=window.height * blk_size)
def block_windows(data_set, blk_size):
shape_list = data_set.block_shapes
band_id = shape_list.index(min(shape_list)) + 1
return [
(pos, resize_window(win, blk_size))
for pos, win in data_set.block_windows(bidx=band_id)
]
assert isinstance(image_path, (str, pathlib.Path))
if isinstance(image_path, str):
image_path = pathlib.Path(image_path)
with rasterio.open(image_path) as src:
if bands is None:
bands = list(range(1, (src.count + 1)))
b_shapes = np.array(src.block_shapes)
h, w = np.min(b_shapes[:, 0]), np.min(b_shapes[:, 1])
u_dtypes = set(src.dtypes)
assert isinstance(bands, (int, tuple, list))
if isinstance(bands, int):
chunks = (h * block_size, w * block_size)
else:
chunks = (len(bands), h * block_size, w * block_size)
if len(u_dtypes) > 1:
raise ValueError(
"Multiple 'dtype' found.\n** Read individual band instead! **"
)
else:
assert len(
u_dtypes
) == 1, "No 'dtype' found!\n** Possibly corrupted File **"
dtype = u_dtypes.pop()
blocks = block_windows(src, block_size)
name = 'Raster-{}'.format(
tokenize(image_path.absolute(), bands, chunks)
)
if isinstance(bands, (tuple, list)):
shape = len(bands), *src.shape
dsk = {
(name, 0, i, j): (
read_window, image_path, window, bands, masked
)
for (i, j), window in blocks
}
else:
shape = src.shape
dsk = {
(name, i, j): (read_window, image_path, window, bands, masked)
for (i, j), window in blocks
}
return da.Array(
dask=dsk,
name=name,
chunks=chunks,
dtype=dtype,
shape=shape
)
|
StarcoderdataPython
|
6587862
|
<gh_stars>1-10
from pimsviewer import run
from pimsviewer.example_plugins import AnnotatePlugin
run('path/to/file', [AnnotatePlugin])
|
StarcoderdataPython
|
9797909
|
<reponame>Darkshadow9799/Super-Resolution
import moviepy.editor as mp
audio_path = 'Results/audio.mp3'
video_path = 'Results/2.mp4'
clip = mp.VideoFileClip(video_path)
clip.audio.write_audiofile(audio_path)
print("SUCCESS")
|
StarcoderdataPython
|
4836458
|
<reponame>materialsvirtuallab/nano281<gh_stars>10-100
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

x_train, x_test, y_train, y_test = train_test_split(x, y_class, test_size=0.2)
decision_tree = AdaBoostClassifier(DecisionTreeClassifier(criterion="entropy", random_state=0, max_depth=3),
n_estimators=20)
decision_tree = decision_tree.fit(x_train, y_train)
train_accuracy = decision_tree.score(x_train, y_train)
test_accuracy = decision_tree.score(x_test, y_test)
|
StarcoderdataPython
|
3344236
|
<filename>src/road_collisions_uk/models/collision.py
import datetime
import os
import glob
import csv
from pandas import DataFrame
import pandas as pd
from road_collisions_base import logger
from road_collisions_base.models.raw_collision import RawCollision
from road_collisions_uk.utils import extract_tgz
from road_collisions_uk.models.vehicle import Vehicles
from road_collisions_uk.models.casualty import Casualties
class Collisions():
def __init__(self, *args, **kwargs):
self._data = kwargs.get('data', [])
def __getitem__(self, i):
return self._data[i]
def __iter__(self):
return (i for i in self._data)
def __len__(self):
return len(self._data)
def append(self, data):
self._data.append(data)
def extend(self, data):
self._data.extend(data)
def serialize(self):
return [
d.serialize() for d in self
]
@staticmethod
def from_dir(dirpath, region=None, year=None):
if region is None:
search_dir = f'{dirpath}/**'
else:
search_dir = f'{dirpath}/{region}/**'
for filename in glob.iglob(search_dir, recursive=True):
if os.path.splitext(filename)[-1] not in {'.tgz', '.gz'}:
continue
# TODO: Don't extract every time
extract_tgz(filename)
print('Loading accidents')
accident_data = []
with open(os.path.join(dirpath, region, 'accident.csv')) as csvfile:
data = csv.DictReader(csvfile)
done = 0
for row in data:
if year is None or int(row['accident_year']) == year:
accident_data.append(row)
done += 1
accident_df = DataFrame(accident_data)
accident_df = accident_df.set_index('accident_reference')
print('Loaded accidents')
print('Loading casualties')
casualty_data = []
with open(os.path.join(dirpath, region, 'casualty.csv')) as csvfile:
data = csv.DictReader(csvfile)
done = 0
for row in data:
if year is None or int(row['accident_year']) == year:
casualty_data.append(row)
done += 1
casualty_df = DataFrame(casualty_data)
casualty_df = casualty_df.set_index('accident_reference')
print('Loaded casualties')
print('Loading vehicles')
vehicle_data = []
with open(os.path.join(dirpath, region, 'vehicle.csv')) as csvfile:
data = csv.DictReader(csvfile)
done = 0
for row in data:
if year is None or int(row['accident_year']) == year:
vehicle_data.append(row)
done += 1
vehicle_df = DataFrame(vehicle_data)
vehicle_df = vehicle_df.set_index('accident_reference')
print('Loaded vehicles')
print('Parsing collisions')
collisions = Collisions()
for index, row in accident_df.iterrows():
accident_vehicles = vehicle_df.loc[index]
accident_casualties = casualty_df.loc[index]
if isinstance(accident_vehicles, pd.core.series.Series):
vehicles = Vehicles.parse(
accident_vehicles.to_dict()
)
else:
vehicles = Vehicles.parse(
accident_vehicles.to_dict(orient='records')
)
if isinstance(accident_casualties, pd.core.series.Series):
casualties = Casualties.parse(
accident_casualties.to_dict()
)
else:
casualties = Casualties.parse(
accident_casualties.to_dict(orient='records')
)
row_data = row.to_dict()
row_data.update({
'accident_index': index,
'vehicles': vehicles,
'casualties': casualties
})
collisions.append(
Collision(
**row_data
)
)
print('Finished parsing collisions')
return collisions
def filter(self, **kwargs):
'''
        Filter collisions by any of the attributes defined on Collision
'''
logger.debug('Filtering from %s' % (len(self)))
filtered = [
d for d in self if all(
[
getattr(d, attr) == kwargs[attr] for attr in kwargs.keys()
]
)
]
return Collisions(
data=filtered
)
@staticmethod
def load_all(year=None):
import road_collisions_uk
return Collisions.from_dir(
'/opt/road_collisions/',
region='uk',
year=year
)
class Collision(RawCollision):
__slots__ = [
'accident_index',
'accident_year',
'location_easting_osgr',
'location_northing_osgr',
'longitude',
'latitude',
'police_force',
'accident_severity',
'number_of_vehicles',
'number_of_casualties',
'date',
'time',
'local_authority_district',
'local_authority_ons_district',
'local_authority_highway',
'first_road_class',
'first_road_number',
'road_type',
'speed_limit',
'junction_detail',
'junction_control',
'second_road_class',
'second_road_number',
'pedestrian_crossing_human_control',
'pedestrian_crossing_physical_facilities',
'light_conditions',
'weather_conditions',
'road_surface_conditions',
'special_conditions_at_site',
'carriageway_hazards',
'urban_or_rural_area',
'did_police_officer_attend_scene_of_accident',
'trunk_road_flag',
'lsoa_of_accident_location',
]
    # Do casualties and vehicles go in __slots__?
def __init__(self, **kwargs):
self.accident_index = kwargs['accident_index']
self.accident_severity = int(kwargs['accident_severity'])
self.accident_year = int(kwargs['accident_year'])
self.carriageway_hazards = int(kwargs['carriageway_hazards'])
self.date = kwargs['date']
self.did_police_officer_attend_scene_of_accident = int(kwargs['did_police_officer_attend_scene_of_accident'])
self.first_road_class = int(kwargs['first_road_class'])
self.first_road_number = int(kwargs['first_road_number'])
self.junction_control = int(kwargs['junction_control'])
self.junction_detail = int(kwargs['junction_detail'])
self.latitude = float(kwargs['latitude']) if kwargs['latitude'] != 'NULL' else None
self.light_conditions = int(kwargs['light_conditions'])
self.local_authority_district = int(kwargs['local_authority_district'])
self.local_authority_highway = kwargs['local_authority_highway']
self.local_authority_ons_district = kwargs['local_authority_ons_district']
self.location_easting_osgr = int(kwargs['location_easting_osgr']) if kwargs['latitude'] != 'NULL' else None
self.location_northing_osgr = int(kwargs['location_northing_osgr']) if kwargs['latitude'] != 'NULL' else None
self.longitude = float(kwargs['longitude']) if kwargs['latitude'] != 'NULL' else None
self.lsoa_of_accident_location = kwargs['lsoa_of_accident_location']
self.number_of_casualties = int(kwargs['number_of_casualties'])
self.number_of_vehicles = int(kwargs['number_of_vehicles'])
self.pedestrian_crossing_human_control = int(kwargs['pedestrian_crossing_human_control'])
self.pedestrian_crossing_physical_facilities = int(kwargs['pedestrian_crossing_physical_facilities'])
self.police_force = int(kwargs['police_force'])
self.road_surface_conditions = int(kwargs['road_surface_conditions'])
self.road_type = int(kwargs['road_type'])
self.second_road_class = int(kwargs['second_road_class'])
self.second_road_number = int(kwargs['second_road_number'])
self.special_conditions_at_site = int(kwargs['special_conditions_at_site'])
self.speed_limit = int(kwargs['speed_limit'])
self.time = kwargs['time']
self.trunk_road_flag = int(kwargs['trunk_road_flag'])
self.urban_or_rural_area = int(kwargs['urban_or_rural_area'])
self.weather_conditions = int(kwargs['weather_conditions'])
self.casualties = kwargs['casualties']
self.vehicles = kwargs['vehicles']
@staticmethod
def parse(data):
if isinstance(data, Collision):
return data
if isinstance(data, dict):
if 'data' in data.keys():
return Collision(
**RawCollision.parse(
data
).data
)
else:
# from serialization
return Collision(
**data
)
@property
def id(self):
return self.data['accident_index']
@property
def geo(self):
return [self.latitude, self.longitude]
@property
def timestamp(self):
return datetime.datetime.strptime(
f'{self.date} {self.time}',
'%d/%m/%Y %I:%M'
)
def serialize(self):
return {
'accident_index': self.accident_index,
'accident_year': self.accident_year,
'location_easting_osgr': self.location_easting_osgr,
'location_northing_osgr': self.location_northing_osgr,
'longitude': self.longitude,
'latitude': self.latitude,
'police_force': self.police_force,
'accident_severity': self.accident_severity,
'number_of_vehicles': self.number_of_vehicles,
'number_of_casualties': self.number_of_casualties,
'date': self.date,
'time': self.time,
'local_authority_district': self.local_authority_district,
'local_authority_ons_district': self.local_authority_ons_district,
'local_authority_highway': self.local_authority_highway,
'first_road_class': self.first_road_class,
'first_road_number': self.first_road_number,
'road_type': self.road_type,
'speed_limit': self.speed_limit,
'junction_detail': self.junction_detail,
'junction_control': self.junction_control,
'second_road_class': self.second_road_class,
'second_road_number': self.second_road_number,
'pedestrian_crossing_human_control': self.pedestrian_crossing_human_control,
'pedestrian_crossing_physical_facilities': self.pedestrian_crossing_physical_facilities,
'light_conditions': self.light_conditions,
'weather_conditions': self.weather_conditions,
'road_surface_conditions': self.road_surface_conditions,
'special_conditions_at_site': self.special_conditions_at_site,
'carriageway_hazards': self.carriageway_hazards,
'urban_or_rural_area': self.urban_or_rural_area,
'did_police_officer_attend_scene_of_accident': self.did_police_officer_attend_scene_of_accident,
'trunk_road_flag': self.trunk_road_flag,
'lsoa_of_accident_location': self.lsoa_of_accident_location,
'casualties': self.casualties.serialize(),
'vehicles': self.vehicles.serialize(),
}
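# Hypothetical usage sketch (added for illustration; the row dict is a
# placeholder for a full record carrying all of the fields read in __init__,
# and RawCollision is assumed to expose a .data dict as referenced in parse()):
#
#   row = {...}                       # one accident record, including casualties/vehicles
#   collision = Collision.parse(row)
#   print(collision.id, collision.geo, collision.timestamp)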
|
StarcoderdataPython
|
8088314
|
from rest_framework.pagination import PageNumberPagination
class DefaultPagePagination(PageNumberPagination):
max_page_size = 100
page_size_query_param = "page_size"
page_size = 25
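# Hypothetical usage sketch (added for illustration; the module path and view
# name below are placeholders): DRF picks this class up either globally or per view.
#
#   # settings.py
#   REST_FRAMEWORK = {
#       "DEFAULT_PAGINATION_CLASS": "myapp.pagination.DefaultPagePagination",
#   }
#
#   # or on a single viewset
#   class ArticleViewSet(viewsets.ModelViewSet):
#       pagination_class = DefaultPagePagination
#
#   # clients may then request e.g. /articles/?page=2&page_size=50 (capped at max_page_size=100)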
|
StarcoderdataPython
|
285642
|
import json
import requests
import sys
# instantiate working variables
octopus_server_uri = 'http://your.octopus.app/'
octopus_api_key = 'API-YOURAPIKEY'
headers = {'X-Octopus-ApiKey': octopus_api_key}
space_name = 'Default'
project_name = 'ProjectName'
# Set disable_project to True to disable, False to enable.
disable_project = True
# Get Space
spaces = requests.get("{url}/api/spaces?partialName={space}&skip=0&take=100".format(url=octopus_server_uri, space=space_name), headers=headers)
spaces_data = json.loads(spaces.text)
for item in spaces_data['Items']:
    if item['Name'] == space_name:
        space = item
        break
else:
    sys.exit("The Space with name {spaceName} cannot be found.".format(spaceName=space_name))
# Get Project
projects = requests.get("{url}/api/{spaceID}/projects?partialName={projectName}&skip=0&take=100".format(url=octopus_server_uri, spaceID=space['Id'], projectName=project_name), headers=headers)
projects_data = json.loads(projects.text)
for item in projects_data['Items']:
    if item['Name'] == project_name:
        project = item
        break
else:
    sys.exit("Project with name {projectName} cannot be found.".format(projectName=project_name))
# Enable/Disable Project
project['IsDisabled'] = disable_project
# Save Changes
uri = "{url}/api/{spaceID}/projects/{projectID}".format(url=octopus_server_uri, spaceID=space['Id'], projectID=project['Id'])
change = requests.put(uri, headers=headers, data=json.dumps(project))
if change.status_code == 200:
print("Request Successful.")
else:
print("Error - Request Code: {code}".format(code=change.status_code))
|
StarcoderdataPython
|
4874083
|
import os
from bs4 import BeautifulSoup
import xmltodict
from collections import OrderedDict
def data_generator(dirname):
"""Transforms all summaries in directory into data objects for pipeline.
Simply iterates over the directory and calls file_to_data.
"""
for root, dirs, files in os.walk(dirname):
for name in files:
# Exclude hidden files and only cover non-update files
if not name[0] == "." and root[-1] == 'A':
filename = os.path.join(root, name)
# Get cluster id (ending with '-A') from root name
cluster_id = root[-8:-3] + '-A'
yield file_to_data(filename, cluster_id)
def file_to_data(filename, cluster_id):
"""Transforms a single summary file into a data object for pipeline."""
# open file
with open(filename, 'r') as fh:
# transform to real xml
soup = BeautifulSoup(fh.read(), features='xml')
# remove P-tags from xml (to make LA Times readable)
# cleaned_soup = str(soup)
cleaned_soup = str(soup).replace('<P>', '')
cleaned_soup = cleaned_soup.replace('</P>', '')
# parse xml and put into ordered dict, remove outside DOC-tag
article = xmltodict.parse(cleaned_soup)['DOC']
# retrieve document
doc_id = article['@id']
# retrieve all relevant text from the document
text = [article.get('HEADLINE', ''), article.get('TEXT', '')]
# put sentences in list of dicts
body = [{'type': 'p', 'content': p} for p in text]
# Create dictionary of doc_id and body
data = {'doc_id': doc_id, 'cluster_id': cluster_id, 'body': body, 'summaries': []}
return data
def merge_clusters(articles):
"""Merges articles of every cluster into one article object."""
clusters = []
cluster_ids = set([article['cluster_id'] for article in articles])
for id in cluster_ids:
body = []
for article in articles:
if article['cluster_id'] == id:
body += article['body']
data = {'cluster_id': id, 'body': body, 'summaries': []}
clusters.append(data)
return clusters
def add_summaries(articles, dirname):
"""Adds summaries from summary directory to the correct article object."""
all_summaries = {}
for root, dirs, files in os.walk(dirname, topdown=True):
for name in files:
filename = os.path.join(root, name)
# Cluster id is in the first 7 characters of the filename
cluster_id = name[:7]
with open(filename, 'r') as fh:
summary = fh.read()
# Add summary to right list (or initiate cluster if not existing yet)
if cluster_id in all_summaries:
all_summaries[cluster_id] += [summary]
else:
all_summaries[cluster_id] = [summary]
# Add list of summaries (4 per cluster) to corresponding article objects
for article in articles:
article['summaries'] = all_summaries[article['cluster_id']]
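# Hypothetical usage sketch (added for illustration; the directory names are
# placeholders for the DUC-style layout this module appears to expect):
#
#   articles = list(data_generator('data/docs'))
#   clusters = merge_clusters(articles)
#   add_summaries(clusters, 'data/summaries')
#   # each cluster dict now carries 'cluster_id', 'body' and 'summaries'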
|
StarcoderdataPython
|
9745926
|
#!/usr/bin/python3
# File name: ajakipro_checkavailablespace.py
# Version: 1.0.0
# Author: <NAME>
# Email: <EMAIL>
# Date created: 5/23/2021
# Date last modified: 5/23/2021
# Checks the amount of available space on the Ki Pro. If the percentage is >= the threshold, the job succeeds. If it is less, it fails.
# Job succeed/error chaining is then used to send an alert if needed
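# Hypothetical example of the JSON payload this script expects on stdin (added
# for illustration; the values are placeholders):
#   {"params": {"ip": "192.0.2.10", "percentage": "20"}}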
import sys
import json
import requests
from datetime import datetime
import uuid
try:
stdinput = sys.stdin.readline()
data = json.loads(stdinput)
ip = data['params']['ip']
percentage = data['params']['percentage']
    url = 'http://' + ip + '/config?action=get&paramid=eParamID_CurrentMediaAvailable'
r = requests.get(url)
availableSpace = int(json.loads(r.text)['value_name'])
if availableSpace >= int(percentage):
#success
print('{ "complete": 1 }')
else:
#failed
print('{ "complete": 1, "code": 999, "description": "Not enough space on the Ki Pro.", "chain_data": { "message": "Only ' + str(availableSpace) + '% remaining on KiPro: ' + ip + '"} }')
except Exception as e:
print('{ "complete": 1, "code": 999, "description": "Failed to execute." }')
print(e)
|
StarcoderdataPython
|
3292093
|
<filename>test/test_stack.py<gh_stars>0
# -*- coding: utf-8 -*-
from src.Stack import Stack
def test_empty_stack():
s = Stack()
assert s.size() == 0
assert len(s) == 0
def test_nonempty_stack():
s = Stack()
s.push(1)
s.push(2)
assert len(s) == 2
assert s.pop() == 2
assert len(s) == 1
assert s.pop() == 1
assert len(s) == 0
    assert s.pop() is None
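# Minimal sketch (added for illustration) of a Stack that would satisfy these
# tests; the real src/Stack.py is not shown here and may differ.
#
#   class Stack:
#       def __init__(self):
#           self._items = []
#       def push(self, item):
#           self._items.append(item)
#       def pop(self):
#           return self._items.pop() if self._items else None
#       def size(self):
#           return len(self._items)
#       def __len__(self):
#           return self.size()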
|
StarcoderdataPython
|
4929983
|
<reponame>Thanatoz-1/EmotionStimuli
__author__ = "<NAME>"
from emotion.utils import Data, Dataset
from emotion import HMM
from emotion.evaluation import Evaluation
# read Data from file, only gne, all labels
rem_all = Data(
filename="data/rectified-unified-with-offsets.json",
    roles=[
        "experiencer",
        "target",
        "cue-joy",
        "cue-fear",
        "cue-trust",
        "cue-surprise",
        "cue-anticipation",
        "cue-sadness",
        "cue-anger",
        "cue-disgust",
        "cue-other",
"cause",
],
corpora=["reman"],
splits=[0.8, 0.2],
)
# convert data to Brown-Format
rem_all.conv2brown()
# set train- and test-sets
train = Dataset(data=rem_all, splt=0)
test = Dataset(data=rem_all, splt=1)
# train model on trainset, one for each label
hmm_rem_exp = HMM("experiencer")
hmm_rem_tar = HMM("target")
hmm_rem_cue_joy = HMM("cue-joy")
hmm_rem_cue_fear = HMM("cue-fear")
hmm_rem_cue_trust = HMM("cue-trust")
hmm_rem_cue_surpr = HMM("cue-surprise")
hmm_rem_cue_sadn = HMM("cue-sadness")
hmm_rem_cue_anger = HMM("cue-anger")
hmm_rem_cue_antic = HMM("cue-anticipation")
hmm_rem_cue_disg = HMM("cue-disgust")
hmm_rem_cue_other = HMM("cue-other")
hmm_rem_cse = HMM("cause")
hmm_rem_exp.train(dataset=train)
hmm_rem_tar.train(dataset=train)
hmm_rem_cue_joy.train(dataset=train)
hmm_rem_cue_fear.train(dataset=train)
hmm_rem_cue_trust.train(dataset=train)
hmm_rem_cue_surpr.train(dataset=train)
hmm_rem_cue_sadn.train(dataset=train)
hmm_rem_cue_anger.train(dataset=train)
hmm_rem_cue_antic.train(dataset=train)
hmm_rem_cue_disg.train(dataset=train)
hmm_rem_cue_other.train(dataset=train)
hmm_rem_cse.train(dataset=train)
# predict testset using previously trained model
hmm_rem_exp.predictDataset(dataset=test)
hmm_rem_tar.predictDataset(dataset=test)
hmm_rem_cue_joy.predictDataset(dataset=test)
hmm_rem_cue_fear.predictDataset(dataset=test)
hmm_rem_cue_trust.predictDataset(dataset=test)
hmm_rem_cue_surpr.predictDataset(dataset=test)
hmm_rem_cue_sadn.predictDataset(dataset=test)
hmm_rem_cue_anger.predictDataset(dataset=test)
hmm_rem_cue_antic.predictDataset(dataset=test)
hmm_rem_cue_other.predictDataset(dataset=test)
hmm_rem_cue_disg.predictDataset(dataset=test)
hmm_rem_cse.predictDataset(dataset=test)
# evaluate the predictions and return precicion, recall and f-score
eval_rem_exp = Evaluation(dataset=test, role="experiencer", threshold=0.8)
eval_rem_tar = Evaluation(dataset=test, role="target", threshold=0.8)
eval_rem_cue_joy = Evaluation(dataset=test, role="cue-joy", threshold=0.8)
eval_rem_cue_fear = Evaluation(dataset=test, role="cue-fear", threshold=0.8)
eval_rem_cue_trust = Evaluation(dataset=test, role="cue-trust", threshold=0.8)
eval_rem_cue_surpr = Evaluation(dataset=test, role="cue-surprise", threshold=0.8)
eval_rem_cue_sadn = Evaluation(dataset=test, role="cue-sadness", threshold=0.8)
eval_rem_cue_anger = Evaluation(dataset=test, role="cue-anger", threshold=0.8)
eval_rem_cue_antic = Evaluation(dataset=test, role="cue-anticipation", threshold=0.8)
eval_rem_cue_disg = Evaluation(dataset=test, role="cue-disgust", threshold=0.8)
eval_rem_cue_other = Evaluation(dataset=test, role="cue-other", threshold=0.8)
eval_rem_cse = Evaluation(dataset=test, role="cause", threshold=0.8)
# eval_rem_exp.save_doc("tests/reman/doc_reman_exp.json")
# eval_rem_tar.save_doc("tests/reman/doc_reman_tar.json")
# eval_rem_cue_joy.SaveDoc("tests/reman/doc_reman_cue_joy.json")
# eval_rem_cue_fear.SaveDoc("tests/reman/doc_reman_cue_fear.json")
# eval_rem_cue_trust.SaveDoc("tests/reman/doc_reman_cue_trust.json")
# eval_rem_cue_surpr.SaveDoc("tests/reman/doc_reman_cue_surpr.json")
# eval_rem_cue_sadn.SaveDoc("tests/reman/doc_reman_cue_sadn.json")
# eval_rem_cue_anger.SaveDoc("tests/reman/doc_reman_cue_anger.json")
# eval_rem_cue_antic.SaveDoc("tests/reman/doc_reman_cue_antic.json")
# eval_rem_cue_disg.SaveDoc("tests/reman/doc_reman_cue_disg.json")
# eval_rem_cue_other.SaveDoc("tests/reman/doc_reman_cue_other.json")
# eval_rem_cse.SaveDoc("tests/reman/doc_reman_cse.json")
eval_rem_exp.save_eval(eval_name="experiencer", filename="tests/reman/eval_reman.json")
eval_rem_tar.save_eval(eval_name="target", filename="tests/reman/eval_reman.json")
eval_rem_cue_joy.save_eval(eval_name="cue_joy", filename="tests/reman/eval_reman.json")
eval_rem_cue_fear.save_eval(
eval_name="cue_fear", filename="tests/reman/eval_reman.json"
)
eval_rem_cue_trust.save_eval(
eval_name="cue_trust", filename="tests/reman/eval_reman.json"
)
eval_rem_cue_surpr.save_eval(
eval_name="cue_surprise", filename="tests/reman/eval_reman.json"
)
eval_rem_cue_sadn.save_eval(
eval_name="cue_sadness", filename="tests/reman/eval_reman.json"
)
eval_rem_cue_anger.save_eval(
eval_name="cue_anger", filename="tests/reman/eval_reman.json"
)
eval_rem_cue_antic.save_eval(
eval_name="cue_anticipation", filename="tests/reman/eval_reman.json"
)
eval_rem_cue_disg.save_eval(
eval_name="cue_disgust", filename="tests/reman/eval_reman.json"
)
eval_rem_cue_other.save_eval(
eval_name="cue_other", filename="tests/reman/eval_reman.json"
)
eval_rem_cse.save_eval(eval_name="cause", filename="tests/reman/eval_reman.json")
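# Optional refactor sketch (added for illustration, not part of the original
# script): the repeated train / predict / evaluate blocks above could be driven
# from a single list of roles, e.g.:
#
#   roles = ["experiencer", "target", "cue-joy", "cue-fear", "cue-trust",
#            "cue-surprise", "cue-anticipation", "cue-sadness", "cue-anger",
#            "cue-disgust", "cue-other", "cause"]
#   for role in roles:
#       hmm = HMM(role)
#       hmm.train(dataset=train)
#       hmm.predictDataset(dataset=test)
#       Evaluation(dataset=test, role=role, threshold=0.8).save_eval(
#           eval_name=role.replace("-", "_"), filename="tests/reman/eval_reman.json")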
|
StarcoderdataPython
|
3301472
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
|
StarcoderdataPython
|
4983612
|
<filename>Methods/RichouxDeepNetwork/RichouxDeepNetwork.py<gh_stars>1-10
#Based on paper Comparing two deep learning sequence-based models for protein-protein interaction prediction by Richoux, Servantie, Bores, and Teletchea
import os
import sys
#add parent and grandparent to path
currentdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(currentdir)
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
parentdir = os.path.dirname(parentdir)
sys.path.append(parentdir)
from GenericNetworkModule import GenericNetworkModule
from GenericNetworkModel import GenericNetworkModel
import PPIPUtils
import time
import numpy as np
import torch
from NetworkRunnerCollate import NetworkRunnerCollate
from SimpleTorchDictionaryDataset import SimpleTorchDictionaryDataset
import torch.nn as nn
from joblib import dump, load
class RichouxFullNetwork(nn.Module):
def __init__(self,proteinLength=1166,encodingSize=24,linearSize=20,seed=1,):
super(RichouxFullNetwork, self).__init__()
torch.manual_seed(seed)
self.linear1 = nn.Linear(proteinLength*encodingSize,linearSize)
self.batchNorm1 = nn.BatchNorm1d(linearSize)
self.linear2 = nn.Linear(linearSize,linearSize)
self.batchNorm2 = nn.BatchNorm1d(linearSize)
self.linear3 = nn.Linear(linearSize*2,linearSize)
self.batchNorm3 = nn.BatchNorm1d(linearSize)
self.linear4 = nn.Linear(linearSize,2)
self.sigmoid = nn.Sigmoid()
self.apply(self.weight_init)
def weight_init(self,layer):
if isinstance(layer,nn.Linear) or isinstance(layer,nn.Conv1d):
torch.nn.init.xavier_uniform_(layer.weight)
if isinstance(layer,nn.LSTM):
for i in range(0,layer.num_layers):
torch.nn.init.xavier_uniform_(layer._parameters['weight_ih_l'+str(i)])
torch.nn.init.xavier_uniform_(layer._parameters['weight_hh_l'+str(i)])
def forward(self,x):
(protA, protB) = x
protLst = []
#run each protein through first few layers
for item in [protA, protB]:
#flatten proteins to single vector of data per protein in batch
item = item.view(item.shape[0],-1)
item = self.linear1(item)
item = self.batchNorm1(item)
item = self.linear2(item)
item = self.batchNorm2(item)
protLst.append(item)
protA = protLst[0]
protB = protLst[1]
x = torch.cat((protA,protB),dim=1)
x = self.linear3(x)
x = self.batchNorm3(x)
x = self.linear4(x)
#x = self.sigmoid(x)
#x = x.squeeze(1)
return x
class RichouxLSTMNetwork(nn.Module):
def __init__(self,encodingSize=24,convOutChannels=5,convKernelSize=20,hiddenSize=32,finalSize=25,seed=1):
super(RichouxLSTMNetwork, self).__init__()
torch.manual_seed(seed)
self.pooling = nn.MaxPool1d(3)
self.convLst = nn.ModuleList()
self.batchNormLst = nn.ModuleList()
for i in range(0,3):
self.convLst.append(nn.Conv1d(encodingSize if i ==0 else convOutChannels, convOutChannels, convKernelSize))
self.batchNormLst.append(nn.BatchNorm1d(convOutChannels))
self.LSTM = nn.LSTM(input_size=convOutChannels,hidden_size=hiddenSize,batch_first=True)
#self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU()
self.linear1 = nn.Linear(hiddenSize*2,finalSize)
self.batchNorm1 = nn.BatchNorm1d(finalSize)
self.linear2 = nn.Linear(finalSize,2)
self.apply(self.weight_init)
def weight_init(self,layer):
if isinstance(layer,nn.Linear) or isinstance(layer,nn.Conv1d):
torch.nn.init.xavier_uniform_(layer.weight)
if isinstance(layer,nn.LSTM):
for i in range(0,layer.num_layers):
torch.nn.init.xavier_uniform_(layer._parameters['weight_ih_l'+str(i)])
torch.nn.init.xavier_uniform_(layer._parameters['weight_hh_l'+str(i)])
def forward(self,x):
(protA, protB) = x
protLst = []
for item in [protA, protB]: #run each protein through LSTM/conv/pooling layers
for i in range(0,3):
#convolution on dim2, reducing/keeping channels (dim 1) at 5 (starts at 24)
item = self.convLst[i](item)
#apply relu
item = self.relu(item)
#max pool over sequence length (dim 2), dividing it by 3
item = self.pooling(item)
#batch norm over 5 channels
item = self.batchNormLst[i](item)
#flip axes to make hidden data (size 5, current in dim 2) last
item = item.permute(0,2,1)
#grab the last set of hidden values for each item in the batch
#pytorch specifically gives out this element (h_n) in addition to the standard output for each timestep. (This could also be accessed using output[0][:,-1,:] when batch is first)
#call squeeze to remove first dimension (numer_layers is 1)
item = self.LSTM(item)[1][0].squeeze(0)
#pytorch applies tanh to output by default
# item = self.tanh(item)
protLst.append(item)
protA = protLst[0]
protB = protLst[1]
x = torch.cat((protA,protB),dim=1)
x = self.linear1(x)
x = self.relu(x)
x = self.batchNorm1(x)
x = self.linear2(x)
return x
class RichouxModel(GenericNetworkModel):
def __init__(self,hyp={},encodingSize=24,modelType='LSTM',maxProteinLength=1166,fullGPU=False,deviceType=None,numEpochs=100,batchSize=2048,lr=1e-3,minLr=8e-4,schedFactor=.9,schedPatience=3,schedThresh=1e-2):
GenericNetworkModel.__init__(self,hyp=hyp,fullGPU=fullGPU,deviceType=deviceType,numEpochs=numEpochs,batchSize=batchSize,lr=lr,minLr=minLr,schedFactor=schedFactor,schedPatience=schedPatience,schedThresh=schedThresh)
self.modelType = hyp.get('modelType',modelType).upper()
if self.modelType not in ['LSTM','FULL']:
self.modelType = 'LSTM'
self.maxProteinLength = hyp.get('maxProteinLength',maxProteinLength)
self.encodingSize = encodingSize #can not override through hyperparams, passed in from model, based on provided encoding files
def genModel(self):
if self.modelType == 'LSTM':
self.net = RichouxLSTMNetwork(self.encodingSize,seed=self.seed)
else:
self.net = RichouxFullNetwork(self.maxProteinLength,self.encodingSize,seed=self.seed)
self.model = NetworkRunnerCollate(self.net,hyp=self.hyp)
#protein length should be at least 3**3 to survive 3 sets of maxpool(3) layers
class RichouxNetworkModule(GenericNetworkModule):
def __init__(self, hyperParams = {}, maxProteinLength=1166, modelType='LSTM',validationRate=0.1, encodingSize=24, seed=1):
GenericNetworkModule.__init__(self,hyperParams)
self.maxProteinLength = self.hyperParams.get('maxProteinLength',maxProteinLength)
self.modelType = modelType
self.validationRate = validationRate
self.seed = self.hyperParams.get('seed',seed)
self.encodingSize= self.hyperParams.get('encodingSize',encodingSize) #placeholder, will be overridden when loading data
def genModel(self):
self.model = RichouxModel(self.hyperParams,self.encodingSize,self.modelType,self.maxProteinLength)
def loadFeatureData(self,featureFolder):
self.dataLookup, self.dataMatrix = self.loadEncodingFileWithPadding(featureFolder+'OneHotEncoding24.encode',self.maxProteinLength)
self.encodingSize = self.dataMatrix.shape[1]
#protein length should be at least 3**3 to survive 3 sets of maxpool(3) layers
class RichouxNetworkModuleLSTM(RichouxNetworkModule):
def __init__(self, hyperParams = {}, maxProteinLength=1166, modelType='LSTM',validationRate=0.1, encodingSize=24, seed=1):
super().__init__(hyperParams,maxProteinLength,modelType,validationRate,encodingSize,seed)
class RichouxNetworkModuleFULL(RichouxNetworkModule):
def __init__(self, hyperParams = {}, maxProteinLength=1166, modelType='FULL',validationRate=0.1, encodingSize=24, seed=1):
super().__init__(hyperParams,maxProteinLength,modelType,validationRate,encodingSize,seed)
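# Hypothetical smoke test (added for illustration, not part of the original
# module): one forward pass of RichouxFullNetwork on random inputs shaped
# (batch, proteinLength, encodingSize); the expected output shape is (batch, 2).
if __name__ == '__main__':
    _net = RichouxFullNetwork(proteinLength=1166, encodingSize=24, linearSize=20)
    _protA = torch.rand(4, 1166, 24)
    _protB = torch.rand(4, 1166, 24)
    _out = _net((_protA, _protB))
    print(_out.shape)  # torch.Size([4, 2])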
|
StarcoderdataPython
|
6403361
|
from django.apps import AppConfig
class NoteapiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'apis.noteapi'
|
StarcoderdataPython
|
6483953
|
import os
import abc
import swamp
import threading
import numpy as np
import pandas as pd
from statistics import mean
import swamp.utils.swamplibrary
from swamp.logger import SwampLogger
from swamp.wrappers.gesamt import Gesamt
from itertools import groupby, combinations
from sklearn.metrics import silhouette_score
from sklearn.model_selection import ParameterSampler
ABC = abc.ABCMeta('ABC', (object,), {})
class Clustering(ABC):
"""Abstract class for SWAMP library clustering purposes, used to cluster similar helical pairs into ensembles
Implements data structures and methods commonly used in clustering tasks and optimal hyper-parameter search
:param int nthreads: number of threads to be used for fragment clustering (default 1)
:param int n_iter: number of iterations for the :py:func:`~swamp.clustering.clustering.grid_search` of the optimal \
        clustering hyper-parameters (default 100)
:param `~swamp.logger.swamplogger.SwampLogger` logger: logger instance to record log messages
:ivar bool error: True if errors have occurred at some point on the process of clustering
:ivar dict best_params: contains the optimal hyper-parameters as found on the \
:py:func:`~swamp.clustering.clustering.grid_search`
:ivar list labels: a list with the cluster labels assigned to each member of the \
:py:attr:`~swamp.clustering.clustering.similarity_mtx`
:ivar list child_threads: a list with the :py:attr:`threading.thread.name` of each child \
:py:obj:`threading.thread` instance used on the :py:func:`~swamp.clustering.clustering.grid_search`
    :ivar `pandas.DataFrame` rmsd_mtx: square dataframe with the rmsd distance across fragments in the library
    :ivar `pandas.DataFrame` similarity_mtx: square dataframe with the similarity across fragments in the library
    :ivar `pandas.DataFrame` nalign_mtx: square dataframe with the no. of aligned residues between fragments in the \
library
:ivar dict centroid_dict: dictionary with the centroids associated with each cluster id
:ivar dict cluster_dict: dictionary with the list of fragments ids that form each cluster
:ivar dict composition_dict: dictionary with the final composition of each ensemble
    :ivar dict ensemble_dict: dictionary with the list of fragment ids that form each ensemble
:ivar `threading.Semaphore` semaphore: a `threading.Semaphore` instance to control the execution of threads \
on :py:func:`~swamp.clustering.clustering.grid_search`
:ivar `~swamp.clustering.clustering.ParameterSearchResults` gridsearch_results: a \
:py:obj:`~swamp.clustering.clustering.ParameterSearchResults` instance with the results obtained on \
:py:func:`~swamp.clustering.clustering.grid_search`
"""
def __init__(self, nthreads=1, n_iter=100, logger=None):
self.error = False
if logger is None:
self.logger = SwampLogger(__name__)
self.logger.init(use_console=True, console_level='info')
else:
self.logger = logger
self.best_params = None
self.labels = None
self.child_threads = None
self.nthreads = nthreads
self.n_iter = n_iter
self.cluster_dict = None
self.centroid_dict = None
self.ensemble_dict = None
self.rmsd_mtx = None
self.nalign_mtx = None
self.similarity_mtx = None
self.semaphore = threading.Semaphore(value=self.nthreads)
self.gridsearch_results = ParameterSearchResults(self.logger)
self.composition_dict = {}
# ------------------ Abstract methods and properties ------------------
@abc.abstractmethod
def cluster(self):
""" Abstract method to run the clustering algorithm"""
pass
@abc.abstractmethod
def _clustering(self, **kwargs):
"""Abstract method to hold the clustering algorithm"""
pass
@property
@abc.abstractmethod
def _algorithm_name(self):
"""Abstract property to hold the name of the specific clustering algorithm"""
pass
@property
@abc.abstractmethod
def _hyper_params(self):
"""Abstract property to hold the hyperparameters for the specific clustering algorithm"""
pass
# ------------------ Some general properties ------------------
@property
def clustering_header(self):
"""The header to be displayed when initialising the logger"""
return """**********************************************************************
***************** SWAMP CLUSTERING *****************
**********************************************************************
"""
@property
def _param_sample(self):
"""Sample of hyperparameters to be tested in the :py:func:`~swamp.clustering.clustering.grid_search`"""
sampler = ParameterSampler(self._hyper_params, n_iter=self.n_iter, random_state=41)
return [d for d in sampler]
@property
    def distance_mtx(self):
        """Square matrix that corresponds to 1 - :py:attr:`~swamp.clustering.clustering.similarity_mtx`"""
return 1 - self.similarity_mtx
@property
def fragment_list(self):
"""Columns of the :py:attr:`~swamp.clustering.clustering.distance_mtx`, which correspond with the list of \
fragments in the library"""
return self.distance_mtx.columns
@property
    def _reference_input(self):
        """Dictionary with the input required by each clustering algorithm"""
return {"affinity": self.similarity_mtx,
"spectral": self.similarity_mtx,
"optics": self.distance_mtx,
"agglomerative": self.distance_mtx,
"dbscan": self.distance_mtx
}
# ------------------ Some protected methods ------------------
def _join_threads(self):
"""Join the threads listed at :py:attr:`~swamp.clustering.child_threads` to wait for their completion"""
mainthread = threading.current_thread()
self.logger.debug("Mainthread is %s" % mainthread.getName())
for t in threading.enumerate():
if t is not mainthread and t.getName() in self.child_threads:
self.logger.debug("joining thread %s" % t.getName())
t.join()
def _get_clst_qscore(self, frag_ids):
"""Method to get the average qscore among the models in a given cluster
:param tuple frag_ids: a list with the fragment ids of contained in the cluster
:returns: the average qscore among the models in the cluster (float)
"""
qscores = []
for frag_pair in combinations(frag_ids, 2):
frag_a_scores = self.similarity_mtx[frag_pair[0]]
qscores.append(frag_a_scores[frag_a_scores.index == frag_pair[1]].values[0])
return mean(qscores)
def _test_params(self, idx, **kwargs):
"""Method to test clustering the library of fragments with a given set of parameters
:param int idx: index of the current test within the :py:func:`~swamp.clustering.clustering.grid_search`
:param dict kwargs: arguments passed to :py:obj:`~swamp.cluster.clustering._clustering()`
"""
self.semaphore.acquire()
self.logger.debug("Grid search parameters %s" % dict(**kwargs))
clst = self._clustering(**kwargs)
clst.fit(self._reference_input[self._algorithm_name])
self.logger.debug("Model fit done %s" % dict(**kwargs))
        self.gridsearch_results.register(
new_results=pd.DataFrame([[idx] + self.assess_clustering(labels=clst.labels_)],
columns=["params_idx", "n_clst", "clst_size", "clst_qscore", "silhouette_score",
"singletons"]))
self.logger.debug("Done with grid search %s" % dict(**kwargs))
self.semaphore.release()
# ------------------ Some general methods ------------------
def get_clst_id(self, frag_id):
"""Get the unique cluster identifier where a given fragment is assigned
:param str frag_id: the id of the fragment of interest
:returns: the cluster id where the fragment can be found (str)
"""
if self.cluster_dict is None:
return None
for clst_id in self.cluster_dict.keys():
if frag_id in self.cluster_dict[clst_id]:
return clst_id
def get_centroid_id(self, frag_id):
"""Get the unique cluster identifier where a given centroid is assigned
:param str frag_id: the id of the centroid of interest
:returns: the cluster id where the fragment can be found (str)
"""
if self.centroid_dict is None:
return None
for clst_id in self.centroid_dict.keys():
if frag_id == self.centroid_dict[clst_id]:
return clst_id
def register_library(self, library):
"""Register a given :py:obj:`~swamp.utils.swamplibrary.SwampLibrary` instance in order to load the fragment \
distance info
:argument `~swamp.utils.swamplibrary.SwampLibrary` library: the \
        :py:obj:`~swamp.utils.swamplibrary.SwampLibrary` instance to be registered
        :raises TypeError: if `library` is not a :py:obj:`~swamp.utils.swamplibrary.SwampLibrary` instance
"""
if not isinstance(library, swamp.utils.swamplibrary.SwampLibrary):
raise TypeError('Library to be registered must be a swamp.utils.swamplibrary.SwampLibrary !')
nalign_mtx = library.nalign_matrix.fillna(0, inplace=False)
self.nalign_mtx = nalign_mtx.astype(int)
rmsd_mtx = library.rmsd_matrix.fillna(0, inplace=False)
self.rmsd_mtx = rmsd_mtx.astype(float)
qscore_mtx = library.qscore_matrix.fillna(0, inplace=False)
self.similarity_mtx = qscore_mtx.astype(float)
def grid_search(self):
"""Method to do a grid random search for a range of clustering hyper-parameters defined at \
:py:attr:`~swamp.clustering.clustering._hyper_params`"""
self.logger.info("Starting grid hyper parameter search with %s parallel threads and %s iterations" % (
self.nthreads, self.n_iter))
self.child_threads = []
for idx, params in enumerate(self._param_sample):
t = threading.Thread(target=self._test_params, args=(idx,), kwargs=(params))
self.logger.debug("Sending thread %s" % t.getName())
self.child_threads.append(t.getName())
t.start()
self.logger.info("Waiting for workers")
self._join_threads()
self.logger.debug("Grid parameter search is done!")
    def assess_clustering(self, labels):
        """Method to assess the quality of the obtained clustering
:param tuple labels: the labels assigned to each fragment
:returns: a tuple with no. clusters, average cluster size, average cluster qscore, silhouette score and the \
no. singletons
"""
# Clst size
tmp_labels = [x for x in labels if x != -1]
if len(tmp_labels) == 0:
return [np.nan] * 5
tmp_labels.sort()
clst_size = mean([len(list(group)) for key, group in groupby(tmp_labels)])
# Singletons
singletons = list(labels).count(-1)
# Clst qscore
clst_dict = self.get_cluster_dict(labels=labels, inplace=False)
qscore_list = []
for clst in clst_dict.keys():
if clst != -1:
qscore_list.append(self._get_clst_qscore(clst_dict[clst]))
clst_qscore = mean(qscore_list)
# Silhouette score
silhouette_sco = silhouette_score(self.distance_mtx, metric='precomputed', labels=labels)
return [len(set([x for x in tmp_labels])), clst_size, clst_qscore, silhouette_sco, singletons]
def get_cluster_dict(self, labels, inplace=True):
"""Method to generate a cluster dictionary containing the cluster identifiers as keys and the fragment names \
as the values
:param tuple labels: the labels assigned to each fragment in the library
:param bool inplace: if True, then it sets :py:attr:`~swamp.clustering.clustering.cluster_dict` (default True)
:returns: a dictionary containing the cluster id as keys and the frag ids as the values (if not `inplace`)
"""
self.logger.debug('Creating cluster dictionary')
clst_dict = {}
for idx, label in enumerate(labels):
if label in clst_dict.keys():
clst_dict[label].append(self.similarity_mtx.columns[idx])
else:
clst_dict[label] = [self.similarity_mtx.columns[idx]]
if inplace:
self.cluster_dict = clst_dict
else:
return clst_dict
def get_centroid_dict(self):
"""Get a dictionary with the centroids of each cluster at \
:py:attr:`~swamp.clustering.clustering.cluster dict`"""
self.logger.debug('Creating centroid dictionary')
if self.cluster_dict is None:
return None
self.centroid_dict = {}
for clst_id in self.cluster_dict.keys():
if clst_id != -1 and len(self.cluster_dict[clst_id]) > 1:
if len(self.cluster_dict[clst_id]) == 2:
self.centroid_dict[clst_id] = self.cluster_dict[clst_id][0]
else:
current_centroid = (0, None)
                    # Get the average qscore with respect to the other fragments in the cluster (except the current candidate)
for fragment in self.cluster_dict[clst_id]:
tmp_mtx = self.similarity_mtx[fragment].drop(fragment, 0)
tmp_mtx = tmp_mtx.drop([x for x in tmp_mtx.index.values if x not in self.cluster_dict[clst_id]],
0)
avg_qscore = mean(list(tmp_mtx))
                        # The candidate with the highest average qscore is the centroid
if avg_qscore > current_centroid[0]:
current_centroid = (avg_qscore, fragment)
self.centroid_dict[clst_id] = current_centroid[1]
def get_ensemble_dict(self, rmsd_threshold=0.7, nalign_threshold=30, qscore_threshold=None, nthreads=1):
"""Merge similar fragments to create a dictionary with fragment ensembles (clustering with replacement)
An rmsd and no. aligned residues threshold can be used, but if a qscore threshold is given, this one will
be used instead
:param float rmsd_threshold: the rmsd threshold at which fragments are included into the ensemble (default 0.7)
:param int nalign_threshold: threshold of aligned residues to include a fragment into an ensemble (default 30)
:param float qscore_threshold: qscore threshold at which fragments are included into the ensemble (default None)
:param int nthreads: number of threads to compute the ensemble optimal parameter (default 1)
"""
self.logger.debug('Creating ensemble dictionary')
self.ensemble_dict = {}
# Check all centroids
for clst_id in self.centroid_dict.keys():
if clst_id != -1:
self.ensemble_dict[clst_id] = self.get_ensemble(centroid_id=clst_id, rmsd_threshold=rmsd_threshold,
nalign_threshold=nalign_threshold, nthreads=nthreads,
qscore_threshold=qscore_threshold)
def get_ensemble(self, centroid_id, rmsd_threshold=0.7, nalign_threshold=30, qscore_threshold=None, nthreads=1):
"""Search for fragments to form an ensemble given a centroid identifier
Fragments within the threshold distance from the centroid will be included in the ensemble. Other centroids \
will be excluded from this search.
:param int centroid_id: centroid identifier of the centroid of interest
:param float rmsd_threshold: the rmsd threshold at which fragments are included into the ensemble (default 0.7)
:param int nalign_threshold: threshold of aligned residues to include a fragment into an ensemble (default 30)
:param float qscore_threshold: qscore threshold at which fragments are included into the ensemble (default None)
:param int nthreads: number of threads to compute the ensemble optimal model alignment (default 1)
:returns: a :py:obj:`gemmi.Structure` hierarchy with the ensemble
"""
if self.centroid_dict is None:
self.get_centroid_dict()
def _lower_threshold(threshold, logger):
threshold -= 0.03
logger.debug('No fragments found, lowering qscore threshold to %s' % threshold)
return list(set(tmp_dist_mtx.index[tmp_dist_mtx.astype(float) >= threshold].tolist())), threshold
def _increase_threshold(threshold, logger):
threshold += 0.03
logger.debug('Found %s fragments, increasing qscore threshold to %s' % (
len(cluster_fragments), threshold))
return list(set(tmp_dist_mtx.index[tmp_dist_mtx.astype(float) >= threshold].tolist())), threshold
self.logger.debug('Retrieving ensemble for cluster %s' % centroid_id)
fname = os.path.join(swamp.FRAG_PDB_DB, '{}.pdb')
centroid = self.centroid_dict[centroid_id]
        # Get the valid fragments close enough to this centroid
if qscore_threshold is None:
tmp_dist_mtx = self.rmsd_mtx[centroid].drop(centroid, 0)
good_distance_fragments = tmp_dist_mtx.index[tmp_dist_mtx.astype(float) <= rmsd_threshold].tolist()
tmp_nalign_mtx = self.nalign_mtx[centroid].drop(centroid, 0)
good_nalign_fragments = tmp_nalign_mtx.index[tmp_nalign_mtx.astype(int) >= nalign_threshold].tolist()
cluster_fragments = list(set(good_nalign_fragments) & set(good_distance_fragments))
else:
tmp_dist_mtx = self.similarity_mtx[centroid].drop(centroid, 0)
good_qscore_fragments = tmp_dist_mtx.index[tmp_dist_mtx.astype(float) >= qscore_threshold].tolist()
cluster_fragments = list(set(good_qscore_fragments))
        # Adjust the qscore threshold until the ensemble contains a workable number of fragments (1 to 6)
while len(cluster_fragments) == 0 and qscore_threshold > 0.8:
cluster_fragments, qscore_threshold = _lower_threshold(qscore_threshold, self.logger)
while len(cluster_fragments) > 6 and qscore_threshold < 0.92:
cluster_fragments, qscore_threshold = _increase_threshold(qscore_threshold, self.logger)
# Add the centroid to the ensemble and exclude any other centroid
cluster_fragments = list(set(cluster_fragments) - set(self.centroid_dict.values()))
cluster_fragments.append(centroid)
# Only add the ensemble if this combination of fragments didn't appear yet
if sorted(cluster_fragments) not in self.composition_dict.values():
self.composition_dict[centroid_id] = sorted(cluster_fragments)
# If there are enough good fragments, make an ensemble and write into a file
self.logger.debug('Retrieving optimum alignment for %s fragments [%s]' % (
len(cluster_fragments), ', '.join(cluster_fragments)))
gesamt, hierarchy = Gesamt.get_optimum_alignment([fname.format(frag_id) for frag_id in cluster_fragments],
nthreads=nthreads, logger=self.logger)
if gesamt:
self.logger.debug('Optimal qscore: %s' % gesamt.qscore)
return hierarchy
class ParameterSearchResults(object):
"""Class to hold the results from a multi-threaded :py:func:`~swamp.clustering.clustering.grid_search`
Implements :py:obj:`~threading.Semaphore` methods to regulate thread I/O into a result list
:param `~swamp.logger.swamplogger.SwampLogger` logger: logger instance to record thread log messages
:ivar `threading.lock` lock: lock to control I/O to the result instance
:ivar `pandas.DataFrame` value: dataframe with the results of the grid search
"""
def __init__(self, logger):
self.lock = threading.Lock()
self.value = pd.DataFrame(
columns=["params_idx", "n_clst", "clst_size", "clst_qscore", "silhouette_score", "singletons"])
self.logger = logger
def register(self, new_results):
"""Register a given set of new results into :py:attr:`~swamp.clustering.ParameterSearchResults.value`
:param `pandas.DataFrame` new_results: the set of new results to be registered
"""
self.logger.debug('Waiting for lock')
self.lock.acquire()
self.logger.debug('Acquired lock')
self.value = pd.concat([self.value, new_results], 0)
self.lock.release()
self.logger.debug('Released lock')
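# Hypothetical concrete subclass (added for illustration only; it is not part of
# the original module and SWAMP's real clustering backends may differ). It fills
# in the abstract hooks using scikit-learn's AgglomerativeClustering; note that
# the 'affinity' keyword is named 'metric' in newer scikit-learn releases.
from sklearn.cluster import AgglomerativeClustering


class ExampleAgglomerativeClustering(Clustering):
    """Toy :py:obj:`Clustering` subclass used to illustrate the abstract interface"""

    _algorithm_name = "agglomerative"
    _hyper_params = {"n_clusters": [25, 50, 75, 100], "linkage": ["average", "complete"]}

    def _clustering(self, **kwargs):
        # 'ward' linkage is incompatible with precomputed distances, so default to 'average'
        kwargs.setdefault("linkage", "average")
        return AgglomerativeClustering(affinity="precomputed", **kwargs)

    def cluster(self):
        # Assumes register_library() has been called so the distance matrix is available;
        # uses the best hyper-parameters found by grid_search() when present
        clst = self._clustering(**(self.best_params if self.best_params else {}))
        clst.fit(self._reference_input[self._algorithm_name])
        self.labels = list(clst.labels_)
        self.get_cluster_dict(labels=self.labels, inplace=True)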
|
StarcoderdataPython
|
3279758
|
"""This module supplies various reindex functions.
"""
import logging
import dateutil.parser
from elasticsearch import helpers
from . import util
__all__ = ['date_reindex']
logger = logging.getLogger(__name__)
def date_reindex(url, source_index_name, target_index_name, date_field=None,
delete_docs=False, query=None, use_same_id=True,
scan_kwargs={}):
"""Re-index all documents in a source index to the target index.
The re-index takes an optional query to limit the source documents.
If a date field identifier is used, the target index name is assumed to be a
template that will be called with the date field as a format parameter. This
    allows temporal re-indexing from e.g. "sourceindex" to
    "targetindex-2015-01-02". If no date field is given, the target index name
    is used as-is.
:param url: Cluster url
:type url: str
:param source_index_name: The name of the source index to re-index from.
:type source_index_name: str
:param target_index_name: The name of the target index to re-index to.
:type target_index_name: str
:param date_field: The name of a date field in the source documents to use
for temporal re-indexing into the target index.
:type date_field: str
:param delete_docs: Whether or not to delete the source documents. Default
is False.
:type delete_docs: bool
:param query: A query to use for the source documents
:type query: dict
:param use_same_id: Whether or not to use the same ID as the source. If
True, will use the exact same ID as the source. If False, will re-create
a new ID automatically. Default is True.
:param scan_kwargs: Extra arguments for the index scanner. Similar to
scan_kwargs in helpers.reindex
:type scan_kwargs: dict
:returns: The result of an iterating bulk operation.
"""
# Inspired by the reindex helper in the elasticsearch lib
logger.info('Starting reindex from {} to {}'
.format(source_index_name, target_index_name))
client = util.get_client(url)
docs = helpers.scan(client,
index=source_index_name,
query=query,
scroll='5m',
**scan_kwargs)
def _docs_to_operations(hits):
for h in hits:
if date_field and date_field not in h['_source']:
logger.error('Date field not found in {}'.format(h['_id']))
continue
delete_op = None
if delete_docs:
delete_op = {
'_op_type': 'delete',
'_index': h['_index'],
'_type': h['_type'],
'_id': h['_id']
}
new_index_name = target_index_name
if date_field:
date_value = dateutil.parser.parse(h['_source'][date_field])
new_index_name = new_index_name.format(date_value)
h['_index'] = new_index_name
if not use_same_id:
del h['_id']
if 'fields' in h:
h.update(h.pop('fields'))
yield h
if delete_op is not None:
yield delete_op
kwargs = {
'stats_only': True,
}
return helpers.bulk(client, _docs_to_operations(docs), chunk_size=1000,
**kwargs)
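# Hypothetical usage sketch (added for illustration; the cluster URL, index
# names and date field below are placeholders):
#
#   success, errors = date_reindex(
#       url='http://localhost:9200',
#       source_index_name='sourceindex',
#       target_index_name='targetindex-{:%Y-%m-%d}',
#       date_field='created_at',
#   )
#   # with a stats-only bulk call, `success` is a count and `errors` the number of failures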
|
StarcoderdataPython
|
1883275
|
<gh_stars>0
# Special Pythagorean triplet
#
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
# a^2 + b^2 = c^2
#
# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
#
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
def abc_1000():
for a in range(1, 500):
for b in range(1, 1000-a):
yield a, b, 1000 - a - b
options = list(abc_1000())
for o in options:
assert o[0] + o[1] + o[2] == 1000
for a, b, c in abc_1000():
if a < b and b < c and a*a + b*b == c*c:
print(a, b, c, a*b*c)
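# Alternative sketch (added for illustration): from a + b + c = 1000 and
# a^2 + b^2 = c^2, b can be solved directly as b = (500000 - 1000*a) / (1000 - a),
# which removes the inner loop:
#
#   for a in range(1, 500):
#       b, rem = divmod(500000 - 1000 * a, 1000 - a)
#       if rem == 0 and a < b:
#           c = 1000 - a - b
#           if b < c:
#               print(a, b, c, a * b * c)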
|
StarcoderdataPython
|
11307632
|
<gh_stars>1-10
HOST = 'localhost' # 10.19.179.123'
# HOST = '10.10.207.61'
PORT = 1081
BUFFSIZE = 16777215
|
StarcoderdataPython
|
8121087
|
#!/usr/bin/python3
# ros
from scipy.ndimage.measurements import label
import rospy
import ros_numpy
import message_filters
from sensor_msgs.msg import Image, PointCloud2
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import Float32MultiArray, MultiArrayLayout, MultiArrayDimension
from SegmentationMapping.msg import ImageLabelDistribution
from pprint import pprint
# map label to color
from label2color import label_to_color, background
from NeuralNetConfigs import NeuralNetConfigs
# visualize pcl
#from helper import publish_pcl_pc2_label
import cv2, time
import numpy as np
from threading import Lock
from scipy.ndimage import rotate, zoom
import sys, os
if sys.version_info[0] < 3:
import Queue as queue
else:
import queue
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# debug
import pdb
# Extra
import PIL
import time
class SegmentationNode:
def __init__(self):
rospy.init_node('~', anonymous=True)
neural_net_input_width = 640
neural_net_input_height = 480
#neural_net_input_width = 566
#neural_net_input_height = 421
# Load the network configurations into a tuple
self.network_config = NeuralNetConfigs("/home/benoit/segmentation_mapping/git/SegmentationMapping/config/mobilenet_nclt/optimized_graph_mobilenet_trt32_2019-06-15.pb",\
14,\
"network/input/Placeholder:0",\
"", \
neural_net_input_width,\
neural_net_input_height, \
"network/output/ClassIndexPrediction:0",\
"network/output/ClassDistribution:0",\
True)
print(self.network_config)
# Initialize the neural network
self.tf_init()
self.counter = 0
self.callback()
def callback(self):
print("segmentation")
now = rospy.Time.now()
#original_img = self.bridge.imgmsg_to_cv2(img_msg , desired_encoding="rgb8")
original_img = cv2.imread("/home/benoit/segmentation_mapping/data/input.png", cv2.IMREAD_COLOR)
#original_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)
labeled_img, distribution = self.infer(original_img)
#self.publish_label_and_distribution(labeled_img, distribution, img_msg.header)
def tf_init(self):
print("open graph..")
with tf.gfile.GFile(self.network_config.path, 'rb') as f:
self.graph_def = tf.GraphDef()
self.graph_def.ParseFromString(f.read())
print("open graph complete")
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.y, = tf.import_graph_def(self.graph_def, return_elements=[self.network_config.label_output_tensor], name='')
self.G = tf.get_default_graph()
self.x = self.G.get_tensor_by_name(self.network_config.image_input_tensor)
self.dist = self.G.get_tensor_by_name(self.network_config.distribution_output_tensor)
tf.global_variables_initializer().run(session=self.sess)
print("Tf init finish")
def publish_label_and_distribution(self, labeled_img, distribution, header, lidar_header=None):
now = rospy.Time.now()
rospy.loginfo("Going to publish at time %i %i\n\n", now.secs, now.nsecs)
label_msg = self.bridge.cv2_to_imgmsg(labeled_img, encoding="mono8")
label_msg.header = header
distribution_msg = ImageLabelDistribution()
m_arr = Float32MultiArray()
m_arr.layout.data_offset = 0
m_arr.layout.dim = [MultiArrayDimension() for _ in range(3)]
m_arr.layout.dim[0].label = "h"
m_arr.layout.dim[0].size = labeled_img.shape[0]
m_arr.layout.dim[0].stride = self.network_config.num_classes * labeled_img.size
m_arr.layout.dim[1].label = "w"
m_arr.layout.dim[1].size = labeled_img.shape[1]
m_arr.layout.dim[1].stride = self.network_config.num_classes * labeled_img.shape[1]
m_arr.layout.dim[2].label = "c"
m_arr.layout.dim[2].size = self.network_config.num_classes
m_arr.layout.dim[2].stride = self.network_config.num_classes
m_arr.data = distribution.flatten().tolist()
distribution_msg.header = header
distribution_msg.distribution = m_arr
self.labeled_img_pub.publish(label_msg)
self.distribution_pub.publish(distribution_msg)
#if lidar_header is not None:
# distribution_msg.header.stamp = lidar_header.stamp
# self.distribution_at_lidar_time_pub.publish(distribution_msg)
now = rospy.Time.now()
rospy.loginfo("End callabck at time %i %i\n\n", now.secs, now.nsecs)
def infer(self, rgb_img):
num_class = self.network_config.num_classes
#if rgb_img.shape[0] != self.network_config.input_height or \
# rgb_img.shape[1] != self.network_config.input_width:
# rgb = cv2.resize(rgb_img, \
# dsize=(self.network_config.input_width, self.network_config.input_height),\
# interpolation=cv2.INTER_CUBIC)
#else:
# rgb = rgb_img
rgb = rgb_img
rgb = np.expand_dims(rgb, axis=0)
label_out, dist_out = self.sess.run([self.y, self.dist], feed_dict={self.x: rgb})
now = rospy.Time.now()
dist_out = dist_out[0, :, :, :]
label_out = label_out[0, :, : ].astype(np.uint8)
rospy.loginfo("after segmentation time %i %i", now.secs, now.nsecs )
# Generating colored output
# start_time = time.time()
if self.network_config.enable_colored_labeling:
colored_label_out = np.zeros(shape=(label_out.shape[0], label_out.shape[1], 3))
for i in range(len(colored_label_out)):
for j in range(len(colored_label_out[i])):
colored_label_out[i][j] = np.array(label_to_color[label_out[i][j]]) # convert labels into color
colored_label_out = colored_label_out.astype(np.uint8) # turn into integer
# end_time = time.time()
# print("TIME TAKEN: ", end_time - start_time)
pil_image = PIL.Image.fromarray(colored_label_out, 'RGB')
opencv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
cv2.imwrite("/home/benoit/segmentation_mapping/data/output.png", opencv_image)
return label_out, dist_out
def spin(self):
rospy.spin()
if __name__ == "__main__":
node = SegmentationNode()
#node.spin()
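# Optional vectorization sketch (added for illustration; assumes the keys of
# label_to_color are small non-negative integer class ids):
#
#   palette = np.zeros((max(label_to_color.keys()) + 1, 3), dtype=np.uint8)
#   for lbl, rgb in label_to_color.items():
#       palette[lbl] = rgb
#   colored_label_out = palette[label_out]  # (H, W, 3), replaces the nested loops in infer()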
|
StarcoderdataPython
|