id (stringlengths 2–8) | text (stringlengths 16–264k) | dataset_id (stringclasses 1)
---|---|---|
9610340
|
<gh_stars>0
import model.m_mysql as db
class MUser(object):
@staticmethod
def get_user_id_of_account_id(account_id):
'''
Get the user id corresponding to the given account id
@param account_id:int account id
@return int user_id
@version v0.0.1 闫涛 2019-03-16
'''
sql = 'select user_id from t_user where account_id=%s'
params = (account_id,)  # trailing comma makes this a one-element tuple so %s binds correctly
return db.query(sql, params)
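# A minimal, hypothetical usage sketch (assumes a configured database and that
# an account with id 1 exists; db.query is expected to return the fetched rows):
if __name__ == '__main__':
    print(MUser.get_user_id_of_account_id(1))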
|
StarcoderdataPython
|
11343400
|
<filename>step1/util/metrics.py
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
import math
from . import pytorch_ssim
import pdb
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size/2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size))
return window
def SSIM(img1, img2):
(_, channel, _, _) = img1.size()
window_size = 11
window = create_window(window_size, channel)
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)  # // keeps padding an int
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def ssim(img1,img2):
img1 = torch.from_numpy(np.rollaxis(img1, 2)).float().unsqueeze(0)/255.0
img2 = torch.from_numpy(np.rollaxis(img2, 2)).float().unsqueeze(0)/255.0
img1 = Variable( img1, requires_grad=False) # torch.Size([256, 256, 3])
img2 = Variable( img2, requires_grad = False)
ssim_value = float(pytorch_ssim.ssim(img1, img2))
print(ssim_value)
return ssim_value
def PSNR(img1, img2):
mse = np.mean( (img1 - img2) ** 2 )
if mse == 0:
return 100
PIXEL_MAX = 255.0
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
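# A minimal, hypothetical usage sketch (not part of the original module):
# img_a and img_b stand for two HxWx3 uint8 numpy images of the same size.
def _example_usage(img_a, img_b):
    score = ssim(img_a, img_b)  # structural similarity via pytorch_ssim
    # cast to float first so the pixel difference does not wrap around uint8
    peak = PSNR(img_a.astype(np.float64), img_b.astype(np.float64))
    return score, peak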
|
StarcoderdataPython
|
1815327
|
<reponame>blackhatethicalhacking/dfirtrack
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from dfirtrack_main.logger.default_logger import info_logger
from dfirtrack_main.models import System
import csv
from time import strftime
@login_required(login_url="/login")
def systems(request):
# create csv MIME type object
sod = HttpResponse(content_type='text/csv')
# define filename
sod['Content-Disposition'] = 'attachment; filename="sod.csv"'
# create file object for writing lines
sod_writer = csv.writer(sod)
# write headline
sod_writer.writerow([
'ID',
'System',
'Status',
'Reason',
'Recommendation',
'Type',
'IP',
'Domain',
'DNS Name',
'OS',
'Company',
'Location',
'Serviceprovider',
'Created',
'Last modified',
])
# get all System objects ordered by system_name
systems = System.objects.all().order_by("system_name")
# iterate over systems
for system in systems:
# skip system depending on export variable
if system.system_export_spreadsheet == False:
continue
# use an empty string if the foreign key is not set
if system.reason == None:
reason = ''
else:
reason = system.reason.reason_name
# use an empty string if the foreign key is not set
if system.recommendation == None:
recommendation = ''
else:
recommendation = system.recommendation.recommendation_name
# use an empty string if the foreign key is not set
if system.systemtype == None:
systemtype = ''
else:
systemtype = system.systemtype.systemtype_name
# get all ips of system
ips_all = system.ip.all()
# count ips
n = system.ip.count()
# create empty ip string
ip = ''
# set counter
i = 1
# iterate over ip objects in ip list
for ip_obj in ips_all:
# append the current ip to the ip string
ip = ip + ip_obj.ip_ip
# add newline except for last ip
if i < n:
ip = ip + '\n'
i = i + 1
# use an empty string if the foreign key is not set
if system.domain == None:
domain = ''
else:
domain = system.domain.domain_name
# use an empty string if the foreign key is not set
if system.dnsname == None:
dnsname = ''
else:
dnsname = system.dnsname.dnsname_name
# use an empty string if the foreign key is not set
if system.os == None:
os = ''
else:
os = system.os.os_name
# get all companies of system
companys_all = system.company.all()
# count companies
n = system.company.count()
# create empty company string
company = ''
# set counter
i = 1
# iterate over company objects in company list
for company_obj in companys_all:
# append the current company to the company string
company = company + company_obj.company_name
# add newline except for last company
if i < n:
company = company + '\n'
i = i + 1
# use an empty string if the foreign key is not set
if system.location == None:
location = ''
else:
location = system.location.location_name
# use an empty string if the foreign key is not set
if system.serviceprovider == None:
serviceprovider = ''
else:
serviceprovider = system.serviceprovider.serviceprovider_name
# prepare string values for datetimes
create_time = system.system_create_time.strftime('%Y-%m-%d %H:%M')
modify_time = system.system_modify_time.strftime('%Y-%m-%d %H:%M')
# write a line for every system
sod_writer.writerow([
system.system_id,
system.system_name,
system.systemstatus.systemstatus_name,
reason,
recommendation,
systemtype,
ip,
domain,
dnsname,
os,
company,
location,
serviceprovider,
create_time,
modify_time,
])
# write an empty row
sod_writer.writerow([])
# prepare string value for the current datetime
actualtime = strftime('%Y-%m-%d %H:%M')
# write meta information
sod_writer.writerow(['SOD created:', actualtime])
creator = request.user
sod_writer.writerow(['Created by:', creator])
# call logger
info_logger(str(request.user), " SYSTEM_CSV_CREATED")
# return csv object
return sod
|
StarcoderdataPython
|
4998404
|
import random
import string
EOL = '\r\n'
def gen_str(length=10, letters=string.ascii_letters+string.digits):
return "".join([random.choice(letters) for n in range(length)])
|
StarcoderdataPython
|
6575558
|
import os
def count_lines(start=".", lines=0, blacklisted_dirs=["venv"], file_extensions=["py"]):
for file in os.listdir(start):
relative_path = os.path.join(start, file)
if os.path.isfile(relative_path) and relative_path.split(".")[-1] in file_extensions:
with open(relative_path, 'r', encoding='utf-8') as f:
lines += len(f.readlines())
elif os.path.isdir(relative_path) and file not in blacklisted_dirs:
lines = count_lines(relative_path, lines, blacklisted_dirs, file_extensions)
return lines
def find_files(start=".", files = [], blacklisted_dirs=["venv"], file_extensions=["py"]):
for file in os.listdir(start):
relative_path = os.path.join(start, file)
if os.path.isfile(relative_path) and relative_path.split(".")[-1] in file_extensions:
yield relative_path
elif os.path.isdir(relative_path) and file not in blacklisted_dirs:
yield from find_files(relative_path, blacklisted_dirs=blacklisted_dirs, file_extensions=file_extensions)  # keyword args so nothing shifts into the unused `files` parameter
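# A minimal usage sketch (assumes it is run from a project root; directories
# named "venv" are skipped by default):
if __name__ == "__main__":
    print("total Python lines:", count_lines("."))
    for path in find_files("."):
        print(path)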
|
StarcoderdataPython
|
6648042
|
<reponame>AdamKlekowski/moler<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Testing command specific API
Command is a type of ConnectionObserver.
Testing ConnectionObserver API conformance of Command is done
inside test_connection_observer.py (as parametrized tests).
- call as function (synchronous)
- call as future (asynchronous)
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import importlib
import pytest
from moler.command import Command
from moler.threaded_moler_connection import ThreadedMolerConnection
from moler.helpers import instance_id
from moler.io.raw.memory import FifoBuffer
def test_command_has_means_to_retrieve_embedded_command_string(do_nothing_command__for_major_base_class):
"""Command needs to start from sending 'command string' to some device"""
command_instance = do_nothing_command__for_major_base_class
assert hasattr(command_instance, "command_string")
def test_str_conversion_of_command_object():
"""
String conversion shows embedded command string, class of command object
and id to allow for differentiating between multiple instances of same command
"""
class PingCmd(Command):
def __init__(self, host='localhost', connection=None):
super(PingCmd, self).__init__(connection=connection)
self.command_string = 'ping {}'.format(host)
def data_received(self, data, recv_time):
pass # not important now
ping = PingCmd()
assert 'PingCmd("ping localhost", id:{})'.format(instance_id(ping)) == str(ping)
ping = PingCmd(host='127.0.0.1')
assert 'PingCmd("ping 127.0.0.1", id:{})'.format(instance_id(ping)) == str(ping)
ping.command_string = ''
assert 'PingCmd("<EMPTY COMMAND STRING>", id:{})'.format(instance_id(ping)) == str(ping)
def test_str_conversion_of_command_object_encodes_newline_for_display():
"""Important for logs and troubleshooting"""
class PingCmd(Command):
def __init__(self, host='localhost', connection=None):
super(PingCmd, self).__init__(connection=connection)
self.command_string = 'ping {}\n'.format(host)
def data_received(self, data, recv_time):
pass # not important now
ping = PingCmd()
assert r'PingCmd("ping localhost<\n>",' in str(ping) # newline is visible in < > braces
ping.command_string = 'ping localhost\\n'
assert r'PingCmd("ping localhost\n",' in str(ping) # \n (two characters string) is visible as \n string
def test_repr_conversion_of_command_object():
"""
repr() conversion shows same as str() plus embedded connection used by command
"""
moler_conn = ThreadedMolerConnection(decoder=lambda data: data.decode("utf-8"))
class LsCmd(Command):
def __init__(self, options='-l', connection=None):
super(LsCmd, self).__init__(connection=connection)
self.command_string = 'ls {}'.format(options)
def data_received(self, data, recv_time):
pass # not important now
ls = LsCmd(connection=moler_conn)
# (1) command with ThreadedMolerConnection not yet glued to ext-io
assert 'LsCmd("ls -l", id:{}, using ThreadedMolerConnection(id:{})-->[?])'.format(instance_id(ls), instance_id(moler_conn)) == repr(ls)
# TODO: add test for <ThreadedMolerConnection( id:{}>
# (2) command with ThreadedMolerConnection glued to ext-io
ext_io_connection = FifoBuffer(moler_connection=moler_conn)
how2send_repr = repr(ext_io_connection.write)
assert 'LsCmd("ls -l", id:{}, using ThreadedMolerConnection(id:{})-->[{}])'.format(instance_id(ls), instance_id(moler_conn), how2send_repr) == repr(ls)
# TODO: move ThreadedMolerConnection(id:{})-->[{}])'.format(instance_id(moler_conn), how2send_repr) into ThreadedMolerConnection __repr__ test
# TODO: and here just:
# assert 'LsCmd("ls -l", id:{}, using {})'.format(instance_id(ls), repr(moler_conn)) == repr(ls)
# (3) command without connection
ls.connection = None
assert 'LsCmd("ls -l", id:{}, using <NO CONNECTION>)'.format(instance_id(ls)) == repr(ls)
# TODO: generic - shift into ConnectionObserver; here just show that command's repr adds command string
def test_command_string_is_required_to_start_command(command_major_base_class):
from moler.exceptions import NoCommandStringProvided
moler_conn = ThreadedMolerConnection()
command_class = do_nothing_command_class(base_class=command_major_base_class)
command = command_class(connection=moler_conn)
assert not command.command_string # ensure it is empty before starting command
with pytest.raises(NoCommandStringProvided) as error:
command.start() # start the command-future (background run)
assert error.value.command == command
assert 'for {}'.format(str(command)) in str(error.value)
assert 'You should fill .command_string member before starting command' in str(error.value)
def test_command_string_is_required_to_call_command(command_major_base_class):
import threading
from moler.exceptions import NoCommandStringProvided
moler_conn = ThreadedMolerConnection()
command_class = do_nothing_command_class(base_class=command_major_base_class)
command = command_class(connection=moler_conn)
assert not command.command_string # ensure it is empty before starting command
def command_in_thread():
with pytest.raises(NoCommandStringProvided) as error:
command()
assert error.value.command == command
assert 'for {}'.format(str(command)) in str(error.value)
assert 'You should fill .command_string member before starting command' in str(error.value)
cmd_thrd = threading.Thread(target=command_in_thread)
cmd_thrd.start()
cmd_thrd.join()
command = command_class(connection=moler_conn)
with pytest.raises(NoCommandStringProvided) as error:
command() # call the command-future (foreground run)
assert error.value.command == command
assert 'for {}'.format(str(command)) in str(error.value)
assert 'You should fill .command_string member before starting command' in str(error.value)
def test_calling_command_sends_command_string_over_connection(do_nothing_command_class__for_major_base_class,
connection_to_remote):
"""Command as function"""
from moler.exceptions import ConnectionObserverTimeout
class QuickCmd(do_nothing_command_class__for_major_base_class):
def await_done(self, timeout=0.1):
return super(QuickCmd, self).await_done(timeout=timeout)
ext_io = connection_to_remote
ping = QuickCmd(connection=ext_io.moler_connection)
ping.command_string = 'ping localhost'
with ext_io.open():
try:
ping() # call the command-future (foreground run)
except ConnectionObserverTimeout:
pass
assert b'ping localhost' in ext_io.remote_endpoint()
def test_calling_start_on_command_sends_command_string_over_connection(do_nothing_command_class__for_major_base_class,
connection_to_remote):
"""Command as future"""
class QuickCmd(do_nothing_command_class__for_major_base_class):
def await_done(self, timeout=0.1):
return super(QuickCmd, self).await_done(timeout=timeout)
ext_io = connection_to_remote
ping = QuickCmd(connection=ext_io.moler_connection)
ping.command_string = 'ping localhost'
with ext_io.open():
ping.start() # start background-run of command-future
assert b'ping localhost' in ext_io.remote_endpoint()
ping.cancel()
def test_command_is_running_after_sending_command_string(do_nothing_command__for_major_base_class):
"""
Default behaviour is:
after sending command string to device we treat command as running since
we have just activated some action on device
!!!!!!!!!!!!
OR: when it is run in some 'feeder process' (thread, process, asyncio loop, Twisted loop)
but even if we have no loop to progress our python-command
the real command on device has started since we have called it over connection
!!!!!!!!!!!!
"""
ping = do_nothing_command__for_major_base_class
class TheConnection(object):
def sendline(self, data):
assert data == 'ping localhost' # ping command to be started on some shell
assert ping.running() # I'm in connection's send - command object should assume "real CMD (ping) is running"
def subscribe(self, observer, connection_closed_handler):
pass
def unsubscribe(self, observer, connection_closed_handler):
pass
ping.connection = TheConnection()
ping.command_string = 'ping localhost'
assert not ping.running()
ping.start() # start the command-future
assert ping.running()
ping.cancel()
# --------------------------- resources ---------------------------
@pytest.fixture(params=['command.Command'])
def command_major_base_class(request):
module_name, class_name = request.param.rsplit('.', 1)
module = importlib.import_module('moler.{}'.format(module_name))
klass = getattr(module, class_name)
return klass
def do_nothing_command_class(base_class):
"""Command class that can be instantiated (overwritten abstract methods); uses different base class"""
class DoNothingCommand(base_class):
def data_received(self, data, recv_time): # we need to overwrite it since it is @abstractmethod
pass # ignore incoming data
return DoNothingCommand
@pytest.fixture
def do_nothing_command_class__for_major_base_class(command_major_base_class):
klass = do_nothing_command_class(base_class=command_major_base_class)
return klass
@pytest.fixture
def do_nothing_command__for_major_base_class(do_nothing_command_class__for_major_base_class):
instance = do_nothing_command_class__for_major_base_class()
return instance
@pytest.fixture
def connection_to_remote():
"""
Any external-IO connection that embeds Moler-connection
Allows checking whether data sent by the command reached the remote side via:
`data in conn.remote_endpoint()`
"""
class RemoteConnection(FifoBuffer):
def remote_endpoint(self):
"""Simulate remote endpoint that gets data"""
return self.buffer
ext_io = RemoteConnection(moler_connection=ThreadedMolerConnection(encoder=lambda data: data.encode("utf-8"),
decoder=lambda data: data.decode("utf-8")))
return ext_io
|
StarcoderdataPython
|
142971
|
"""
996. Number of Squareful Arrays
Given an array A of non-negative integers, the array is squareful if for every pair of adjacent elements, their sum is a perfect square.
Return the number of permutations of A that are squareful. Two permutations A1 and A2 differ if and only if there is some index i such that A1[i] != A2[i].
Example 1:
Input: [1,17,8]
Output: 2
Explanation:
[1,8,17] and [17,8,1] are the valid permutations.
Example 2:
Input: [2,2,2]
Output: 1
"""
# dfs
# most difficult part is to associate this problem with graph
# Runtime: 24 ms, faster than 97.78% of Python3 online submissions for Number of Squareful Arrays.
# Memory Usage: 13 MB, less than 100.00% of Python3 online submissions for Number of Squareful Arrays.
import collections
import math
from typing import List
class Solution:
def numSquarefulPerms(self, A: List[int]) -> int:
graph = collections.defaultdict(set)
n = len(A)
for i in range(n-1):
is_met = False
for j in range(n):
if j in graph[i]:
is_met = True
continue
tmp = A[j] + A[i]
if self.check_perfect(tmp):
is_met = True
graph[j].add(i)
graph[i].add(j)
if not is_met:
return 0
if n-1 not in graph:
return 0
res = 0
val_set = set([])
for i in range(n):
tmp = [-1 for _ in range(n)]
if A[i] in val_set:
continue
tmp[0] = i
val_set.add(A[i])
cnt = self.dfs(0, tmp, graph, A, set([i]))
res += cnt
return res
def dfs(self, idx, tmp, graph, A, visited):
if idx == len(tmp)-1:
return 1
val_set = set([])
res = 0
for nei in graph[tmp[idx]]:
if A[nei] in val_set:
continue
if nei in visited:
continue
tmp[idx+1] = nei
val_set.add(A[nei])
visited.add(nei)
cnt = self.dfs(idx+1, tmp, graph, A, visited)
visited.remove(nei)
res += cnt
return res
def check_perfect(self, num):
root = math.sqrt(num)
return int(root+0.5)**2 == num
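# Quick sanity check against the examples in the problem statement above
# (a sketch; on LeetCode the Solution class is driven by the judge instead):
if __name__ == "__main__":
    sol = Solution()
    print(sol.numSquarefulPerms([1, 17, 8]))  # expected output: 2
    print(sol.numSquarefulPerms([2, 2, 2]))   # expected output: 1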
|
StarcoderdataPython
|
112553
|
<reponame>smurfix/distkv
import pytest
import io
from functools import partial
from distkv.mock import run
from distkv.mock.mqtt import stdtest
from distkv.client import ServerError
from distkv.util import PathLongener, P
import logging
logger = logging.getLogger(__name__)
async def collect(i, path=()):
res = []
pl = PathLongener(path)
async for r in i:
r.pop("tock", 0)
r.pop("seq", 0)
pl(r)
res.append(r)
return res
@pytest.mark.trio
async def test_71_basic(autojump_clock): # pylint: disable=unused-argument
async with stdtest(args={"init": 123}, tocks=80) as st:
async with st.client() as c:
with pytest.raises(ServerError):
await c._request(
"set_internal",
path=P("type.int"),
value={
"bad": ["foo", None],
"good": [0, 1, 2],
"code": "if not isinstance(value,int): rise ValueError('not an int')",
# yes, this checks for the typo ("rise" makes it a SyntaxError)
},
)
with pytest.raises(ServerError):
await c._request(
"set_internal",
path=P("type.int"),
value={
"bad": ["foo", None],
"good": [0, 1, "dud"],
"code": "if not isinstance(value,int): raise ValueError('not an int')",
},
)
with pytest.raises(ServerError):
await c._request(
"set_internal",
path=P("type.int"),
value={
"bad": ["foo", 4],
"good": [0, 1, 2],
"code": "if not isinstance(value,int): raise ValueError('not an int')",
},
)
with pytest.raises(ServerError):
await c._request(
"set_internal",
path=P("type.int"),
value={
"bad": [],
"good": [0, 1, 2],
"code": "if not isinstance(value,int): raise ValueError('not an int')",
},
)
with pytest.raises(ServerError):
await c._request(
"set_internal",
path=P("type.int"),
value={
"bad": ["foo", None],
"good": [1],
"code": "if not isinstance(value,int): raise ValueError('not an int')",
},
)
await c._request(
"set_internal",
path=P("type.int"),
value={
"bad": ["foo", None],
"good": [0, 1, 2],
"code": "if not isinstance(value,int): raise ValueError('not an int')",
},
)
with pytest.raises(ServerError):
await c._request(
"set_internal",
path=P("type.int.percent"),
value={
"bad": ["fuf", 101],
"good": [0, 55, 100],
"code": "if not 0<=value<=100: raise ValueError('not a percentage')",
},
)
with pytest.raises(ServerError):
await c._request(
"set_internal",
path=P("type.int.percent"),
value={
"bad": ["fuf", 101],
"good": [0, 5.5, 100],
"code": "if not 0<=value<=100: raise ValueError('not a percentage')",
},
)
await c._request(
"set_internal",
path=P("type.int.percent"),
value={
"bad": [-1, 101],
"good": [0, 55, 100],
"code": "if not 0<=value<=100: raise ValueError('not a percentage')",
},
)
with pytest.raises(ServerError):
await c._request(
"set_internal", path=P("match.one.+.two"), value={"tope": P("int.percent")}
)
await c._request(
"set_internal", path=P("match.one.+.two"), value={"type": P("int.percent")}
)
await c.set(P("one.x.two"), value=99)
with pytest.raises(ServerError):
await c.set(P("one.y.two"), value=9.9)
with pytest.raises(ServerError):
await c.set(P("one.y.two"), value="zoz")
await c.set(P("one.y"), value="zoz")
pass # client end
pass # server end
@pytest.mark.trio
async def test_72_cmd(autojump_clock, tmpdir): # pylint: disable=unused-argument
async with stdtest(args={"init": 123}, tocks=80) as st:
(s,) = st.s
async with st.client() as c:
h = p = None # pylint
for h, p, *_ in s.ports:
if h[0] != ":":
break
rr = partial(run, "client", "-h", h, "-p", p, do_stdout=False)
path = tmpdir.join("foo")
with io.open(path, "w") as f:
f.write(
"""\
good:
- 0
- 2
bad:
- none
- "Foo"
code: "if not isinstance(value,int): raise ValueError('not an int')"
"""
)
await rr("type", "set", "-d", str(path), "int")
with io.open(path, "w") as f:
f.write("if not 0<=value<=100: raise ValueError('not a percentage')\n")
with pytest.raises(ServerError):
await rr(
"type",
"set",
"-s",
str(path),
"-g",
"0",
"-g",
"100",
"-g",
"50",
"-b",
"-1",
"-b",
"5.5",
"int.percent",
)
await rr(
"type",
"set",
"-s",
str(path),
"-g",
"0",
"-g",
"100",
"-g",
"50",
"-b",
"-1",
"-b",
"555",
"int.percent",
)
await rr("type", "match", "-t", "int.percent", "foo.+.bar")
with pytest.raises(ServerError):
await rr("data", "foo.dud.bar", "set", "-v", ":", "123")
with pytest.raises(ServerError):
await rr("data", "foo.dud.bar", "set", "-e", ":", "123")
with pytest.raises(ServerError):
await rr("data", "foo.dud.bar", "set", "-e", ":", "5.5")
await rr("data", "foo.dud.bar", "set", "-e", ":", "55")
assert (await c.get(P("foo.dud.bar"))).value == 55
pass # client end
pass # server end
|
StarcoderdataPython
|
4896250
|
#!/usr/bin/python
#PIN 0-8 3v3 pull-up default, 9-27 pull-down default
# Raspberry Pi SPI Port and Device
spi_port = 0
spi_dev = 0
# Pin # for relay connected to heating element
he_pin = 26
brew_pin = 17
steam_pin = 22
# pin used to override the timer config when heating outside the normal hours
overRide = 16
# Default goal temperature
set_temp = 103
set_steam_temp = 149.
# Default alarm time
snooze = '07:00'
# circuit breaker time: 20 minutes, converted to seconds
circuitBreakerTime = 20 * 60
TimerOnMo = '9:30'
TimerOffMo = '10:30'
TimerOnTu = '09:30'
TimerOffTu = '10:30'
TimerOnWe = '09:30'
TimerOffWe = '10:30'
TimerOnTh = '09:30'
TimerOffTh = '10:30'
TimerOnFr = '09:30'
TimerOffFr = '10:30'
TimerOnSa = '10:00'
TimerOffSa = '12:00'
TimerOnSu = '10:00'
TimerOffSu = '12:00'
#temp lowpoint and high point
low_temp_b = 85
high_temp_b = 110
low_temp_s = 130
high_temp_s = 155
# Main loop sample rate in seconds
sample_time = 0.1
# PID Proportional, Integral, and Derivative value
# Tuned with the Ziegler-Nichols method; from experiment Ts = 130 s and Ku = 22, therefore:
Ku = 22
Ts = 130
Pc = 0.55 * Ku#22#8.2#5.6#3.4
Ic = 1.2 * Ku/Ts#0.6#1.2
Dc = 0.075 * Ku * Ts #40.0
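# With Ku = 22 and Ts = 130 s the expressions above evaluate to roughly:
#   Pc = 0.55 * 22        = 12.1
#   Ic = 1.2 * 22 / 130   ≈ 0.203
#   Dc = 0.075 * 22 * 130 = 214.5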
Pw = Pc
Iw = Ic
Dw = Dc
# Pw = 22#8.4#6.4#5.6#2.9
# Iw = 0#0.6#1.2
# Dw = 0#40.0
#Web/REST Server Options
port = 8080
|
StarcoderdataPython
|
11299447
|
<filename>tests_3_8/async/test_exceptions.py
# The module ``unittest`` supports async only from 3.8 on.
# That is why we had to move this test to 3.8 specific tests.
# pylint: disable=missing-docstring, invalid-name, unnecessary-lambda
import unittest
from typing import Optional, List
import icontract
class TestSyncFunctionAsyncConditionFail(unittest.IsolatedAsyncioTestCase):
def test_precondition(self) -> None:
async def x_greater_zero(x: int) -> bool:
return x > 0
@icontract.require(x_greater_zero)
def some_func(x: int) -> int:
return x * 10
value_error = None # type: Optional[ValueError]
try:
_ = some_func(100)
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertRegex(
str(value_error),
r'^Unexpected coroutine \(async\) condition <.*> for a sync function <.*\.some_func at .*>.')
def test_postcondition(self) -> None:
async def result_greater_zero(result: int) -> bool:
return result > 0
@icontract.ensure(result_greater_zero)
def some_func() -> int:
return 100
value_error = None # type: Optional[ValueError]
try:
_ = some_func()
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertRegex(
str(value_error),
r'^Unexpected coroutine \(async\) condition <.*> for a sync function <.*\.some_func at .*>.')
def test_snapshot(self) -> None:
async def capture_len_lst(lst: List[int]) -> int:
return len(lst)
@icontract.snapshot(capture_len_lst, name="len_lst")
@icontract.ensure(lambda OLD, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int]) -> None:
lst.append(1984)
value_error = None # type: Optional[ValueError]
try:
some_func([1])
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertRegex(
str(value_error), r'^Unexpected coroutine \(async\) snapshot capture <function .*\.capture_len_lst at .*> '
r'for a sync function <function .*\.some_func at .*>\.')
class TestSyncFunctionConditionCoroutineFail(unittest.IsolatedAsyncioTestCase):
def test_precondition(self) -> None:
async def x_greater_zero(x: int) -> bool:
return x > 0
@icontract.require(lambda x: x_greater_zero(x))
def some_func(x: int) -> int:
return x * 10
value_error = None # type: Optional[ValueError]
try:
_ = some_func(100)
except ValueError as err:
value_error = err
assert value_error is not None
self.assertRegex(
str(value_error),
r"^Unexpected coroutine resulting from the condition <function .*> for a sync function <function .*>\.$")
def test_postcondition(self) -> None:
async def result_greater_zero(result: int) -> bool:
return result > 0
@icontract.ensure(lambda result: result_greater_zero(result))
def some_func() -> int:
return 100
value_error = None # type: Optional[ValueError]
try:
_ = some_func()
except ValueError as err:
value_error = err
assert value_error is not None
self.assertRegex(
str(value_error),
r"^Unexpected coroutine resulting from the condition <function .*> for a sync function <function .*>\.$")
def test_snapshot(self) -> None:
async def capture_len_lst(lst: List[int]) -> int:
return len(lst)
@icontract.snapshot(lambda lst: capture_len_lst(lst), name="len_lst")
@icontract.ensure(lambda OLD, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int]) -> None:
lst.append(1984)
value_error = None # type: Optional[ValueError]
try:
some_func([1])
except ValueError as err:
value_error = err
assert value_error is not None
self.assertRegex(
str(value_error), r'^Unexpected coroutine resulting '
r'from the snapshot capture <function .*> of a sync function <function .*>.$')
class TestAsyncInvariantsFail(unittest.IsolatedAsyncioTestCase):
def test_that_async_invariants_reported(self) -> None:
async def some_async_invariant(self: 'A') -> bool:
return self.x > 0
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.invariant(some_async_invariant)
class A:
def __init__(self) -> None:
self.x = 100
except ValueError as error:
value_error = error
assert value_error is not None
self.assertEqual(
"Async conditions are not possible in invariants as sync methods such as __init__ have to be wrapped.",
str(value_error))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4864432
|
<gh_stars>10-100
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
from collections import namedtuple
from datetime import timedelta
from functools import partial
import os
import pickle
import string
import sys
from types import GetSetDescriptorType
from unittest import TestCase
import unittest
import uuid
import warnings
from parameterized import parameterized
import numpy as np
from numpy import full, int32, int64
import pandas as pd
from six import viewkeys
import sqlalchemy as sa
from zipline.assets import (
Asset,
ExchangeInfo,
Equity,
Future,
AssetDBWriter,
AssetFinder,
)
from zipline.assets.assets import OwnershipPeriod
from zipline.assets.synthetic import (
make_commodity_future_info,
make_rotating_equity_info,
make_simple_equity_info,
)
from six import itervalues, integer_types
from toolz import valmap, concat
from zipline.assets.asset_writer import (
check_version_info,
write_version_info,
_futures_defaults,
SQLITE_MAX_VARIABLE_NUMBER,
)
from zipline.assets.asset_db_schema import ASSET_DB_VERSION
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
AssetDBVersionError,
SidsNotFound,
SymbolNotFound,
)
from zipline.testing import (
all_subindices,
empty_assets_db,
powerset,
tmp_asset_finder,
)
from zipline.testing.predicates import assert_equal, assert_not_equal
from zipline.testing.fixtures import (
WithAssetFinder,
ZiplineTestCase,
WithTradingCalendars,
WithTmpDir,
WithInstanceTmpDir,
)
Case = namedtuple('Case', 'finder inputs as_of country_code expected')
minute = pd.Timedelta(minutes=1)
class AssetTestCase(TestCase):
# Dynamically list the Asset properties we want to test.
asset_attrs = [name for name, value in vars(Asset).items()
if isinstance(value, GetSetDescriptorType)]
# Very wow
asset = Asset(
1337,
real_sid='1337',
exchange_info=ExchangeInfo('THE MOON', 'MOON', '??'),
currency='USD',
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31', tz='UTC'),
auto_close_date=pd.Timestamp('2014-06-26 11:21', tz='UTC'),
)
test_exchange = ExchangeInfo('test full', 'test', '??')
asset3 = Asset(3, real_sid='3', exchange_info=test_exchange, currency='USD')
asset4 = Asset(4, real_sid='4', exchange_info=test_exchange, currency='USD')
asset5 = Asset(
5,
real_sid='5',
exchange_info=ExchangeInfo('still testing', 'still testing', '??'),
currency='USD'
)
def test_asset_object(self):
the_asset = Asset(
5061,
real_sid='5061',
exchange_info=ExchangeInfo('bar', 'bar', '??'),
currency='USD'
)
self.assertEqual({5061: 'foo'}[the_asset], 'foo')
self.assertEqual(the_asset, 5061)
self.assertEqual(5061, the_asset)
self.assertEqual(the_asset, the_asset)
self.assertEqual(int(the_asset), 5061)
self.assertEqual(str(the_asset), 'Asset(5061)')
def test_to_and_from_dict(self):
asset_from_dict = Asset.from_dict(self.asset.to_dict())
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_from_dict, attr),
)
def test_asset_is_pickleable(self):
asset_unpickled = pickle.loads(pickle.dumps(self.asset))
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_unpickled, attr),
)
def test_asset_comparisons(self):
s_23 = Asset(23, real_sid='23', exchange_info=self.test_exchange, currency='USD')
s_24 = Asset(24, real_sid='24', exchange_info=self.test_exchange, currency='USD')
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertEqual(int32(23), s_23)
self.assertEqual(int64(23), s_23)
self.assertEqual(s_23, int32(23))
self.assertEqual(s_23, int64(23))
# Check all int types:
for int_type in integer_types:
self.assertEqual(int_type(23), s_23)
self.assertEqual(s_23, int_type(23))
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
# Compare to a value that doesn't fit into a platform int:
self.assertNotEqual(s_23, sys.maxsize + 1)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(self.asset3 < self.asset4)
self.assertFalse(self.asset4 < self.asset4)
self.assertFalse(self.asset5 < self.asset4)
def test_le(self):
self.assertTrue(self.asset3 <= self.asset4)
self.assertTrue(self.asset4 <= self.asset4)
self.assertFalse(self.asset5 <= self.asset4)
def test_eq(self):
self.assertFalse(self.asset3 == self.asset4)
self.assertTrue(self.asset4 == self.asset4)
self.assertFalse(self.asset5 == self.asset4)
def test_ge(self):
self.assertFalse(self.asset3 >= self.asset4)
self.assertTrue(self.asset4 >= self.asset4)
self.assertTrue(self.asset5 >= self.asset4)
def test_gt(self):
self.assertFalse(self.asset3 > self.asset4)
self.assertFalse(self.asset4 > self.asset4)
self.assertTrue(self.asset5 > self.asset4)
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(self.asset3 < 'a')
self.assertIsNotNone('a' < self.asset3)
else:
with self.assertRaises(TypeError):
self.asset3 < 'a'
with self.assertRaises(TypeError):
'a' < self.asset3
class TestFuture(WithAssetFinder, ZiplineTestCase):
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
2468: {
'symbol': 'OMH15',
'root_symbol': 'OM',
'real_sid': '2468',
'currency': 'USD',
'notice_date': pd.Timestamp('2014-01-20', tz='UTC'),
'expiration_date': pd.Timestamp('2014-02-20', tz='UTC'),
'auto_close_date': pd.Timestamp('2014-01-18', tz='UTC'),
'tick_size': .01,
'multiplier': 500.0,
'exchange': "TEST",
},
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'real_sid': '0',
'currency': 'USD',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'multiplier': 1.0,
'exchange': 'TEST',
},
},
orient='index',
)
@classmethod
def init_class_fixtures(cls):
super(TestFuture, cls).init_class_fixtures()
cls.future = cls.asset_finder.lookup_future_symbol('OMH15')
cls.future2 = cls.asset_finder.lookup_future_symbol('CLG06')
def test_repr(self):
reprd = repr(self.future)
self.assertEqual("Future(2468 [OMH15])", reprd)
def test_reduce(self):
assert_equal(
pickle.loads(pickle.dumps(self.future)).to_dict(),
self.future.to_dict(),
)
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
for field in _futures_defaults.keys():
self.assertTrue(field in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
def test_lookup_future_symbol(self):
"""
Test the lookup_future_symbol method.
"""
om = TestFuture.asset_finder.lookup_future_symbol('OMH15')
self.assertEqual(om.sid, 2468)
self.assertEqual(om.symbol, 'OMH15')
self.assertEqual(om.root_symbol, 'OM')
self.assertEqual(om.notice_date, pd.Timestamp('2014-01-20', tz='UTC'))
self.assertEqual(om.expiration_date,
pd.Timestamp('2014-02-20', tz='UTC'))
self.assertEqual(om.auto_close_date,
pd.Timestamp('2014-01-18', tz='UTC'))
cl = TestFuture.asset_finder.lookup_future_symbol('CLG06')
self.assertEqual(cl.sid, 0)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('#&?!')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('FOOBAR')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('XXX99')
class AssetFinderTestCase(WithTradingCalendars, ZiplineTestCase):
asset_finder_type = AssetFinder
def write_assets(self, **kwargs):
self._asset_writer.write(**kwargs)
def init_instance_fixtures(self):
super(AssetFinderTestCase, self).init_instance_fixtures()
conn = self.enter_instance_context(empty_assets_db())
self._asset_writer = AssetDBWriter(conn)
self.asset_finder = self.asset_finder_type(conn)
def test_dont_trigger_max_variables_error(self):
# we will try to query for more variables than sqlite supports
# to make sure we are properly chunking on the client side
as_of = pd.Timestamp('2013-01-01', tz='UTC')
# we need more sids than we can query from sqlite
nsids = SQLITE_MAX_VARIABLE_NUMBER + 10
sids = range(nsids)
frame = pd.DataFrame.from_records(
[
{
'sid': sid,
'real_sid': str(sid),
'currency': 'USD',
'symbol': 'TEST.%d' % sid,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for sid in sids
]
)
self.write_assets(equities=frame)
assets = self.asset_finder.retrieve_equities(sids)
assert_equal(viewkeys(assets), set(sids))
def test_compute_lifetimes(self):
assets_per_exchange = 4
trading_day = self.trading_calendar.day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
equities = pd.concat(
[
make_rotating_equity_info(
num_assets=assets_per_exchange,
first_start=first_start,
frequency=trading_day,
periods_between_starts=3,
asset_lifetime=5,
exchange=exchange,
)
for exchange in (
'US_EXCHANGE_1',
'US_EXCHANGE_2',
'CA_EXCHANGE',
'JP_EXCHANGE',
)
],
ignore_index=True,
)
# make every symbol unique
equities['symbol'] = list(string.ascii_uppercase[:len(equities)])
equities['real_sid'] = equities['symbol']
# shuffle up the sids so they are not contiguous per exchange
sids = np.arange(len(equities))
np.random.RandomState(1337).shuffle(sids)
equities.index = sids
permute_sid = dict(zip(sids, range(len(sids)))).__getitem__
exchanges = pd.DataFrame.from_records([
{'exchange': 'US_EXCHANGE_1', 'country_code': 'US'},
{'exchange': 'US_EXCHANGE_2', 'country_code': 'US'},
{'exchange': 'CA_EXCHANGE', 'country_code': 'CA'},
{'exchange': 'JP_EXCHANGE', 'country_code': 'JP'},
])
sids_by_country = {
'US': equities.index[:2 * assets_per_exchange],
'CA': equities.index[
2 * assets_per_exchange:3 * assets_per_exchange
],
'JP': equities.index[3 * assets_per_exchange:],
}
self.write_assets(equities=equities, exchanges=exchanges)
finder = self.asset_finder
all_dates = pd.date_range(
start=first_start,
end=equities.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_with_start_raw = full(
shape=(len(dates), assets_per_exchange),
fill_value=False,
dtype=bool,
)
expected_no_start_raw = full(
shape=(len(dates), assets_per_exchange),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = equities.iloc[:4][['start_date', 'end_date']].itertuples(
index=False,
)
for j, (start, end) in enumerate(it):
# This way of doing the checks is redundant, but very
# clear.
if start <= date <= end:
expected_with_start_raw[i, j] = True
if start < date:
expected_no_start_raw[i, j] = True
for country_codes in powerset(exchanges.country_code.unique()):
expected_sids = pd.Int64Index(sorted(concat(
sids_by_country[country_code]
for country_code in country_codes
)))
permuted_sids = [
sid for sid in sorted(expected_sids, key=permute_sid)
]
tile_count = len(country_codes) + ('US' in country_codes)
expected_with_start = pd.DataFrame(
data=np.tile(
expected_with_start_raw,
tile_count,
),
index=dates,
columns=pd.Int64Index(permuted_sids),
)
result = finder.lifetimes(
dates,
include_start_date=True,
country_codes=country_codes,
)
assert_equal(result.columns, expected_sids)
result = result[permuted_sids]
assert_equal(result, expected_with_start)
expected_no_start = pd.DataFrame(
data=np.tile(
expected_no_start_raw,
tile_count,
),
index=dates,
columns=pd.Int64Index(permuted_sids),
)
result = finder.lifetimes(
dates,
include_start_date=False,
country_codes=country_codes,
)
assert_equal(result.columns, expected_sids)
result = result[permuted_sids]
assert_equal(result, expected_no_start)
def test_sids(self):
# Ensure that the sids property of the AssetFinder is functioning
self.write_assets(equities=make_simple_equity_info(
[0, 1, 2],
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
))
self.assertEqual({0, 1, 2}, set(self.asset_finder.sids))
def test_group_by_type(self):
equities = make_simple_equity_info(
range(5),
start_date=pd.Timestamp('2014-01-01'),
end_date=pd.Timestamp('2015-01-01'),
)
futures = make_commodity_future_info(
first_sid=6,
root_symbols=['CL'],
years=[2014],
)
# Intersecting sid queries, to exercise loading of partially-cached
# results.
queries = [
([0, 1, 3], [6, 7]),
([0, 2, 3], [7, 10]),
(list(equities.index), list(futures.index)),
]
self.write_assets(
equities=equities,
futures=futures,
)
finder = self.asset_finder
for equity_sids, future_sids in queries:
results = finder.group_by_type(equity_sids + future_sids)
self.assertEqual(
results,
{'equity': set(equity_sids), 'future': set(future_sids)},
)
@parameterized.expand([
(Equity, 'retrieve_equities', EquitiesNotFound),
(Future, 'retrieve_futures_contracts', FutureContractsNotFound),
])
def test_retrieve_specific_type(self, type_, lookup_name, failure_type):
equities = make_simple_equity_info(
range(5),
start_date=pd.Timestamp('2014-01-01'),
end_date=pd.Timestamp('2015-01-01'),
)
max_equity = equities.index.max()
futures = make_commodity_future_info(
first_sid=max_equity + 1,
root_symbols=['CL'],
years=[2014],
)
equity_sids = [0, 1]
future_sids = [max_equity + 1, max_equity + 2, max_equity + 3]
if type_ == Equity:
success_sids = equity_sids
fail_sids = future_sids
else:
fail_sids = equity_sids
success_sids = future_sids
self.write_assets(
equities=equities,
futures=futures,
)
finder = self.asset_finder
# Run twice to exercise caching.
lookup = getattr(finder, lookup_name)
for _ in range(2):
results = lookup(success_sids)
self.assertIsInstance(results, dict)
self.assertEqual(set(results.keys()), set(success_sids))
self.assertEqual(
valmap(int, results),
dict(zip(success_sids, success_sids)),
)
self.assertEqual(
{type_},
{type(asset) for asset in itervalues(results)},
)
with self.assertRaises(failure_type):
lookup(fail_sids)
with self.assertRaises(failure_type):
# Should fail if **any** of the assets are bad.
lookup([success_sids[0], fail_sids[0]])
def test_retrieve_all(self):
equities = make_simple_equity_info(
range(5),
start_date=pd.Timestamp('2014-01-01'),
end_date=pd.Timestamp('2015-01-01'),
)
max_equity = equities.index.max()
futures = make_commodity_future_info(
first_sid=max_equity + 1,
root_symbols=['CL'],
years=[2014],
)
self.write_assets(
equities=equities,
futures=futures,
)
finder = self.asset_finder
all_sids = finder.sids
self.assertEqual(len(all_sids), len(equities) + len(futures))
queries = [
# Empty Query.
(),
# Only Equities.
tuple(equities.index[:2]),
# Only Futures.
tuple(futures.index[:3]),
# Mixed, all cache misses.
tuple(equities.index[2:]) + tuple(futures.index[3:]),
# Mixed, all cache hits.
tuple(equities.index[2:]) + tuple(futures.index[3:]),
# Everything.
all_sids,
all_sids,
]
for sids in queries:
equity_sids = [i for i in sids if i <= max_equity]
future_sids = [i for i in sids if i > max_equity]
results = finder.retrieve_all(sids)
self.assertEqual(sids, tuple(map(int, results)))
self.assertEqual(
[Equity for _ in equity_sids] +
[Future for _ in future_sids],
list(map(type, results)),
)
self.assertEqual(
(
list(equities.symbol.loc[equity_sids]) +
list(futures.symbol.loc[future_sids])
),
list(asset.symbol for asset in results),
)
@parameterized.expand([
(EquitiesNotFound, 'equity', 'equities'),
(FutureContractsNotFound, 'future contract', 'future contracts'),
(SidsNotFound, 'asset', 'assets'),
])
def test_error_message_plurality(self,
error_type,
singular,
plural):
try:
raise error_type(sids=[1])
except error_type as e:
self.assertEqual(
str(e),
"No {singular} found for sid: 1.".format(singular=singular)
)
try:
raise error_type(sids=[1, 2])
except error_type as e:
self.assertEqual(
str(e),
"No {plural} found for sids: [1, 2].".format(plural=plural)
)
class AssetFinderMultipleCountries(WithTradingCalendars, ZiplineTestCase):
def write_assets(self, **kwargs):
self._asset_writer.write(**kwargs)
def init_instance_fixtures(self):
super(AssetFinderMultipleCountries, self).init_instance_fixtures()
conn = self.enter_instance_context(empty_assets_db())
self._asset_writer = AssetDBWriter(conn)
self.asset_finder = AssetFinder(conn)
@staticmethod
def country_code(n):
return 'A' + chr(ord('A') + n)
class TestAssetDBVersioning(ZiplineTestCase):
def init_instance_fixtures(self):
super(TestAssetDBVersioning, self).init_instance_fixtures()
self.engine = eng = self.enter_instance_context(empty_assets_db())
self.metadata = sa.MetaData(eng, reflect=True)
def test_check_version(self):
version_table = self.metadata.tables['version_info']
# This should not raise an error
check_version_info(self.engine, version_table, ASSET_DB_VERSION)
# This should fail because the version is too low
with self.assertRaises(AssetDBVersionError):
check_version_info(
self.engine,
version_table,
ASSET_DB_VERSION - 1,
)
# This should fail because the version is too high
with self.assertRaises(AssetDBVersionError):
check_version_info(
self.engine,
version_table,
ASSET_DB_VERSION + 1,
)
def test_write_version(self):
version_table = self.metadata.tables['version_info']
version_table.delete().execute()
# Assert that the version is not present in the table
self.assertIsNone(sa.select((version_table.c.version,)).scalar())
# This should fail because the table has no version info and is,
# therefore, considered v0
with self.assertRaises(AssetDBVersionError):
check_version_info(self.engine, version_table, -2)
# This should not raise an error because the version has been written
write_version_info(self.engine, version_table, -2)
check_version_info(self.engine, version_table, -2)
# Assert that the version is in the table and correct
self.assertEqual(sa.select((version_table.c.version,)).scalar(), -2)
# Assert that trying to overwrite the version fails
with self.assertRaises(sa.exc.IntegrityError):
write_version_info(self.engine, version_table, -3)
def test_finder_checks_version(self):
version_table = self.metadata.tables['version_info']
version_table.delete().execute()
write_version_info(self.engine, version_table, -2)
check_version_info(self.engine, version_table, -2)
# Assert that trying to build a finder with a bad db raises an error
with self.assertRaises(AssetDBVersionError):
AssetFinder(engine=self.engine)
# Change the version number of the db to the correct version
version_table.delete().execute()
write_version_info(self.engine, version_table, ASSET_DB_VERSION)
check_version_info(self.engine, version_table, ASSET_DB_VERSION)
# Now that the versions match, this Finder should succeed
AssetFinder(engine=self.engine)
class TestVectorizedSymbolLookup(WithAssetFinder, ZiplineTestCase):
@classmethod
def make_equity_info(cls):
T = partial(pd.Timestamp, tz='UTC')
def asset(sid, symbol, start_date, end_date):
return dict(
sid=sid,
real_sid=str(sid),
currency='USD',
symbol=symbol,
start_date=T(start_date),
end_date=T(end_date),
exchange='NYSE',
)
records = [
asset(1, 'A', '2014-01-02', '2014-01-31'),
asset(2, 'A', '2014-02-03', '2015-01-02'),
asset(3, 'B', '2014-01-02', '2014-01-15'),
asset(4, 'B', '2014-01-17', '2015-01-02'),
asset(5, 'C', '2001-01-02', '2015-01-02'),
asset(6, 'D', '2001-01-02', '2015-01-02'),
asset(7, 'FUZZY', '2001-01-02', '2015-01-02'),
]
return pd.DataFrame.from_records(records)
class TestAssetFinderPreprocessors(WithTmpDir, ZiplineTestCase):
def test_asset_finder_doesnt_silently_create_useless_empty_files(self):
nonexistent_path = self.tmpdir.getpath(self.id() + '__nothing_here')
with self.assertRaises(ValueError) as e:
AssetFinder(nonexistent_path)
expected = "SQLite file {!r} doesn't exist.".format(nonexistent_path)
self.assertEqual(str(e.exception), expected)
# sqlite3.connect will create an empty file if you connect somewhere
# nonexistent. Test that we don't do that.
self.assertFalse(os.path.exists(nonexistent_path))
class TestExchangeInfo(ZiplineTestCase):
def test_equality(self):
a = ExchangeInfo('FULL NAME', 'E', 'US')
b = ExchangeInfo('FULL NAME', 'E', 'US')
assert_equal(a, b)
# same full name but different canonical name
c = ExchangeInfo('FULL NAME', 'NOT E', 'US')
assert_not_equal(c, a)
# same canonical name but different full name
d = ExchangeInfo('DIFFERENT FULL NAME', 'E', 'US')
assert_not_equal(d, a)
# same names but different country
e = ExchangeInfo('FULL NAME', 'E', 'JP')
assert_not_equal(e, a)
def test_repr(self):
e = ExchangeInfo('FULL NAME', 'E', 'US')
assert_equal(repr(e), "ExchangeInfo('FULL NAME', 'E', 'US')")
def test_read_from_asset_finder(self):
sids = list(range(8))
exchange_names = [
'NEW YORK STOCK EXCHANGE',
'NEW YORK STOCK EXCHANGE',
'NASDAQ STOCK MARKET',
'NASDAQ STOCK MARKET',
'TOKYO STOCK EXCHANGE',
'TOKYO STOCK EXCHANGE',
'OSAKA STOCK EXCHANGE',
'OSAKA STOCK EXCHANGE',
]
equities = pd.DataFrame({
'sid': sids,
'real_sid': [str(sid) for sid in sids],
'currency': ['USD'] * len(sids),
'exchange': exchange_names,
'symbol': [chr(65 + sid) for sid in sids],
})
exchange_infos = [
ExchangeInfo('NEW YORK STOCK EXCHANGE', 'NYSE', 'US'),
ExchangeInfo('NASDAQ STOCK MARKET', 'NYSE', 'US'),
ExchangeInfo('TOKYO STOCK EXCHANGE', 'JPX', 'JP'),
ExchangeInfo('OSAKA STOCK EXCHANGE', 'JPX', 'JP'),
]
exchange_info_table = pd.DataFrame(
[
(info.name, info.canonical_name, info.country_code)
for info in exchange_infos
],
columns=['exchange', 'canonical_name', 'country_code'],
)
expected_exchange_info_map = {
info.name: info for info in exchange_infos
}
ctx = tmp_asset_finder(
equities=equities,
exchanges=exchange_info_table,
)
with ctx as af:
actual_exchange_info_map = af.exchange_info
assets = af.retrieve_all(sids)
assert_equal(actual_exchange_info_map, expected_exchange_info_map)
for asset in assets:
expected_exchange_info = expected_exchange_info_map[
exchange_names[asset.sid]
]
assert_equal(asset.exchange_info, expected_exchange_info)
class TestWrite(WithInstanceTmpDir, ZiplineTestCase):
def init_instance_fixtures(self):
super(TestWrite, self).init_instance_fixtures()
self.assets_db_path = path = os.path.join(
self.instance_tmpdir.path,
'assets.db',
)
self.writer = AssetDBWriter(path)
def new_asset_finder(self):
return AssetFinder(self.assets_db_path)
def test_write_multiple_exchanges(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date).
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
sids = list(range(5))
df = pd.DataFrame.from_records(
[
{
'sid': sid,
'real_sid': str(sid),
'currency': 'USD',
'symbol': str(sid),
'start_date': date.value,
'end_date': (date + timedelta(days=1)).value,
# Change the exchange with each mapping period. We don't
# currently support point in time exchange information,
# so we just take the most recent by end date.
'exchange': 'EXCHANGE-%d-%d' % (sid, n),
}
for n, date in enumerate(dates)
for sid in sids
]
)
self.writer.write(equities=df)
reader = self.new_asset_finder()
equities = reader.retrieve_all(reader.sids)
for eq in equities:
expected_exchange = 'EXCHANGE-%d-%d' % (eq.sid, len(dates) - 1)
assert_equal(eq.exchange, expected_exchange)
def test_write_direct(self):
# don't include anything with a default to test that those work.
equities = pd.DataFrame({
'sid': [0, 1],
'real_sid': ['0', '1'],
'currency': ['USD', 'CAD'],
'asset_name': ['Ayy Inc.', 'Lmao LP'],
# the full exchange name
'exchange': ['NYSE', 'TSE'],
})
equity_symbol_mappings = pd.DataFrame({
'sid': [0, 1],
'symbol': ['AYY', 'LMAO'],
'company_symbol': ['AYY', 'LMAO'],
'share_class_symbol': ['', ''],
})
exchanges = pd.DataFrame({
'exchange': ['NYSE', 'TSE'],
'country_code': ['US', 'JP'],
})
self.writer.write_direct(
equities=equities,
equity_symbol_mappings=equity_symbol_mappings,
exchanges=exchanges,
)
reader = self.new_asset_finder()
equities = reader.retrieve_all(reader.sids)
expected_equities = [
Equity(
0,
'0',
ExchangeInfo('NYSE', 'NYSE', 'US'),
currency='USD',
symbol='AYY',
asset_name='Ayy Inc.',
start_date=pd.Timestamp(0, tz='UTC'),
end_date=pd.Timestamp.max.tz_localize('UTC'),
first_traded=None,
auto_close_date=None,
tick_size=0.01,
multiplier=1.0,
),
Equity(
1,
'1',
ExchangeInfo('TSE', 'TSE', 'JP'),
currency='CAD',
symbol='LMAO',
asset_name='Lmao LP',
start_date=pd.Timestamp(0, tz='UTC'),
end_date=pd.Timestamp.max.tz_localize('UTC'),
first_traded=None,
auto_close_date=None,
tick_size=0.01,
multiplier=1.0,
)
]
assert_equal(equities, expected_equities)
exchange_info = reader.exchange_info
expected_exchange_info = {
'NYSE': ExchangeInfo('NYSE', 'NYSE', 'US'),
'TSE': ExchangeInfo('TSE', 'TSE', 'JP'),
}
assert_equal(exchange_info, expected_exchange_info)
|
StarcoderdataPython
|
8042191
|
# =================================================
# <EMAIL>
# 11. Image transformations - resizing, translation, rotation, perspective
# Reference : <EMAIL>
# =================================================
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# A. Image Resizing
def transform_resize():
img = cv.imread('../Images/11.Ji.jpg')
h, w = img.shape[:2]
img2 = cv.resize(img, None, fx=0.5, fy=1, interpolation=cv.INTER_AREA)
img3 = cv.resize(img, None, fx=1, fy=0.5, interpolation=cv.INTER_AREA)
img4 = cv.resize(img, None, fx=0.5, fy=0.5, interpolation=cv.INTER_AREA)
cv.imshow('original', img)
cv.imshow('fx=0.5', img2)
cv.imshow('fy=0.5', img3)
cv.imshow('fx=0.5, fy=0.5', img4)
cv.waitKey(0)
cv.destroyAllWindows()
#transform_resize()
# B. Image Shift and Rotation
def transform_shift():
img = cv.imread('../Images/11.Ji.jpg')
h, w = img.shape[:2]
M1 = np.float32([[1, 0, 100], [0, 1, 50]])
M2 = cv.getRotationMatrix2D((w/2, h/2), 45, 1)
M3 = cv.getRotationMatrix2D((w/2, h/2), 90, 1)
img2 = cv.warpAffine(img, M1, (w, h))
img3 = cv.warpAffine(img, M2, (w, h))
img4 = cv.warpAffine(img, M3, (w, h))
cv.imshow('original', img)
cv.imshow('shift image', img2)
cv.imshow('45-rotate image', img3)
cv.imshow('90-rotate image', img4)
cv.waitKey(0)
cv.destroyAllWindows()
#transform_shift()
# C. Affine and Perspective Transforms
def transform_forced():
img = cv.imread('../Images/12.sudoku.png')
h, w = img.shape[:2]
pts1_a = np.float32([[50, 50], [200, 50], [20, 200]])
pts2_a = np.float32([[10, 100], [200, 50], [100, 250]])
M1 = cv.getAffineTransform(pts1_a, pts2_a)
img2 = cv.warpAffine(img, M1, (w, h))
pts1_p = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
pts2_p = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
M2 = cv.getPerspectiveTransform(pts1_p, pts2_p)
img3 = cv.warpPerspective(img, M2, (w, h))
cv.imshow('original', img)
cv.imshow('Affine transform', img2)
cv.imshow('Perspective transform', img3)
cv.waitKey(0)
cv.destroyAllWindows()
transform_forced()
|
StarcoderdataPython
|
68796
|
import tensorflow as tf
import os
import time
import numpy as np
import glob
import matplotlib.pyplot as plt
import PIL
import imageio
import argparse
import dataset as dt
INPUT_SHAPE = (32, 32, 1)
tf.random.set_seed(777)
NORM_LIST = ["interframe_minmax", "est_minmax", "zscore"]
class ConvVAE(tf.keras.Model):
def __init__(self, latent_dim):
super(ConvVAE, self).__init__()
self.latent_dim = latent_dim
self.inference_net = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=INPUT_SHAPE),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latent_dim + latent_dim),
]
)
self.generative_net = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(latent_dim,)),
tf.keras.layers.Dense(units=8*8*32, activation=tf.nn.relu),
tf.keras.layers.Reshape(target_shape=(8, 8, 32)),
tf.keras.layers.Conv2DTranspose(
filters=64,
kernel_size=3,
strides=(2, 2),
padding="SAME",
activation='relu'),
tf.keras.layers.Conv2DTranspose(
filters=32,
kernel_size=3,
strides=(2, 2),
padding="SAME",
activation='relu'),
# No activation
tf.keras.layers.Conv2DTranspose(
filters=1, kernel_size=3, strides=(1, 1), padding="SAME"),
]
)
@tf.function
def sample(self, eps=None):
if eps is None:
eps=tf.random.normal(shape=(100, self.latent_dim))
return self.decode(eps, apply_sigmoid=False)
def encode(self, x):
mean, logvar=tf.split(self.inference_net(
x), num_or_size_splits=2, axis=1)
return mean, logvar
def reparameterize(self, mean, logvar):
eps=tf.random.normal(shape=mean.shape)
return eps * tf.exp(logvar * .5) + mean
def decode(self, z, apply_sigmoid=False):
logits=self.generative_net(z)
if apply_sigmoid:
probs=tf.sigmoid(logits)
return probs
return logits
#END OF CLASS
def log_normal_pdf(sample, mean, logvar, raxis=1):
log2pi=tf.math.log(2. * np.pi)
return tf.reduce_sum(
-.5 * ((sample - mean) ** 2. * \
tf.exp(-logvar) + logvar + log2pi),
axis=raxis)
@tf.function
def compute_loss(model, x):
mean, logvar=model.encode(x)
z=model.reparameterize(mean, logvar)
x_logit=model.decode(z)
cross_ent=tf.nn.sigmoid_cross_entropy_with_logits(
logits=x_logit, labels=x)
logpx_z=-tf.reduce_sum(cross_ent, axis=[1, 2, 3])
logpz=log_normal_pdf(z, 0., 0.)
logqz_x=log_normal_pdf(z, mean, logvar)
return -tf.reduce_mean(logpx_z + logpz - logqz_x)
@tf.function
def compute_apply_gradients(model, x, optimizer):
with tf.GradientTape() as tape:
loss=compute_loss(model, x)
gradients=tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(
zip(gradients, model.trainable_variables))
def generate_and_save_images(model, epoch, test_input, directory, title):
predictions = model.sample(test_input)
fig = plt.figure(figsize=(4,4))
plt.suptitle(title)
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow(predictions[i, :, :, 0], cmap='jet')
plt.axis('off')
# tight_layout minimizes the overlap between 2 sub-plots
plt.savefig(os.path.join(directory, 'image_at_epoch_{:04d}.png'.format(epoch)))
plt.close(fig)
def plot_ELBO(train_elbo_log, test_elbo_log, model_dir, prefix="", suffix=""):
plt.plot(np.array(train_elbo_log), ls='-', color='blue')
plt.plot(np.array(test_elbo_log), ls='--', color='blue')
plt.title('model ELBO')
plt.ylabel('ELBO')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(os.path.join(model_dir, prefix+"model_ELBO"+suffix+".png"))
plt.close()
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir',
type=str,
default="output",
help='Path to the output folder')
parser.add_argument('--tmp_dir',
type=str,
default="tmp",
help='Path to the tmp files folder')
parser.add_argument('--epochs',
type=int,
default=250,
help='How many epochs to train.')
parser.add_argument('--norm',
type=str,
default="interframe_minmax",
help='Normalization method.')
parser.add_argument('--lr',
type=float,
default=1e-4,
help='Learning rate.')
parser.add_argument('--num_examples_to_generate',
type=int,
default=16,
                        help='How many examples to generate in the visualization gif.')
parser.add_argument('--latent_dim',
type=int,
default=100,
                        help='Dimensionality of the VAE latent space.')
parser.add_argument('--prefix',
type=str,
default="",
help='Prefix to identify the files.')
parser.add_argument('--suffix',
type=str,
default="",
                        help='Suffix to identify the files.')
parser.add_argument('--min',
type=float,
default=None,
help='Estimate of min temp.')
parser.add_argument('--max',
type=float,
default=None,
help='Estimate of max temp.')
FLAGS, unparsed = parser.parse_known_args()
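    # Example invocation (illustrative; the script name and flag values are
    # placeholders, not the authors' actual settings):
    #   python conv_vae.py --output_dir output --tmp_dir tmp \
    #       --epochs 250 --norm interframe_minmax --lr 1e-4 --latent_dim 100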
def make_sure_path_exists(dir):
if not os.path.exists(dir):
os.mkdir(dir)
make_sure_path_exists(FLAGS.tmp_dir)
make_sure_path_exists(FLAGS.output_dir)
filenames = glob.glob(os.path.join(FLAGS.tmp_dir,'image*.png'))
for filename in filenames:
os.remove(filename)
if FLAGS.norm not in NORM_LIST:
        raise ValueError('unsupported normalization method: {}'.format(FLAGS.norm))
directory_path, ids = dt.DATASETS["20200131"]["filepath"], dt.DATASETS["20200131"]["ids"]
dataset = dt.Dataloader_RAM(directory_path, ids)
processor = dt.Processor()
data = dataset.load()
data = processor.align_timestamps(data) # align frames ()
data = processor.retime(data, step = 3)
train_images = np.vstack(data[0][0])
test_images = np.vstack(data[0][1])
train_images = train_images.reshape(train_images.shape[0], *INPUT_SHAPE).astype('float32')
test_images = test_images.reshape(test_images.shape[0], *INPUT_SHAPE).astype('float32')
    # normalization
def minmax_norm(images, min = None, max = None):
#interframe normalization, the set is assumed to come from the same recording here!
if not min:
min = images.min()
if not max:
max = images.max()
return (images-min)/(max-min)
def zscore(images, mean, std):
if not mean:
mean = images.mean()
if not std:
std = images.std()
return (images - mean)/std
def normalize_sets(norm : str, train_set, test_set, min = None, max = None):
mean = None
std = None
if (norm == "interframe_minmax") or (norm == "est_minmax"):
return minmax_norm(train_set, min, max), minmax_norm(test_set, min, max)
if norm == "zscore":
if not (mean or std):
tmp_stack = np.vstack([train_images, test_images])
if not mean:
mean = tmp_stack.mean()
if not std:
std = tmp_stack.std()
#mean = 0 for the project.
return zscore(train_set, 0, std)/10, zscore(test_set, 0, std)/10
return None
train_images, test_images = normalize_sets(FLAGS.norm, train_images, test_images, FLAGS.min, FLAGS.max)
TRAIN_BUF = 60000
BATCH_SIZE = 2300
TEST_BUF = 10000
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(TRAIN_BUF).batch(BATCH_SIZE)
test_dataset = tf.data.Dataset.from_tensor_slices(test_images).shuffle(TEST_BUF).batch(BATCH_SIZE)
optimizer=tf.keras.optimizers.Adam(FLAGS.lr)
random_vector_for_generation=tf.random.normal(
shape=[FLAGS.num_examples_to_generate, FLAGS.latent_dim])
model=ConvVAE(FLAGS.latent_dim)
gasi_title_template = "Normalization: {} \n LR {} LS {} Epoch {}"
gasi_title = gasi_title_template.format(FLAGS.norm, str(FLAGS.lr), str(FLAGS.latent_dim), str(0))
generate_and_save_images(model, 0, random_vector_for_generation, FLAGS.tmp_dir, title=gasi_title)
train_loss_log = []
test_loss_log = []
for epoch in range(1, FLAGS.epochs + 1):
start_time = time.time()
train_loss = tf.keras.metrics.Mean()
for train_x in train_dataset:
compute_apply_gradients(model, train_x, optimizer)
train_loss(compute_loss(model, train_x))
train_elbo = -train_loss.result()
end_time = time.time()
if epoch % 1 == 0:
loss = tf.keras.metrics.Mean()
for test_x in test_dataset:
loss(compute_loss(model, test_x))
elbo = -loss.result()
print('Epoch: {}, Train set ELBO: {}. Test set ELBO: {}, '
'time elapse for current epoch {}'.format(epoch, train_elbo,
elbo,
end_time - start_time))
gasi_title = gasi_title_template.format(FLAGS.norm, str(FLAGS.lr), str(FLAGS.latent_dim), str(epoch))
generate_and_save_images(
model, epoch, random_vector_for_generation, FLAGS.tmp_dir, title=gasi_title)
if (np.any(np.isnan(train_loss_log)) or np.any(np.isnan(test_loss_log))):
break
train_loss_log.append(train_elbo)
test_loss_log.append(elbo)
plot_ELBO(train_loss_log, test_loss_log, FLAGS.output_dir, FLAGS.prefix, FLAGS.suffix)
anim_file = os.path.join(FLAGS.output_dir, FLAGS.prefix+'convVAE'+FLAGS.suffix+'.gif')
with imageio.get_writer(anim_file, mode='I') as writer:
filenames = glob.glob(os.path.join(FLAGS.tmp_dir,'image*.png'))
filenames = sorted(filenames)
last = -1
for i,filename in enumerate(filenames):
frame = 2*(i**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
|
StarcoderdataPython
|
276816
|
<gh_stars>0
# minesweeper.py - Minesweeper
from random import randint
from colorama import Fore, Back, Style, init, deinit
from re import compile, match
class Board(object):
'''
A minesweeper board loaded with mines and numbers.
Properties:
height: An integer for the board's height / number of rows
width: An integer for the board's width / number of columns
mines: An integer for the number of mines
matrix: A 2D list to represent the board
goal: An integer copy for the number of mines
lastMove: A string for the last successful move made
alphabets: A dictionary for conversion between alphabets and numbers
'''
_alphabets = { 1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E', 6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J',
11: 'K', 12: 'L', 13: 'M', 14: 'N', 15: 'O', 16: 'P', 17: 'Q', 18: 'R', 19: 'S', 20: 'T',
21: 'U', 22: 'V', 23: 'W', 24: 'X', 25: 'Y', 26: 'Z'}
def __init__(self, height=1, width=2, mines=1):
self.height = height
self.width = width
self.mines = mines
# Generate matrix
self._matrix = []
for row in range(self._height):
self._matrix.append([])
for col in range(self._width):
self._matrix[row].append(Tile())
# Load with mines
minesLeft = self._mines
if minesLeft != 0:
done = False
while not done:
for row in range(self._height):
for col in range(self._width):
if self._matrix[row][col].content == -1:
continue
if randint(1, 10) == 1:
self._matrix[row][col].content = -1
minesLeft -= 1
if minesLeft == 0:
done = True
break
if done:
break
self._goal = self._mines
# Load with numbers
for row in range(self._height):
for col in range(self._width):
if self._matrix[row][col].content == -1:
continue
self._matrix[row][col].content = self._calculateNumber(row, col)
self._lastMove = 'Started game'
@property
def height(self):
return self._height
@height.setter
def height(self, value):
if value <= 0:
value = 1
self._height = value
@property
def width(self):
return self._width
@width.setter
def width(self, value):
# Board must have at least 2 squares
if value <= 1:
if self._height == 1:
value = 2
else:
value = 1
elif value > 50:
value = 50
self._width = value
@property
def mines(self):
return self._mines
@mines.setter
def mines(self, value):
if value < 0:
value = 1
elif value >= (self._height * self._width):
value = (self._height * self._width) - 1
self._mines = value
def printBoard(self):
'''Print the minesweeper board according to square status'''
init(autoreset=True)
rows = range(1, self._height + 1)
columns = range(1, self._width + 1)
print(' ', end='')
for col in columns:
print(Style.BRIGHT + Fore.GREEN + str(col).rjust(2), end=' ')
print()
for row in rows:
print(' ' + '+--' * self._width, end='+\n')
print(Style.BRIGHT + Fore.RED + Board._alphabets[row].rjust(2), end=' ')
for col in columns:
status = self._matrix[row - 1][col - 1].status
content = self._matrix[row - 1][col - 1].content
highlight = self._matrix[row - 1][col - 1].highlight
print('|', end='')
if status == 0:
print(Back.CYAN + ' ' + ' ', end='') # Closed tile
elif status == -1:
print(Style.BRIGHT + Back.CYAN + Fore.WHITE + ' ' + '#', end='') # Flag
else:
if content == -1:
mineString = Style.BRIGHT + Fore.WHITE
if highlight == 1:
mineString += Back.GREEN
elif highlight == -1:
mineString += Back.RED
print(mineString + ' ' + '*', end='') # Mine
else: # Open tile / numbers
numberString = Style.BRIGHT
if content == 0:
print(numberString + ' ' + ' ', end='')
continue
if content == 1:
numberString += Fore.YELLOW
elif content == 2:
numberString += Fore.MAGENTA
elif content == 3:
numberString += Fore.GREEN
elif content == 4:
numberString += Fore.RED
elif content == 5:
numberString += Fore.CYAN
elif content == 6:
numberString = Style.NORMAL + Fore.YELLOW
elif content == 7:
numberString = Style.NORMAL + Fore.WHITE
elif content == 8:
numberString += Fore.BLACK
print(numberString + ' ' + str(content), end='')
print('|')
print(' ' + '+--' * self._width, end='+\n')
deinit()
def checkMove(self, coordinate):
'''Return True or False after validating the move'''
alphabetRegex = compile(r'[a-zA-Z]')
numberRegex = compile(r'[1-9]|[1-9]\d')
n = 0
if coordinate[0] == '/':
n = 1
if alphabetRegex.match(coordinate[n]) and numberRegex.match(coordinate[n + 1:]) and coordinate[n + 1:].isdigit():
row, col = Board._convert(coordinate[n:])
if row == -1 or row > (self._height - 1) or col > (self._width - 1):
print('Move is out of range.')
return False # Out of range
else:
status = self._matrix[row][col].status
if status == 1:
print('Tile is already open.')
return False # Already open
else:
if n == 0:
if status == -1:
print('Flagged tile cannot be opened.')
return False # Already flagged
else:
return True
else:
if status == 0:
if self._mines != 0:
return True
else:
print('Out of flags.')
return False # Out of flags
else:
return True
else:
print('Invalid format.')
return False # Wrong format
def executeMove(self, coordinate):
'''Return True if move was good, and False if otherwise'''
flag = True
if coordinate[0] == '/':
row, col = Board._convert(coordinate[1:])
else:
row, col = Board._convert(coordinate)
flag = False
if flag:
if self._matrix[row][col].status == 0: # Flag
self._matrix[row][col].status = -1
self._mines -= 1
self._lastMove = 'Flagged ' + coordinate[1:].upper()
else:
self._matrix[row][col].status = 0 # Unflag
self._mines += 1
self._lastMove = 'Unflagged ' + coordinate[1:].upper()
return True
else:
if self._matrix[row][col].content == -1: # Open mine
self._matrix[row][col].status = 1
print('That was a mine.')
return False
else: # Open adjacent tiles with recursion
self._openTile(row, col)
self._lastMove = 'Opened ' + coordinate.upper()
return True
def getLastMove(self):
'''Return the last move'''
return self._lastMove
def checkWin(self):
'''Return True if game has been won, otherwise False'''
count = 0
for row in range(self._height):
for col in range(self._width):
if self._matrix[row][col].status == 0 or self._matrix[row][col].status == -1:
count += 1
if count > self._goal:
return False
return True
def revealBoard(self):
'''Reveal location of all mines on the board, and highlight mistakes'''
for i in range(self._height):
for j in range(self._width):
if self._matrix[i][j].content == -1:
if self._matrix[i][j].status == -1: # Flagged mine
self._matrix[i][j].highlight = 1
else: # Unflagged mine
self._matrix[i][j].highlight = -1
self._matrix[i][j].status = 1
self.printBoard()
def _convert(coordinate):
'''Convert coordinate to row and column integers'''
alphabet = coordinate[0].upper()
row = -1
for key in Board._alphabets:
if alphabet == Board._alphabets[key]:
row = key - 1
break
col = int(coordinate[1:]) - 1
return (row, col)
def _openTile(self, row, col):
'''Open one tile'''
if self._matrix[row][col].status == 1 or self._matrix[row][col].status == -1:
return # Stop when tile is already open or flagged
else:
self._matrix[row][col].status = 1
if self._matrix[row][col].content != 0:
return # Stop when tile has number 1 - 8
if row != 0:
self._openTile(row - 1, col) # Top
if col != 0:
self._openTile(row - 1, col -1) # Top left
if col != (self._width - 1):
self._openTile(row - 1, col + 1) # Top right
if row != (self._height - 1):
self._openTile(row + 1, col) # Bottom
if col != 0:
self._openTile(row + 1, col -1) # Bottom left
if col != (self._width - 1):
self._openTile(row + 1, col + 1) # Bottom right
if col != 0:
self._openTile(row, col - 1) # Left
if col != (self._width - 1):
self._openTile(row, col + 1) # Right
def _calculateNumber(self, row, col):
'''Return the number of mines adjacent to the coordinate'''
number = 0
if row != 0:
if self._matrix[row - 1][col].content == -1: # Top
number += 1
if col != 0: # Top left
if self._matrix[row - 1][col - 1].content == -1:
number += 1
if col != (self._width - 1): # Top right
if self._matrix[row - 1][col + 1].content == -1:
number += 1
if row != (self._height - 1):
if self._matrix[row + 1][col].content == -1: # Bottom
number += 1
if col != 0: # Bottom left
if self._matrix[row + 1][col - 1].content == -1:
number += 1
if col != (self._width - 1): # Bottom right
if self._matrix[row + 1][col + 1].content == -1:
number += 1
if col != 0:
if self._matrix[row][col - 1].content == -1: # Left
number += 1
if col != (self._width - 1):
if self._matrix[row][col + 1].content == -1: # Right
number += 1
return number
class Tile(object):
'''
A tile on a minesweeper board.
Properties:
content: An integer indicating the content the tile holds
status: An integer indicating the status of the tile
highlight: An integer indicating good or bad play
'''
def __init__(self, content=0, status=0):
self.content = content
self.status = status
self.highlight = 0 # 0: Default, 1: Good, -1: Bad
@property
def content(self):
'''-1: Mine, 0 - 8: Number of adjacent mines'''
return self._content
@content.setter
def content(self, value):
if value < -1 or value > 8:
value = 0
self._content = value
@ property
def status(self):
'''-1: Flagged, 0: Closed, 1: Open'''
return self._status
@status.setter
def status(self, value):
if value < -1 or value > 1:
value = 0
self._status = value
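# NOTE: promptInteger() is used by the custom-mode prompts below but its
# definition is missing from this snippet. The following is a best-guess
# reconstruction: keep asking until the user types a non-negative integer.
def promptInteger(prompt):
    while True:
        response = input(prompt)
        if response.isdigit():
            return int(response)
        print('Please enter a whole number.')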
print(' ======================= ')
print(' M I N E S W E E P E R ')
print(' ======================= ')
print()
print('Instructions: ')
print()
print('* Type "A1" to open A1. ')
print('* An open tile which has a number indicates the number of mines')
print(' adjacent to it.')
print('* Type "/A1" to flag A1 if it is unflagged, or unflag A1 if it ')
print('  is already flagged.')
print('* Flagging a tile merely marks it as a reminder that a mine may')
print(' be there.')
print('* The game is lost as soon as a tile with mine is opened.')
print('* The game is won when all the tiles have been opened except a ')
print(' number equivalent to the number of mines remains.')
print('* At the end of the game, the tiles with mines that have been ')
print(' correctly flagged will be highlighted in green, while those ')
print('  that were incorrectly flagged or left unflagged will be highlighted in ')
print(' red.')
print('* Type "restart" to restart the game.')
print('* Type "quit" to quit the game.')
print()
while True:
quit = False
restart = False
print(' Mode Height Width Number of Mines')
print(' ------------ -------- -------- ---------------')
print('0: Beginner 9 9 10 ')
print('1: Intermediate 16 16 40 ')
print('2: Expert 16 30 99 ')
print('3: Custom Min: 1 Min: 1 Min: 1 ')
print(' Max: 26 Max: 50 Max: H x W - 1 ')
# Prompt mode
while True:
mode = input('\nSelect mode: ')
if mode not in '0 1 2 3'.split():
print('Invalid mode.')
else:
break
if mode == '0':
mode = 'Beginner'
board = Board(9, 9, 10)
elif mode == '1':
mode = 'Intermediate'
board = Board(16, 16, 40)
elif mode == '2':
mode = 'Expert'
board = Board(16, 30, 99)
else:
mode = 'Custom'
height = promptInteger('Enter height: ')
width = promptInteger('Enter width: ')
mines = promptInteger('Enter mines: ')
if mines == 0:
mines = 1
board = Board(height, width, mines)
lost = False
while True:
print('\n---' + '---' * board.width, end='----\n\n')
print('Mode: %s Mines Left: %d\n' % (mode, board.mines))
# Print board
board.printBoard()
# Print last move
print('\nLast Move: ' + board.getLastMove())
# Prompt move
move = ''
while True:
move = input('\n>> ')
if move.lower() == 'quit':
quit = True
break
if move.lower() == 'restart':
restart = True
break
if board.checkMove(move):
break
if quit:
break
if restart:
break
# Execute move
if not board.executeMove(move):
lost = True
break
# Check winning condition
if board.checkWin():
break
if quit:
break
if restart:
print()
continue
# Reveal board
print()
board.revealBoard()
if lost:
print('\nYou lost. Better luck next time.')
else:
print('\nCongratulations. You won!')
# Prompt again
while True:
again = input('\nPlay again? (Y/N) ').upper()
if again not in 'Y N'.split():
print('Invalid response.')
else:
break
if again == 'N':
break
print()
print()
print(' ======================================= ')
print(' Thank you for playing. See you again! ')
print(' ======================================= ')
|
StarcoderdataPython
|
285562
|
<reponame>zerodine/krypton<filename>krypton/hkpserver/libs/gossip/gossiptask.py
from krypton.hkpserver.libs.recon import ReconPartner, Recon
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2013, Zerodine GmbH (zerodine.com) "
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "Apache-2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
from tornado import httpclient
from tornado.httputil import HTTPHeaders
import re
import urllib
class GossipTask(object):
name = "Gossip Syncronisation Task"
keyId = None
gossipServers = None
asciiArmoredKey = None
logger = logging.getLogger("krypton.gossip")
numberOfTries = 0
maxNumberOfTries = 5
givingUp = False
gpgModel = None
TASK_DISTRIBUTEKEY = 1
TASK_SEARCHKEY = 2
TASK_RECON = 3
task = None
def __init__(self, task, keyId=None, gossipServers=None, asciiArmoredKey=None, gpgModel=None):
self.task = task
self.keyId = keyId
self.asciiArmoredKey = asciiArmoredKey
self.gossipServers = gossipServers
self.gpgModel = gpgModel
def doWork(self):
x = None
if self.task == self.TASK_DISTRIBUTEKEY:
x = self._doTaskDistributeKey()
elif self.task == self.TASK_SEARCHKEY:
x = self._doTaskSearchKey()
elif self.task == self.TASK_RECON:
x = self._doTaskRecon()
if x:
return x
return self._handleFail()
def _doTaskRecon(self):
for g in self.gossipServers.getAll():
partner1 = ReconPartner(url="http://localhost:8888/pks", model=None)
partner2 = ReconPartner(url="http://%s:%s/pks" % (g["host"], g["port"]), model=None)
r = Recon()
stats = r.syncPartners(
reconPartner1=partner1,
reconPartner2=partner2,
)
if stats is None:
self.logger.info("Could not initiate Reconciliation with partners %s and %s" % (partner1, partner2))
return False
if stats["changesTotal"]:
self.logger.info("Done Reconciliation with remote %s -> %s" % (partner2, str(stats)))
else:
self.logger.info("No Data to reconcile with partner1 %s to partner2 %s" % (partner1, partner2))
return True
def _doTaskSearchKey(self):
self.logger.info("Trying to get key %s" % self.keyId)
url = "http://pool.sks-keyservers.net:11371/pks/lookup?op=get&search=0x%s&options=mr" % self.keyId
http_client = httpclient.HTTPClient()
http_request = httpclient.HTTPRequest(url=url)
http_request.headers = (HTTPHeaders({"content-type": "application/pgp-keys"}))
try:
response = http_client.fetch(http_request)
except httpclient.HTTPError, e:
self.logger.warning("Problem while getting key %s (%s)" % (self.keyId, str(e)))
return False
key = re.search("-----BEGIN PGP PUBLIC KEY BLOCK.*END PGP PUBLIC KEY BLOCK-----",
response.body, re.I | re.S | re.M).group(0)
if key:
if self.gpgModel.uploadKey(asciiArmoredKey=key, force=True, externalUpload=True):
self.logger.info("Successfully imported key %s" % self.keyId)
else:
self.logger.info("Error while importing key %s. Please see the logs" % self.keyId)
return False
def _doTaskDistributeKey(self):
server = self.gossipServers.getRandom()
http_client = httpclient.HTTPClient()
http_request = httpclient.HTTPRequest(url="http://%s:%i/pks/add" % (server["host"], int(server["port"])))
post_data = {'keytext': self.asciiArmoredKey}
http_request.method = "POST"
http_request.body = urllib.urlencode(post_data)
try:
response = http_client.fetch(http_request)
if int(response.code) != 200:
return False
except httpclient.HTTPError, e:
self.logger.warning("Problem while sending key %s to other keyserver: %s" % (self.keyId, str(e)))
return False
return True
def _handleFail(self):
self.numberOfTries += 1
if self.numberOfTries >= self.maxNumberOfTries:
self.givingUp = True
return False
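# Illustrative construction (not from the original code base): fetch a key from
# the public SKS pool and import it through the GPG model.
#
#   task = GossipTask(GossipTask.TASK_SEARCHKEY, keyId='DEADBEEF', gpgModel=gpg_model)
#   task.doWork()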
|
StarcoderdataPython
|
8151320
|
<reponame>pplotn/team6
from F_modules import *
# Plotting
def numstr(x):
string = str('{0:.2f}'.format(x))
return string
def F_nrms(mat,mat_true):
nrms = np.linalg.norm((mat-mat_true),ord=2)/np.linalg.norm(mat_true,ord=2)
return nrms
def F_r2(mat,mat_true):
r2=1- (np.std(mat_true.flatten()-mat.flatten()) / np.std(mat_true.flatten()) )
v1=mat.flatten()
v2=mat_true.flatten()
r2_2=r2_score(v1,v2)
return r2_2
def tight_figure(fig,**kwargs):
canvas = fig.canvas._get_output_canvas("png")
print_method = getattr(canvas, 'print_png')
print_method(io.BytesIO(), dpi=fig.dpi,
facecolor=fig.get_facecolor(), dryrun=True)
renderer = fig._cachedRenderer
bbox_inches = fig.get_tightbbox(renderer)
bbox_artists = fig.get_default_bbox_extra_artists()
bbox_filtered = []
for a in bbox_artists:
bbox = a.get_window_extent(renderer)
if a.get_clip_on():
clip_box = a.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = a.get_clip_path()
if clip_path is not None and bbox is not None:
clip_path = \
clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(
bbox, clip_path.get_extents())
if bbox is not None and (
bbox.width != 0 or bbox.height != 0):
bbox_filtered.append(bbox)
if bbox_filtered:
_bbox = Bbox.union(bbox_filtered)
trans = Affine2D().scale(1.0 / fig.dpi)
bbox_extra = TransformedBbox(_bbox, trans)
bbox_inches = Bbox.union([bbox_inches, bbox_extra])
pad = kwargs.pop("pad_inches", None)
if pad is None:
pad = plt.rcParams['savefig.pad_inches']
bbox_inches = bbox_inches.padded(pad)
tight_bbox.adjust_bbox(fig, bbox_inches, canvas.fixed_dpi)
w = bbox_inches.x1 - bbox_inches.x0
h = bbox_inches.y1 - bbox_inches.y0
fig.set_size_inches(w,h)
def Plot_image(Data, Title='Title', c_lim='',x='',x_label='',y='',y_label='',
dx='',dy='',Save_flag=0,Save_pictures_path='./Pictures',
Reverse_axis=1,Curve='',Show_flag=1,Aspect='equal'):
# aspect - 'auto'
if c_lim == '': c_lim =[np.min(Data), np.max(Data)]
if x == '': x=(np.arange(np.shape(Data)[1]))
if y == '': y=(np.arange(np.shape(Data)[0]))
if dx != '': x=(np.arange(np.shape(Data)[1]))*dx
if dy != '': y=(np.arange(np.shape(Data)[0]))*dy
extent = [x.min(), x.max(), y.min(), y.max()]
#if Save_flag==1:
# plt.ion()
fig=plt.figure()
fig.dpi=330
# fig_size = plt.rcParams["figure.figsize"]
# fig_size[0] = 10.4
# fig_size[1] = 8.0
# plt.rcParams["figure.figsize"] = fig_size
plt.set_cmap('RdBu_r')
# plt.axis(extent, Aspect)
# plt.axis(extent, 'auto')
plt.title(Title)
if Reverse_axis == 1:
plt.imshow(np.flipud(Data), extent=extent, interpolation='nearest',aspect=Aspect)
plt.gca().invert_yaxis()
else:
plt.imshow((Data), extent=extent, interpolation='nearest',aspect=Aspect)
if Curve != '':
# if len(np.shape(Curve)) == 2:
# Curve=Curve[0,:]
plt.plot(x, Curve, color='white', linewidth=1.2, linestyle='--')
ax = plt.gca()
divider1 = make_axes_locatable((ax))
cax1 = divider1.append_axes("right", size="2%", pad=0.05)
cbar=plt.colorbar(cax=cax1)
plt.clim(c_lim)
plt.ylabel(y_label)
plt.xlabel(x_label)
# plt.axis('equal')
# plt.axis('tight')
tight_figure(fig)
if Save_flag == 1:
if not os.path.exists(Save_pictures_path):
os.mkdir(Save_pictures_path)
name=Save_pictures_path + '/' + Title + '.png'
print(name)
# plt.show()
# plt.show(block=True)
# plt.show(block=False)
plt.savefig(name)
if Show_flag==0:
plt.show(block=False)
# plt.show(block=True)
else:
if Show_flag == 2:
a=1
else:
plt.show()
plt.close()
return None
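# Illustrative call (argument values are assumptions, not from the original code):
#   Plot_image(np.random.rand(100, 200), Title='Velocity model', dx=12.5, dy=12.5,
#              x_label='x, m', y_label='z, m', c_lim=[0, 1], Save_flag=0)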
def Plot_accuracy(history,Title='Title',Save_pictures_path='./Pictures',Save_flag=0):
plt.figure()
plt.plot(history['mean_absolute_error'])
plt.plot(history['val_mean_absolute_error'])
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.axis('tight')
plt.legend(['Training accuracy', 'Validation accuracy'], loc='lower right')
if Save_flag == 1:
name=Save_pictures_path + '/' + Title + '.png'
print(name)
plt.savefig(name)
plt.show(block=False)
plt.close()
return None
def Plot_accuracy2(history,Title='Title',Save_pictures_path='./Pictures',Save_flag=0):
plt.figure()
plt.plot(history['coeff_determination'])
plt.plot(history['val_coeff_determination'])
plt.ylabel('R2')
plt.xlabel('Epoch')
plt.axis('tight')
plt.ylim(-1,1)
string=', R2 accuracy curve train/test='+numstr( history['coeff_determination'][len(history['coeff_determination'])-1] )+'/'+numstr(history['val_coeff_determination'][len(history['val_coeff_determination'])-1])
plt.title(Title+string)
plt.legend(['training R2','validation R2'], loc='lower right')
if Save_flag == 1:
name = Save_pictures_path + '/' + Title + '.png'
print(name)
plt.savefig(name)
plt.show(block=False)
plt.close()
return None
def Plot_loss(history,Title='Title',Save_pictures_path='./Pictures',Save_flag=0):
plt.figure()
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.yscale('log')
plt.ylabel('Loss function')
plt.xlabel('Epoch')
plt.axis('tight')
# len(history['coeff_determination'])
# print(', R2 accuracy curve train/test='+numstr(history['coeff_determination'][-1])+'/'+numstr(history['val_coeff_determination'][-1]))
string=', R2 accuracy curve train/test='+numstr( history['coeff_determination'][len(history['coeff_determination'])-1] )+'/'+numstr(history['val_coeff_determination'][len(history['val_coeff_determination'])-1])
plt.title(Title)
plt.legend(['Training', 'Validation'], loc='upper right')
if Save_flag == 1:
name=Save_pictures_path + '/' + Title + '.png'
print(name)
plt.savefig(name)
plt.show(block=False)
plt.close()
return None
def Plot_loss_r2(history,Title='Title',Save_pictures_path='./Pictures',Save_flag=0):
plt.figure()
plt.plot(-np.array(history['loss']))
plt.plot(-np.array(history['val_loss']))
# plt.yscale('log')
# ax.set_yscale('log')
plt.ylabel('Loss function,R2')
plt.xlabel('Epoch')
plt.axis('tight')
plt.legend(['Training', 'Validation'], loc='upper right')
if Save_flag == 1:
name=Save_pictures_path + '/' + Title + '.png'
print(name)
plt.savefig(name)
plt.show(block=False)
plt.close()
return None
|
StarcoderdataPython
|
12805698
|
<reponame>Joes-BitGit/Leetcode
# DESCRIPTION
# Given a non-empty binary tree, find the maximum path sum.
# For this problem, a path is defined as any sequence of nodes
# from some starting node to any node in the tree along the parent-child connections.
# The path must contain at least one node and does not need to go through the root.
# EXAMPLE 1:
# Input: [1,2,3]
# 1
# / \
# 2 3
# Output: 6
# EXAMPLE 2:
# Input: [-10,9,20,null,null,15,7]
# -10
# / \
# 9 20
# / \
# 15 7
# Output: 42
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
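# Restated from the commented-out definition above so the file also runs outside
# the LeetCode judge (which normally provides TreeNode).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right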
class Solution:
def maxPathSum(self, root: TreeNode) -> int:
'''
Time: O(N), number of nodes iterated once
Space: O(H), stack frames goes height level deep at most
'''
self.max_path_sum = float('-inf')
self.path_sum(root)
return self.max_path_sum
def path_sum(self, node):
if not node:
return 0
# doesn't return negatives
left = max(0, self.path_sum(node.left))
right = max(0, self.path_sum(node.right))
# checks the triangle (left parent right) compares it to the global max
# if its larger than our global then we have a path: left parent right
self.max_path_sum = max(self.max_path_sum, left + right + node.val)
# returns either the left track or the right track
# can only choose one for the next level of the stack frame
return max(left, right) + node.val
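# Illustrative check (not part of the original submission): rebuild Example 2
# and verify the expected answer.
if __name__ == '__main__':
    root = TreeNode(-10, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    assert Solution().maxPathSum(root) == 42  # best path: 15 -> 20 -> 7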
|
StarcoderdataPython
|
8096177
|
<reponame>billyrrr/onto
def default_field_resolver(source, info, **args):
"""Default field resolver.
If a resolve function is not given, then a default resolve behavior is used which
takes the property of the source object of the same name as the field and returns
it as the result, or if it's a function, returns the result of calling that function
while passing along args and context.
For dictionaries, the field names are used as keys, for all other objects they are
used as attribute names.
"""
# Ensure source is a value for which property access is acceptable.
field_name = info.field_name
value = (
source.get(field_name)
if isinstance(source, dict)
else source.graphql_field_resolve(info, **args)
)
if callable(value):
return value(info, **args)
return value
class GraphqlAttributedMixin:
from graphql import GraphQLResolveInfo
def graphql_field_resolve(self, info: GraphQLResolveInfo, *args):
field_name = info.field_name
return self.graphql_representation.get(field_name, None)
import functools
@property
@functools.lru_cache(maxsize=None)
def graphql_representation(self):
return self.to_dict()
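# Minimal illustration (not part of the original module): for a dict source the
# default resolver only reads info.field_name, so a simple stand-in object is
# enough to exercise it.
if __name__ == '__main__':
    from types import SimpleNamespace

    fake_info = SimpleNamespace(field_name='title')
    assert default_field_resolver({'title': 'onto'}, fake_info) == 'onto'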
|
StarcoderdataPython
|
6634371
|
<reponame>prasoon-uta/IBM-coud-storage<gh_stars>0
# Copyright 2014 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testresources
from keystoneclient.tests.unit import client_fixtures
from keystoneclient.tests.unit.v3 import utils
class SimpleCertTests(utils.ClientTestCase, testresources.ResourcedTestCase):
resources = [('examples', client_fixtures.EXAMPLES_RESOURCE)]
def test_get_ca_certificate(self):
self.stub_url('GET', ['OS-SIMPLE-CERT', 'ca'],
headers={'Content-Type': 'application/x-pem-file'},
text=self.examples.SIGNING_CA)
res = self.client.simple_cert.get_ca_certificates()
self.assertEqual(self.examples.SIGNING_CA, res)
def test_get_certificates(self):
self.stub_url('GET', ['OS-SIMPLE-CERT', 'certificates'],
headers={'Content-Type': 'application/x-pem-file'},
text=self.examples.SIGNING_CERT)
res = self.client.simple_cert.get_certificates()
self.assertEqual(self.examples.SIGNING_CERT, res)
def load_tests(loader, tests, pattern):
return testresources.OptimisingTestSuite(tests)
|
StarcoderdataPython
|
8048370
|
<filename>solutions/problem_038.py
def is_valid(board, row):
if row in board:
return False
column = len(board)
for occupied_column, occupied_row in enumerate(board):
if abs(occupied_row - row) == abs(occupied_column - column):
return False
return True
def get_queen_configurations(board, n):
if n == len(board):
return 1
count = 0
for row in range(n):
if is_valid(board, row):
count += get_queen_configurations(board + [row], n)
return count
assert not is_valid([0, 2], 0)
assert not is_valid([0, 2], 2)
assert is_valid([0, 8], 3)
assert not is_valid([1, 3], 2)
assert is_valid([], 1)
assert get_queen_configurations([], 2) == 0
assert get_queen_configurations([], 4) == 2
assert get_queen_configurations([], 5) == 10
assert get_queen_configurations([], 8) == 92
|
StarcoderdataPython
|
8048356
|
<reponame>HappyHackingNinja/HappyHackingMCDSSurprise
import sys
from hhmcds.model import 早安鬧鐘資料
from hhmcds.surprise import 抽獎
from hhmcds.smurf import 分身
from hhmcds.imei import get_imei
import datetime
from openpyxl import workbook, styles
from hhmcds.gmail import active_then_delete
import time
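# (Translator's note, not in the original source) The Chinese identifiers below
# are kept as-is: 抽獎流程 ≈ "run the prize draw" for each alias mailbox and save
# the results to an xlsx workbook; 創建分身 ≈ "create alt accounts"; 測試程式 ≈
# "test routine" that activates the mailboxes and then runs the draw.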
def 抽獎流程(gmail, password, mail_prefix, numbers):
wb = workbook.Workbook()
ws = wb.active
ws.title = "抽獎結果"
ws.freeze_panes = 'A2'
ws.cell(row=1, column=1).value = "信箱"
ws.cell(row=1, column=1).font = styles.Font(bold=True)
ws.cell(row=1, column=2).value = "結果"
ws.cell(row=1, column=2).font = styles.Font(bold=True)
count = 1
str_imei = get_imei()
for idx in range(1, numbers):
資料 = 早安鬧鐘資料("{0}+{1:}{2:03d}@gmail.com".format(gmail, mail_prefix, idx), password, str_imei)
抽獎物件 = 抽獎(資料)
回傳資料 = 抽獎物件.執行()
if 回傳資料.抽獎成功:
count += 1
ws.cell(row=count, column=1).value = 回傳資料.信箱
ws.cell(row=count, column=2).value = 回傳資料.抽獎結果
sys.stdout.write("程序執行完成\n\n")
else:
sys.stderr.write("中斷\n")
wb.save(filename="抽獎結果{}-{:%m%d}.xlsx".format(mail_prefix, datetime.datetime.now()))
def 創建分身(gmail, password, mail_prefix, numbers):
count = 0
str_imei = get_imei()
for idx in range(1, numbers):
count += 1
        if count % 10 == 0:
str_imei = get_imei()
資料 = 早安鬧鐘資料("{0}+{1:}{2:0<EMAIL>".format(gmail, mail_prefix, idx), password, str_imei)
分身物件 = 分身(資料)
if 分身物件.執行():
sys.stdout.write("程序執行完成\n\n")
else:
sys.stderr.write("中斷\n")
def 測試程式(gmail, password, mail_prefix="", numbers=100):
#創建分身(gmail, password, mail_prefix, numbers)
#sys.stdout.write("分身創建完成,等待五分鐘後開始繳活\n\n")
#time.sleep(600)
active_then_delete()
抽獎流程(gmail, password, mail_prefix, numbers)
if __name__ == "__main__":
mail_prefix = "{:%m%d}".format(datetime.datetime.now())
測試程式(gmail="HappyHackingNinja", password="<PASSWORD>", mail_prefix=mail_prefix, numbers=200)
|
StarcoderdataPython
|
12809387
|
# noqa: D, V, E241
import sublime
import ctypes
import time
import platform
import subprocess
_debug = False
def debug(*args):
if _debug:
print(*args)
SPEC = {
# dir explorer
'dir': {
'Darwin': ['open', '<__path__>'],
'Linux': ['nautilus', '--browser', '<__path__>'],
'Windows': ['explorer', '<__path__>']
},
# file explorer
'file': {
'Darwin': ['open', '-R', '<__path__>'],
'Linux': ['nautilus', '--browser', '<__path__>'],
'Windows': ['explorer /select,"<__path__>"']
},
'detach_run': {
'Darwin': ['nohup', '*__path__*'],
'Linux': ['nohup', '*__path__*'],
'Windows': ['start', '', '/I', '*__path__*']
},
# desktop open
'open': {
'Darwin': ['open', '<__path__>'],
'Linux': ['xdg-open', '<__path__>'],
'Windows': ['<__path__>'],
},
'open_with_app': {
'Darwin': ['open', '-a', '<__app__>', '<__path__>']
},
'run_custom': {
'Darwin': ['*__app__*', '*__path__*'],
'Linux': ['*__app__*', '*__path__*'],
'Windows': ['*__app__*', '*__path__*']
},
'shell': {
'Darwin': ['/bin/sh', '-c', '*__path__*'],
'Linux': ['/bin/sh', '-c', '*__path__*'],
'Windows': ['cmd.exe /c "<__path__>"'] # need extra hidden at Popen
},
'shell_keep_open': {
'Darwin': ['/bin/sh', '-c', "'<__path__>; exec /bin/sh'"],
'Linux': ['/bin/sh', '-c', "'<__path__>; exec /bin/sh'"],
'Windows': ['cmd.exe /k "<__path__>"']
},
# terminal open
# terminal_keep_open = terminal + shell_keep_open
'terminal': {
'Darwin': ['/opt/X11/bin/xterm', '-e', '*__path__*'],
'Linux': ['/usr/bin/xterm', '-e', '*__path__*'],
'Linux2': ['gnome-terminal', '-x', '*__path__*'],
'Windows': ['cmd.exe /c "<__path__>"']
},
'set_title': {
'Windows': ['TITLE <__title__>& <__path__>']
}
# termain open with pause after running
# 'pause': {
# 'Darwin': ['<__path__>; read -p "Press [ENTER] to continue..."'],
# 'Linux': ['<__path__>; read -p "Press [ENTER] to continue..."'],
# 'Windows': ['<__path__> & pause']
# }
}
class Specification:
dry_run = False
def __init__(self, args, cwd=None, hidden=False):
self.args = args
self.hidden = hidden
self.cwd = cwd
def quote(self):
self.args = ['"{}"'.format(arg) for arg in self.args]
def popen(self):
        debug("popen cmd: %s" % self.args)
if self.dry_run:
return
startupinfo = None
if self.hidden:
from subprocess import STARTUPINFO, _winapi
startupinfo = STARTUPINFO()
startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _winapi.SW_HIDE
subprocess.Popen(self.args[0] if len(self.args) == 1 else self.args,
cwd=self.cwd, startupinfo=startupinfo)
@classmethod
def get_spec(cls, intention, path, cwd=None, app=None, title=None):
if not SPEC.get(intention):
raise Exception('unrecognized intention "{}"'.format(intention))
if not SPEC[intention].get(platform.system()):
raise Exception('unsupported os')
spec = SPEC[intention][platform.system()]
def merge(target, token, source):
if source is None:
return target
if isinstance(source, cls):
source = source.args
if not isinstance(source, list):
source = [source]
source_str = ' '.join(s if s else '""' for s in source)
merged = []
for arg in target:
if arg == '*__{}__*'.format(token):
merged.extend(source)
else:
merged.append(arg.replace('<__{}__>'.format(token), source_str))
return merged
spec = merge(spec, 'path', path)
spec = merge(spec, 'app', app)
spec = merge(spec, 'title', title or '')
hidden = intention == 'shell' and platform.system() == 'Windows'
return cls(spec, cwd=cwd, hidden=hidden)
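# Illustrative usage (assumed paths, not from the original plugin):
#   Specification.get_spec('dir', 'C:\\projects').popen()                 # open folder in the file browser
#   Specification.get_spec('shell', 'echo hi', cwd='C:\\projects').popen()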
if platform.system() == 'Windows':
EnumWindows = ctypes.windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool,
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int))
GetClassName = ctypes.windll.user32.GetClassNameW
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
SW_MINIMIZE = 6
SW_RESTORE = 9
ShowWindow = ctypes.windll.user32.ShowWindow
GetForegroundWindow = ctypes.windll.user32.GetForegroundWindow
def get_window_class(hWnd):
buff = ctypes.create_unicode_buffer(100)
GetClassName(hWnd, buff, 99)
return buff.value
def get_window_title(hWnd):
length = GetWindowTextLength(hWnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hWnd, buff, length + 1)
return buff.value
def list_window(match_class=None, match_title=None):
hWnds = []
def callback(hWnd, lParam):
if IsWindowVisible(hWnd):
hWnds.append(ctypes.addressof(hWnd.contents))
return True
EnumWindows(EnumWindowsProc(callback), 0)
if match_class is not None:
hWnds = filter(lambda hWnd: get_window_class(hWnd) == match_class, hWnds)
if match_title is not None:
hWnds = filter(lambda hWnd: get_window_title(hWnd) == match_title, hWnds)
return list(hWnds)
# reference: https://gist.github.com/EBNull/1419093
def forceFocus(wnd):
if GetForegroundWindow() == wnd:
return True
ShowWindow(wnd, SW_MINIMIZE)
ShowWindow(wnd, SW_RESTORE)
class WindowSingleton:
_hash = None
def __init__(self, cmd, id=None,
window_title=None, window_class=None,
title_instance=False, class_instance=False):
assert(any([id, window_title, window_class]))
self.cmd = cmd
self.id = id
self.window_title = window_title
self.window_class = window_class
        # if true, ensure() will not create more than one window for the same title
        self.title_instance = title_instance
        # if true, ensure() will not create more than one window for the same class
        self.class_instance = class_instance
debug(self.window_title, self.window_class, self.title_instance, self.class_instance)
def do_create(self):
if isinstance(self.cmd, Specification):
self.cmd.popen()
elif callable(self.cmd):
self.cmd(self)
else:
proc = subprocess.Popen(self.cmd, shell=True)
proc.wait()
time.sleep(0.5)
def create_window(self):
def get_snapshot():
return set(list_window(match_class=self.window_class,
match_title=self.window_title))
before = get_snapshot()
self.do_create()
        # find the most recently created window that matches the class and the title
delta = get_snapshot() - before
if delta:
hWnd = list(delta)[0]
return {'hWnd': hWnd, 'title': get_window_title(hWnd)}
def ensure(self):
        # ensure the window exists; otherwise create a new one and return its handle
hash_id = self.id or self.window_title or self.window_class
window = None
# try to find opened window in hash first
if self._hash.get(hash_id) is not None:
_window = self._hash.get(hash_id)
if all([IsWindowVisible(_window['hWnd']), # test window exist
not (self.window_title is not None and
self.window_title != get_window_title(_window['hWnd'])),
not (self.window_class is not None and
self.window_class != get_window_class(_window['hWnd']))
]):
window = _window
debug('window in hash: {}'.format(window))
        # if this is a singleton window,
        # search the system for an already opened window matching the class and title
if window is None and (self.class_instance or self.title_instance):
            # class_instance: ensure only one program window with this window_class exists
            # title_instance: ensure only one program window with this window_title exists
windows = list_window(match_class=self.window_class if self.class_instance else None,
match_title=self.window_title if self.title_instance else None)
if windows:
window = {'hWnd': windows[0], 'title': get_window_title(windows[0])}
debug('window exists: {}'.format(window))
if window is None:
window = self.create_window()
if window is None:
                raise Exception('unable to execute program')
self._hash.set(hash_id, window)
# sublime.save_settings('singletion-hash.sublime-settings')
return window['hWnd']
def bring_top(self):
hWnd = self.ensure()
forceFocus(hWnd)
else:
WindowSingleton = None
def plugin_loaded():
# print(list(map(lambda x: (get_window_title(x), get_window_class(x)), list_window())))
if WindowSingleton:
WindowSingleton._hash = sublime.load_settings('singletion-hash.sublime-settings')
def plugin_unloaded():
if WindowSingleton:
sublime.save_settings('singletion-hash.sublime-settings')
|
StarcoderdataPython
|
11212348
|
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import include
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + \
static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
StarcoderdataPython
|
1669605
|
<reponame>Zalewa/doomstats
#!/usr/bin/python
#-*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.apps import apps
class Command(BaseCommand):
def handle(self, *args, **options):
app_models = apps.get_app_config('presentation').get_models()
for model in app_models:
model.objects.all().delete()
|
StarcoderdataPython
|
1838558
|
<reponame>willf/pypatgen
'''
Created on Feb 19, 2016
@author: mike
'''
from __future__ import print_function
import codecs
import re
import argparse
TEMPLATE = r'''
\documentclass{article}
\usepackage{polyglossia}
\usepackage{xltxtra}
\usepackage{testhyphens}
\setdefaultlanguage{churchslavonic}
\newfontfamily\churchslavonicfont[Script=Cyrillic]{PonomarUnicode.otf}
\begin{document}
$BODY$
\begin{checkhyphens}
$CHECKHYPHENS$
\end{checkhyphens}
\end{document}
'''
def main_generate(args):
words = list(read_words(args.dictionary))
body= '\n'.join('\\showhyphens{%s}' % x for x in words)
checkhyphens = '\n'.join(words)
text = re.sub(r'\$BODY\$', body, TEMPLATE)
text = re.sub(r'\$CHECKHYPHENS\$', checkhyphens, text)
with codecs.open(args.output, 'w', 'utf-8') as f:
f.write(text)
def read_words(fname, strip_hyphen=True):
rex = re.compile(r'\d')
if strip_hyphen:
rex = re.compile(r'[\-\d]')
with codecs.open(fname, 'r', 'utf-8') as f:
for l in f:
l = l.strip()
if not l:
continue
if l.startswith('#') or l.startswith('%'):
continue
l = rex.sub('', l)
yield l
def main_extract(args):
with codecs.open(args.output, 'w', 'utf-8') as f:
for x in generatehyps(args.log):
f.write(x + '\n')
def generatehyps(fname):
with codecs.open(fname, 'r', 'utf-8') as f:
expect = False
for l in f:
l = l.strip()
if expect > 0:
expect -= 1
if expect == 1:
expect = 0
yield l.split()[1]
else:
if re.search(r'showhyphens: \*\*\*', l):
expect = 4
def main_strip_weights(args):
words = list(read_words(args.input, strip_hyphen=False))
with codecs.open(args.output, 'w', 'utf-8') as f:
for word in words:
f.write(word + '\n')
return 0
def main():
parser = argparse.ArgumentParser(description='Utility to compare training dictionary with actual hyphenations as generated by TeX')
sub = parser.add_subparsers(help='Commands', dest='cmd')
parser_generate = sub.add_parser('generate', help='Generate LaTex file with all dictionary words')
parser_generate.add_argument('dictionary', help='input dictionary file')
parser_generate.add_argument('output', help='output LaTeX file')
parser_extract = sub.add_parser('extract', help='Extract actual TeX hyphenations from LaTeX log file')
parser_extract.add_argument('log', help='input LaTeX log file')
parser_extract.add_argument('output', help='output hyphenation dictionary as extracted from log')
parser_strip_weights = sub.add_parser('strip_weights', help='Strips weights (numbers) from dictionary')
parser_strip_weights.add_argument('input', help='input dictionary file')
parser_strip_weights.add_argument('output', help='output dictionary with numeric weights stripped off')
args = parser.parse_args()
if args.cmd == 'generate':
parser.exit(main_generate(args))
elif args.cmd == 'extract':
parser.exit(main_extract(args))
elif args.cmd == 'strip_weights':
parser.exit(main_strip_weights(args))
else:
parser.error('Missing or unrecognized command')
if __name__ == '__main__':
main()
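# Typical round trip (file names are illustrative):
#   python compare_hyphenation.py generate words.dic check.tex   # build the LaTeX test file
#   xelatex check.tex                                             # run XeLaTeX, producing check.log
#   python compare_hyphenation.py extract check.log actual.dic   # pull TeX's actual hyphenations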
|
StarcoderdataPython
|
1838834
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
class Gerenciador(ScreenManager):
pass
class Menu(Screen):
pass
class Tarefas(Screen):
def __init__(self, tarefas=[], **kwargs):
super().__init__(**kwargs)
for t in tarefas:
self.ids.box.add_widget(Tarefa(t))
def addWidget(self):
texto = self.ids.texto.text
self.ids.box.add_widget(Tarefa(text=texto))
self.ids.texto.text = ''
class Tarefa(BoxLayout):
def __init__(self, text='', **kwargs):
super().__init__(**kwargs)
self.ids.label.text = text
class Teste(App):
def build(self):
return Gerenciador()
Teste().run()
|
StarcoderdataPython
|
3205350
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Z-Wave Interpreter - Constants © Autolog 2020
#
# Z-Wave Interpreter common constants
# Z-Wave Command Classes and ZW_COMMANDS
# ### Shared Constants ###
ZW_BATTERY_LEVEL = 1
ZW_BATTERY_LEVEL_UI = 2
ZW_COMMAND = 4
ZW_COMMAND_BYTES = 5
ZW_COMMAND_BYTES_UI = 6
ZW_COMMAND_CLASS_UI = 8
ZW_COMMAND_CLASS_VERSION = 9
ZW_COMMAND_CLASS_VERSION_UI = 10
ZW_COMMAND_DETAIL = 11
ZW_COMMAND_PACKET_LENGTH = 12
ZW_COMMAND_UI = 13
ZW_ENDPOINT = 15
ZW_ERROR_MESSAGE = 16
ZW_INTERPRETATION_ATTEMPTED = 21
ZW_INTERPRETATION_DETAIL_UI = 22
ZW_INTERPRETATION_OVERVIEW_UI = 23
ZW_INTERPRETATION_UI = 24
ZW_INTERPRETED = 25
ZW_MANUFACTURER_FIELDS = 26
ZW_METER_TYPE = 27
ZW_METER_TYPE_UI = 28
ZW_MODE = 29
ZW_MODE_UI = 30
ZW_NODE_ID = 31
ZW_NUMBER_OF_MANUFACTURER_FIELDS = 32
ZW_PRECISION = 33
ZW_SCALE_UI = 34
ZW_SCALE_UI_COMPACT = 35
ZW_SCALES = 36
ZW_SENSOR_ALARM_TYPE = 37
ZW_SENSOR_ALARM_TYPE_UI = 38
ZW_SENSOR_TYPE = 39
ZW_SENSOR_TYPE_UI = 40
ZW_SETPOINT_TYPE = 41
ZW_SETPOINT_TYPE_UI = 42
ZW_SETPOINT_TYPES = 43
ZW_SIZE = 44
ZW_VALUE = 45
ZW_VALUE_BOOL = 46
ZW_VALUE_UI = 47
#
ZW_SECONDS = 48
ZW_COMMAND_SUCCESS = 49
ZW_TIME_DELTA = 50
ZW_SEQUENCE_NUMBER = 51
ZW_KEY_ATTRIBUTES = 52
ZW_KEY_ATTRIBUTES_UI = 53
ZW_SCENE_NUMBER = 54
ZW_INDIGO_DEVICE = 55
ZW_OPERATING_STATE = 56
ZW_OPERATING_STATE_UI = 57
ZW_FAN_MODE = 58
ZW_FAN_MODE_UI = 59
|
StarcoderdataPython
|
6476618
|
from visual import *
# <NAME>, March 2002
# Import this module to create buttons, toggle switches, sliders, and pull-down menus.
# See test routine at end of this module for an example of how to use controls.
lastcontrols = None # the most recently created controls window
gray = (0.7, 0.7, 0.7)
darkgray = (0.5, 0.5, 0.5)
class controls: # make a special window for buttons, sliders, and pull-down menus
def __init__(self, x=0, y=0, width=300, height=320, range=100,
title=None, foreground=None, background=None):
global lastcontrols
lastcontrols = self
currentdisplay = display.get_selected()
if title is None:
title = 'Controls'
if foreground is None:
foreground = color.white
if background is None:
background = color.black
self.display = display(title=title, x=x, y=y, range=range,
width=width, height=height, fov=0.5,
foreground=foreground, background=background,
userzoom=0, userspin=0,
lights=[vector(0,2,1)])
self.focus = None
self.lastpos = None
self.controllist = []
currentdisplay.select()
def addcontrol(self, control):
self.controllist.append(control)
def interact(self):
if self.display.mouse.events:
m = self.display.mouse.getevent()
if m.press == 'left' and m.pick:
picked = m.pick
if self.focus: # have been moving over menu with mouse up
picked = self.focus
for control in self.controllist:
if control.active is picked:
self.focus = control
control.highlight(m.pos)
elif m.release == 'left':
focus = self.focus
self.focus = None # menu may reset self.focus for "sticky" menu
if focus:
focus.unhighlight(m.pos)
elif self.focus: # if dragging a control
pos = self.display.mouse.pos
if pos != self.lastpos:
self.focus.update(pos)
self.lastpos = pos
class ctrl(object): # common aspects of buttons, sliders, and menus
# Note: ctrl is a subclass of "object" in order to be a new-type class which
# permits use of the new "property" feature exploited by buttons and sliders.
def __init__(self, args):
if args.has_key('controls'):
self.controls = args['controls']
elif lastcontrols is None:
self.controls = controls()
else:
self.controls = lastcontrols
self.controls.addcontrol(self)
self.pos = vector(0,0)
self.action = None
if args.has_key('pos'):
self.pos = vector(args['pos'])
if args.has_key('value'):
self.value = args['value']
if args.has_key('action'):
self.action = args['action']
def highlight(self, pos):
pass
def unhighlight(self, pos):
pass
def update(self, pos):
pass
def execute(self):
if self.action:
apply(self.action)
class button(ctrl):
def __init__(self, **args):
self.type = 'button'
self.value = 0
ctrl.__init__(self, args)
width = height = 40
bcolor = gray
edge = darkgray
self.__text = ''
if args.has_key('width'):
width = args['width']
if args.has_key('height'):
height = args['height']
if args.has_key('text'):
self.__text = args['text']
if args.has_key('color'):
bcolor = args['color']
disp = self.controls.display
framewidth = width/10.
self.thick = 2.*framewidth
self.box1 = box(display=disp, pos=self.pos+vector(0,height/2.-framewidth/2.,0),
size=(width,framewidth,self.thick), color=edge)
self.box2 = box(display=disp, pos=self.pos+vector(-width/2.+framewidth/2.,0,0),
size=(framewidth,height,self.thick), color=edge)
self.box3 = box(display=disp, pos=self.pos+vector(width/2.-framewidth/2.,0,0),
size=(framewidth,height,self.thick), color=edge)
self.box4 = box(display=disp, pos=self.pos+vector(0,-height/2.+framewidth/2.,0),
size=(width,framewidth,self.thick), color=edge)
self.button = box(display=disp, pos=self.pos+vector(0,0,self.thick/2.+1.),
size=(width-2.*framewidth,height-2.*framewidth,self.thick), color=bcolor)
self.label = label(display=disp, pos=self.button.pos, color=color.black,
text=self.__text, line=0, box=0, opacity=0)
self.active = self.button
def gettext(self):
return self.label.text
def settext(self, text):
self.label.text = text
text = property(gettext, settext) # establishes special getattr/setattr handling
def highlight(self, pos):
self.button.pos.z -= self.thick
self.label.pos.z -= self.thick
self.value = 1
def unhighlight(self, pos):
self.button.pos.z += self.thick
self.label.pos.z += self.thick
self.value = 0
self.execute()
class toggle(ctrl):
def __init__(self, **args):
self.type = 'toggle'
self.__value = 0
ctrl.__init__(self, args)
width = height = 20
self.angle = pi/6. # max rotation of toggle
bcolor = gray
edge = darkgray
self.__text0 = ''
self.__text1 = ''
if args.has_key('width'):
width = args['width']
if args.has_key('height'):
height = args['height']
if args.has_key('text0'):
self.__text0 = args['text0']
if args.has_key('text1'):
self.__text1 = args['text1']
if args.has_key('color'):
bcolor = args['color']
if args.has_key('value'):
self.__value = args['value']
diskthick = width/4.
diskradius = height/2.
ballradius = 0.6*diskradius
self.rodlength = 1.2*diskradius+ballradius
disp = self.controls.display
self.frame = frame(display=disp, pos=self.pos, axis=(1,0,0))
self.back = box(display=disp, frame=self.frame, pos=(0,0,0),
size=(width,height,0.3*diskradius), color=darkgray)
self.disk1 = cylinder(display=disp, frame=self.frame, pos=(-diskthick,0,0),
axis=(-diskthick,0), radius=diskradius, color=gray)
self.disk2 = cylinder(display=disp, frame=self.frame, pos=(diskthick,0,0),
axis=(diskthick,0), radius=diskradius, color=gray)
self.rod = cylinder(display=disp, frame=self.frame, pos=(0,0,0),
axis=(0,0,self.rodlength), radius=width/8., color=gray)
self.ball = sphere(display=disp, frame=self.frame, pos=(0,0,self.rodlength),
radius=ballradius, color=gray)
self.label0 = label(display=disp, frame=self.frame, pos=(0,-1.0*height), text=self.__text0,
line=0, box=0, opacity=0)
self.label1 = label(display=disp, frame=self.frame, pos=(0,1.0*height), text=self.__text1,
line=0, box=0, opacity=0)
self.settoggle(self.__value)
self.active = self.ball
def settoggle(self, val):
self.__value = val
if val == 1:
newpos = self.rodlength*vector(0,sin(self.angle), cos(self.angle))
else:
newpos = self.rodlength*vector(0,-sin(self.angle), cos(self.angle))
self.rod.axis = newpos
self.ball.pos = newpos
def getvalue(self):
return self.__value
def setvalue(self, val):
self.settoggle(val)
self.__value = val
value = property(getvalue, setvalue) # establishes special getattr/setattr handling
def gettext0(self):
return self.label0.text
def settext0(self, text):
self.label0.text = text
text0 = property(gettext0, settext0) # establishes special getattr/setattr handling
def gettext1(self):
return self.label1.text
def settext1(self, text):
self.label1.text = text
text1 = property(gettext1, settext1) # establishes special getattr/setattr handling
def unhighlight(self, pos):
if self.controls.display.mouse.pick is self.active:
self.__value = not(self.__value)
self.settoggle(self.__value)
self.execute()
class slider(ctrl):
def __init__(self, **args):
self.type = 'slider'
self.__value = 0
ctrl.__init__(self, args)
self.length = 100.
width = 10.
shaftcolor = darkgray
scolor = gray
self.min = 0.
self.max = 100.
self.axis = vector(1,0,0)
if args.has_key('axis'):
self.axis = vector(args['axis'])
self.length = mag(self.axis)
self.axis = norm(self.axis)
if args.has_key('length'):
self.length = args['length']
if args.has_key('width'):
width = args['width']
if args.has_key('min'):
self.min = args['min']
if self.__value == 0:
self.__value = self.min
if args.has_key('max'):
self.max = args['max']
if args.has_key('color'):
scolor = args['color']
disp = self.controls.display
self.shaft = box(display=disp,
pos=self.pos+self.axis*self.length/2., axis=self.axis,
size=(self.length,0.5*width,0.5*width), color=shaftcolor)
self.indicator = box(display=disp,
pos=self.pos+self.axis*(self.__value-self.min)*self.length/(self.max-self.min),
axis=self.axis,
size=(width,width,width), color=scolor)
self.active = self.indicator
def getvalue(self):
return self.__value
def setvalue(self, val):
self.update(self.pos+self.axis*(val-self.min)*self.length/(self.max-self.min))
self.__value = val
value = property(getvalue, setvalue) # establishes special getattr/setattr handling
def update(self, pos):
val = self.min+dot((pos-self.pos),self.axis)*(self.max-self.min)/self.length
if val < self.min:
val = self.min
elif val > self.max:
val = self.max
if val != self.__value:
self.indicator.pos = self.pos+self.axis*(val-self.min)*self.length/(self.max-self.min)
self.__value = val
self.execute()
class menu(ctrl):
def __init__(self, **args):
self.type = 'menu'
ctrl.__init__(self, args)
self.items = []
self.width = self.height = 40
self.text = None
self.color = gray
self.nitem = 0
self.open = 0 # true if menu display open in the window
self.action = 1 # dummy placeholder; what is driven is menu.execute()
if args.has_key('width'):
self.width = args['width']
if args.has_key('height'):
self.height = args['height']
if args.has_key('text'):
self.text = args['text']
if args.has_key('color'):
self.color = args['color']
self.thick = 0.2*self.width
disp = self.controls.display
self.active = box(display=disp, pos=self.pos+vector(0,0,self.thick),
size=(self.width,self.height,self.thick), color=self.color)
self.label = label(display=disp, pos=self.active.pos, color=color.black,
text=self.text, line=0, box=0, opacity=0)
def inmenu(self, pos): # return item number (0-N) where mouse is, or -1
# note that item is 0 if mouse is in menu title
if self.pos.x-self.width/2. < pos.x < self.pos.x+self.width/2.:
nitem = int((self.pos.y+self.height/2.-pos.y)/self.height)
if 0 <= nitem <= len(self.items):
return(nitem)
else:
return(-1)
return(-1)
def highlight(self, pos): # mouse down: open the menu, displaying the menu items
self.nitem = self.inmenu(pos)
if self.open: # "sticky" menu already open
if self.nitem > 0:
self.update(pos)
else:
self.unhighlight(pos)
self.open = 0
return
pos = self.pos-vector(0,self.height,0)
self.boxes = []
self.highlightedbox = None
disp = self.controls.display
for item in self.items:
self.boxes.append( (box(display=disp, pos=pos+vector(0,0,self.thick),
size=(self.width,self.height,self.thick), color=self.color),
label(display=disp, pos=pos, color=color.black,
text=item[0], line=0, box=0, opacity=0)) )
pos = pos-vector(0,self.height,0)
def unhighlight(self, pos): # mouse up: close the menu; selected item will be executed
self.nitem = self.inmenu(pos)
if self.nitem == 0 and not self.open: # don't close if mouse up in menu title
self.controls.focus = self # restore menu to be in focus
self.open = 1
return
for box in self.boxes:
box[0].visible = 0
box[1].visible = 0
self.boxes = []
self.open = 0
self.execute()
def update(self, pos): # highlight an individual item during drag
self.nitem = self.inmenu(pos)
if self.nitem > 0:
if self.highlightedbox is not None:
self.highlightedbox.color = gray
if self.items[self.nitem-1][1]: # if there is an associated action
self.highlightedbox = self.boxes[self.nitem-1][0]
self.highlightedbox.color = darkgray
else:
if self.highlightedbox is not None:
self.highlightedbox.color = gray
self.highlightedbox = None
def execute(self):
if self.nitem > 0:
action = self.items[self.nitem-1][1]
if action:
apply(action)
if __name__ == '__main__': # for testing the module
# Create "call-back" routines, routines that are called by the interact
# machinery when certain mouse events happen:
def setdir(direction): # called on button up events
cube.dir = direction
def togglecubecolor(): # called on toggle switch flips
if t1.value:
cube.color = color.cyan
else:
cube.color = color.red
def cubecolor(value): # called on a menu choice
cube.color = value
if cube.color == color.red:
t1.value = 0 # make toggle switch setting consistent with menu choice
else:
t1.value = 1
def setrate(obj): # called on slider drag events
cuberate(obj.value) # value is min-max slider position
if obj is s1:
s2.value = s1.value # demonstrate coupling of the two sliders
else:
s1.value = s2.value
def cuberate(value):
cube.dtheta = 2*value*pi/1e4
w = 350
display(x=w, y=0, width=w, height=w, range=1.5, forward=-vector(0,1,1), newzoom=1)
cube = box(color=color.red)
# In establishing the controls window, range=60 means what it usually means:
# (0,0) is in the center of the window, and (60,60) is the lower right corner.
# If range is not specified, the default is 100.
c = controls(x=0, y=0, width=w, height=w, range=60)
# Buttons have a "text" attribute (the button label) which can be read and set.
# Toggles have "text0" and "text1" attributes which can be read and set.
# Toggles and sliders have a "value" attribute (0/1, or location of indicator) which can be read and set.
# The pos attribute for buttons, toggles, and menus is the center of the control (like "box").
# The pos attribute for sliders is at one end, and axis points to the other end (like "cylinder").
# By default a control is created in the most recently created "controls" window, but you
# can change this by specifying "controls=..." when creating a button, toggle, slider, or menu.
# The Python construct "lambda: setdir(-1)" below passes the location of the setdir function
# to the interact machinery, which uses "apply" to call the function when an action
# is to be taken. This scheme ensures that the execution of the function takes place
# in the appropriate namespace context in the case of importing the controls module.
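    # As a purely illustrative (hypothetical) example of the "controls=..." option described
    # above, a control could be bound to the window c explicitly; passing 0 to setdir would
    # halt the rotation applied in the loop at the bottom of this demo:
    #   b_stop = button(pos=(0,45), height=30, width=40, text='Stop', controls=c, action=lambda: setdir(0))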
bl = button(pos=(-30,30), height=30, width=40, text='Left', action=lambda: setdir(-1))
br = button(pos=(30,30), height=30, width=40, text='Right', action=lambda: setdir(1))
s1 = slider(pos=(-15,-40), width=7, length=70, axis=(1,0.7,0), action=lambda: setrate(s1))
s2 = slider(pos=(-30,-50), width=7, length=50, axis=(0,1,0), action=lambda: setrate(s2))
t1 = toggle(pos=(40,-30), width=10, height=10, text0='Red', text1='Cyan', action=lambda: togglecubecolor())
m1 = menu(pos=(0,0,0), height=7, width=25, text='Options')
# After creating the menu heading, add menu items:
m1.items.append(('Left', lambda: setdir(-1))) # specify menu item title and action to perform
m1.items.append(('Right', lambda: setdir(1)))
m1.items.append(('---------',None)) # a dummy separator
m1.items.append(('Red', lambda: cubecolor(color.red)))
m1.items.append(('Cyan', lambda: cubecolor(color.cyan)))
s1.value = 70 # update the slider
setrate(s1) # set the rotation rate of the cube
setdir(-1) # set the rotation direction of the cube
while 1:
rate(100)
c.interact() # check for events, drive actions; must be executed repeatedly in a loop
cube.rotate(axis=(0,1,0), angle=cube.dir*cube.dtheta)
|
StarcoderdataPython
|
6645893
|
from flask import g
from lowball.builtins.error_handler import default_error_handler
from lowball.builtins.response_class import LowballResponse
class TestDefaultErrorHandler:
def test_basic_exception_handled_properly(self, basic_exception, client_with_response_class):
response = default_error_handler(basic_exception)
assert isinstance(response, LowballResponse)
assert response.status_code == 500
assert g.response_is_exception
assert g.response_exception_log_data == {"error_type": str(type(basic_exception)),
"error_msg": str(basic_exception)}
def test_lowball_exception_is_handled_properly(self, lowball_exception, client_with_response_class):
response = default_error_handler(lowball_exception)
assert isinstance(response, LowballResponse)
assert response.status_code == lowball_exception.code
assert g.response_is_exception
assert g.response_exception_log_data == lowball_exception.additional_log_data
def test_http_exception_code_less_than_500_handled_properly(self, http_exception_lt_500,
client_with_response_class):
response = default_error_handler(http_exception_lt_500)
assert isinstance(response, LowballResponse)
assert response.status_code == http_exception_lt_500.code
def test_http_exception_greater_than_equal_500_handled_properly(self, http_exception_gte_500,
client_with_response_class):
response = default_error_handler(http_exception_gte_500)
assert isinstance(response, LowballResponse)
assert response.status_code == http_exception_gte_500.code
assert g.response_is_exception
assert g.response_exception_log_data == {"error_type": str(type(http_exception_gte_500)),
"error_msg": str(http_exception_gte_500)}
|
StarcoderdataPython
|
12803192
|
<gh_stars>0
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
import base64,time,random,hashlib,json,re,django,platform
from . import server_helper,match,view,web_socket,search_process,GlobalVar,anticheatz
from www.index import player as index_player
def hash_md5(str):
return hashlib.md5(str).hexdigest()
def get_json(result):
return HttpResponse(json.dumps(result, ensure_ascii=False), content_type="application/json,charset=utf-8")
def process_getdata_by_key(key):
return GlobalVar.runSQL('SELECT * FROM userdata WHERE `Key` = %s LIMIT 1', key)
def process_playerlist_decode(playerlist):
return json.loads(base64.b64decode(playerlist).decode(encoding='GBK'))
def process_playerlist_encode(playerlist):
return base64.b64encode(json.dumps(playerlist).encode(encoding='GBK'))
def process_playerlist_remove(playerlist, name):
#del playerlist[name]
playerlist.pop(name)
return playerlist
def get_by_steamid(steamid):
return GlobalVar.runSQL(
'SELECT * FROM userdata WHERE `SteamID` = %s LIMIT 1', (steamid))
def get_by_name(player_name):
return GlobalVar.runSQL(
'SELECT * FROM userdata WHERE `username` = %s LIMIT 1', (player_name))
def process_exit_room(sec_key):
Check = process_getdata_by_key(sec_key)
if not Check:
return False
room_id = Check[0][GlobalVar.sql_userdata_roomid]
myName = Check[0][GlobalVar.sql_userdata_username]
if room_id == '0':
return False
Check = GlobalVar.runSQL(
'SELECT * FROM roomlist WHERE `RoomID` = %s LIMIT 1', (room_id))
if not Check:
GlobalVar.runSQL(
"UPDATE userdata SET `roomid` = '0' WHERE `Key` = %s LIMIT 1", sec_key)
return True
player_list = Check[0][GlobalVar.sql_roomlist_PlayerList]
player_number = Check[0][GlobalVar.sql_roomlist_PlayerNumber]
ready_number = Check[0][GlobalVar.sql_roomlist_ReadyNumber]
player_list_decode = process_playerlist_decode(player_list)
found = False
for name in player_list_decode:
if name == myName:
found = True
if not found:
GlobalVar.runSQL(
"UPDATE userdata SET `roomid` = '0' WHERE `Key` = %s LIMIT 1", sec_key)
return True
if Check[0][GlobalVar.sql_roomlist_ingame]:
return True
# if search_process.check_in_search(Check):
# search_process.stop_search(room_id)
if player_number == 1:
if search_process.check_in_search(Check):
search_process.stop_search(room_id)
GlobalVar.runSQL(
"UPDATE userdata SET `roomid` = '0' WHERE `Key` = %s LIMIT 1", sec_key)
GlobalVar.runSQL(
"DELETE FROM roomlist WHERE `RoomID` = %s LIMIT 1", room_id)
return True
player_list_decode = process_playerlist_remove(player_list_decode, myName)
web_socket.send_player_leave_room(room_id, myName)
new_player_num = player_number - 1
new_ready_num = ready_number - 1
new_max_rank = 100
#{'ready': False, 'Rank': 100, 'ico': 'null'}
for name in player_list_decode:
#print('name:' + name + "rank:" + str(player_list_decode[name]['Rank']))
if player_list_decode[name]['Rank'] > new_max_rank:
new_max_rank = player_list_decode[name]['Rank']
player_list_encode = process_playerlist_encode(
player_list_decode).decode(encoding='GBK')
GlobalVar.runSQL(
"UPDATE userdata SET `roomid` = '0' WHERE `Key` = %s LIMIT 1", sec_key)
GlobalVar.runSQL(
"UPDATE roomlist SET `Rank`=%s,`PlayerNumber` = %s ,`ReadyNumber`= %s,`PlayerList`= %s WHERE `RoomID` = %s LIMIT 1", (new_max_rank, new_player_num, new_ready_num, player_list_encode, room_id))
return True
def get_invietcode(name,code):
check = GlobalVar.runSQL("SELECT * FROM invitecode WHERE `code` = %s and `used` = 0 limit 1", code)
if not check:
return False
GlobalVar.runSQL("UPDATE invitecode SET `used` = 1,`name` = %s WHERE `code` = %s limit 1", (name, code))
return True
@csrf_exempt
def do_register(request):
result = {
'msgType': 'register',
'uFuck': 1,
'success': 0
}
room_config = {
'ico': 'default.jpg',
'title': '菜鸡房间',
'text': '这个人是菜鸡,因为他的房间还是默认的标题和内容!',
'maps': ['de_dust2', 'de_nuke', 'de_mirage', 'de_overpass', 'de_cache', 'de_inferno', 'de_train', 'de_cbble'],
'public': 1
}
data = {
'kill': 0,
'dead': 0,
'first': 0,
'headshot': 0,
'help': 0,
'music': 39227624,
'autoplay': 1,
'matched': [],
'rank': {},
'ico': 'null'
}
if request.method == 'POST':
if True:
if 'Regname' in request.POST and 'Regpass' in request.POST and 'Regemail' in request.POST and 'auth' in request.POST and 'InviteCode' in request.POST:
if not view.auth_send_post(request.POST['auth']):
result['uFuck'] = 6
return get_json(result)
if not re.findall(r'^[0-9a-zA-Z\_\-]+(\.[0-9a-zA-Z\_\-]+)*@[0-9a-zA-Z]+(\.[0-9a-zA-Z]+){1,}$', request.POST['Regemail']):
result['uFuck'] = 2
return get_json(result)
name_proccesed = web_socket.htmlescape(request.POST['Regname'])
if name_proccesed == '你是个好人' or name_proccesed == 'huoji':
result['uFuck'] = 5
return get_json(result)
if len(name_proccesed) > 15:
result['uFuck'] = 7
return get_json(result)
email_proccesed = web_socket.htmlescape(
request.POST['Regemail'])
if not re.search(u'^[_a-zA-Z0-9\u4e00-\u9fa5]+$', name_proccesed):
result['uFuck'] = 5
return get_json(result)
Check = GlobalVar.runSQL(
'SELECT * FROM userdata WHERE username = %s LIMIT 1', (name_proccesed))
if Check:
result['uFuck'] = 3
return get_json(result)
Check = GlobalVar.runSQL(
'SELECT * FROM userdata WHERE email = %s LIMIT 1', (email_proccesed))
if Check:
result['uFuck'] = 4
return get_json(result)
if not get_invietcode(name_proccesed, request.POST['InviteCode']):
result['uFuck'] = 8
return get_json(result)
                password = hash_md5(
hash_md5(request.POST['Regpass'].encode(encoding='GBK')).encode(encoding='GBK'))
TheKey = hash_md5(base64.b64encode(
str(
name_proccesed +
email_proccesed
).encode(encoding='GBK')
+ password.encode(encoding='GBK')
))
data_encode = process_playerlist_encode(
data).decode(encoding='GBK')
GlobalVar.runSQL(
'INSERT INTO userdata (`username`,`password`,`email`,`Key`,`roomconfig`,`data`) VALUES (%s,%s,%s,%s,%s,%s)', (name_proccesed, password, email_proccesed, TheKey, process_playerlist_encode(room_config).decode(encoding='GBK'), data_encode))
result['uFuck'] = 0
result['success'] = 1
return get_json(result)
#except:
# return HttpResponse('you mother fuck up')
return get_json(result)
@csrf_exempt
def do_login(request):
result = {
'msgType': 'Login',
'uFuck': 1,
'secKey': 'NULL',
'success': 0
}
if 'logname' in request.POST and 'logpass' in request.POST:
Check = GlobalVar.runSQL(
'SELECT * FROM userdata WHERE username = %s LIMIT 1', (request.POST['logname']))
if not Check:
result['uFuck'] = 2
return get_json(result)
hashed_key = hash_md5(hash_md5(request.POST['logpass'].encode(
encoding='GBK')).encode(encoding='GBK'))
if Check[0][GlobalVar.sql_userdata_password] != hashed_key:
result['uFuck'] = 3
return get_json(result)
if Check[0][GlobalVar.sql_userdata_banned]:
result['uFuck'] = 4
return get_json(result)
result['secKey'] = Check[0][GlobalVar.sql_userdata_Key]
result['uFuck'] = 0
result['success'] = 1
return get_json(result)
return get_json(result)
def process_check_room(key):
Check = process_getdata_by_key(key)
if not Check:
return 0
sec_key = Check[0][GlobalVar.sql_userdata_Key]
player_name = Check[0][GlobalVar.sql_userdata_username]
room_id = Check[0][GlobalVar.sql_userdata_roomid]
banned = Check[0][GlobalVar.sql_userdata_banned]
if room_id == '0':
return 0
if banned == 1:
return 110
Check = GlobalVar.runSQL(
'SELECT * FROM roomlist WHERE `RoomID` = %s LIMIT 1', room_id)
if not Check:
GlobalVar.runSQL(
"UPDATE userdata SET `roomid` = '0' WHERE `Key` = %s LIMIT 1", key)
return 0
return room_id
def do_check_steamid(request):
result = {
'msgType': 'CheckSteamid',
'success': 0
}
if request.GET and 'key' in request.GET:
data = GlobalVar.runSQL('select * from userdata where `SteamID` is null and `Key` = %s limit 1',request.GET['key'])
if not data:
return get_json(result)
result['success'] = 1
return get_json(result)
return get_json(result)
def do_check(request):
result = {
'msgType': 'CheckRoom',
'uFuck': 1,
'RoomID': 'NULL',
'ingame': 0,
'success': 0
}
if request.method != 'GET':
return get_json(result)
try:
if 'key' in request.GET:
result['uFuck'] = 0
result['success'] = 1
result['RoomID'] = 0
room_id = process_check_room(request.GET['key'])
if room_id == 0:
return get_json(result)
elif room_id == 110:
result['uFuck'] = 3
result['RoomID'] = room_id
return get_json(result)
else:
Check = GlobalVar.runSQL(
'SELECT * FROM roomlist WHERE `RoomID` = %s AND `ingame` = 0 LIMIT 1', room_id)
if not Check:
result['ingame'] = 1
result['uFuck'] = 2
result['RoomID'] = room_id
return get_json(result)
except:
result['success'] = 0
return get_json(result)
return get_json(result)
def do_exit(request):
result = {
'msgType': 'ExitRoom',
'uFuck': 1,
'success': 0
}
    # needs a try block here
if True:
if 'key' in request.GET:
result['success'] = process_exit_room(request.GET['key'])
return get_json(result)
# except:
return get_json(result)
def do_join(request):
result = {
'msgType': 'JoinRoom',
'uFuck': 1,
'RoomID': 'NULL',
'success': 0
}
if request.method != 'GET':
return get_json(result)
    # needs a try block
try:
if 'key' in request.GET and 'create' in request.GET and 'roomid' in request.GET:
if request.GET['create'] != 'true' and request.GET['create'] != 'false':
return get_json(result)
sec_key = request.GET['key']
Check = process_getdata_by_key(sec_key)
if not Check:
return get_json(result)
room_id = process_check_room(sec_key)
if room_id != 0:
result['uFuck'] = 2
result['RoomID'] = room_id
return get_json(result)
player_name = Check[0][GlobalVar.sql_userdata_username]
player_rank = Check[0][GlobalVar.sql_userdata_rank]
player_room_config = Check[0][GlobalVar.sql_userdata_roomconfig]
player_ico = Check[0][GlobalVar.sql_userdata_PlayerInfo]
if Check[0][GlobalVar.sql_userdata_banned]:
result['RoomID'] = '好聪明哦'
return get_json(result)
if request.GET['create'] == 'true':
player_room_config_decode = process_playerlist_decode(
player_room_config)
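                # Derive a 6-character room id from md5(current timestamp + player name)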
time_tick = str(time.time()).replace('.', '')
room_id = hash_md5(
str(time_tick + str(player_name)).encode(encoding='GBK'))[0:6]
player_list = process_playerlist_encode({
player_name:
{
'ready': False,
'Rank': player_rank,
'ico': player_ico
}
}).decode(encoding='GBK')
GlobalVar.runSQL(
"UPDATE userdata SET `roomid` = %s WHERE `Key` = %s LIMIT 1", (room_id, sec_key))
GlobalVar.runSQL(
'INSERT INTO roomlist (`RoomID`,`ingame`,`PlayerNumber`,`PlayerList`,`Rank`,`config`,`public`) VALUES (%s,%s,%s,%s,%s,%s,%s)', (room_id, 0, 1, player_list, player_rank, player_room_config, player_room_config_decode['public']))
result['uFuck'] = 0
result['success'] = 1
result['RoomID'] = room_id
# print(result['success'])
return get_json(result)
elif request.GET['create'] == 'false':
room_id = request.GET['roomid']
result['RoomID'] = room_id
Check = GlobalVar.runSQL(
'SELECT * FROM roomlist WHERE `RoomID` = %s LIMIT 1', (room_id))
if not Check:
result['uFuck'] = 3
return get_json(result)
if Check[0][GlobalVar.sql_roomlist_PlayerNumber] >= 5:
result['uFuck'] = 4
return get_json(result)
if Check[0][GlobalVar.sql_roomlist_ingame] == 1:
result['uFuck'] = 6
return get_json(result)
if search_process.check_in_search(Check):
search_process.stop_search(room_id)
result['success'] = 1
GlobalVar.runSQL(
"UPDATE userdata SET `roomid` = %s WHERE `Key` = %s LIMIT 1", (room_id, sec_key))
plyaer_list_decode = process_playerlist_decode(
Check[0][GlobalVar.sql_roomlist_PlayerList])
plyaer_list_decode[player_name] = {
'ready': False,
'Rank': player_rank,
'ico': player_ico
}
new_player_num = Check[0][GlobalVar.sql_roomlist_PlayerNumber] + 1
new_ready_num = Check[0][GlobalVar.sql_roomlist_ReadyNumber] + 1
new_max_rank = 100
for name in plyaer_list_decode:
if plyaer_list_decode[name]['Rank'] > new_max_rank:
new_max_rank = plyaer_list_decode[name]['Rank']
player_list_encode = process_playerlist_encode(
plyaer_list_decode).decode(encoding='GBK')
#print(str((new_max_rank, player_list_encode,
# new_player_num, new_ready_num, room_id)))
GlobalVar.runSQL(
"UPDATE roomlist SET `Rank` = %s ,`PlayerList` = %s,`PlayerNumber`=%s,`ReadyNumber`=%s WHERE `RoomID` = %s LIMIT 1",
(new_max_rank, player_list_encode, new_player_num, new_ready_num, room_id))
result['uFuck'] = 0
result['success'] = 1
result['RoomID'] = room_id
web_socket.send_player_join_room(room_id, player_name)
return get_json(result)
except:
return get_json(result)
@csrf_exempt
def process(request, moudle):
result = HttpResponse()
if moudle in 'register':
result = do_register(request)
if moudle in 'login':
result = do_login(request)
if moudle in 'check_in_room':
result = do_check(request)
if moudle in 'exit_room':
result = do_exit(request)
if moudle in 'join_room':
result = do_join(request)
if moudle in 'check_steamid':
result = do_check_steamid(request)
if moudle in 'resolve_server':
result = server_helper.resolve_server(request)
if moudle in 'get_all_casual_server':
result = server_helper.get_all_casual_server(request)
if moudle in 'get_player':
result = index_player.api_get(request)
if moudle in 'set_music':
result = index_player.api_setmusic(request)
if moudle in 'api_get_by_key':
result = index_player.api_get_byKey(request)
if moudle in 'update_image':
result = index_player.update_image(request)
if moudle in 'match_api':
result = match.main(request)
if moudle in 'get_match':
result = match.get_matched_info(request)
if moudle in 'anticheat':
result = anticheatz.main(request)
return result
|
StarcoderdataPython
|
3489323
|
'''
Common testing environment configuration.
'''
import os
# Default environment configuration file.
DEFAULT_ENVIRONMENT="environment/default.json"
# Default image comparison tolerance.
DEFAULT_TOLERANCE = 1e-12
# Default image test timeout.
DEFAULT_TIMEOUT = 600
IMAGE_TESTS_DIR = "Tests/image_tests"
# Supported image extensions.
IMAGE_EXTENSIONS = ['.png', '.jpg', '.tga', '.bmp', '.pfm', '.exr']
# Suffix to use for error images.
ERROR_IMAGE_SUFFIX = '.error.png'
if os.name == 'nt':
# Build configurations.
BUILD_CONFIGS = {
'ReleaseD3D12': {
'build_dir': 'Bin/x64/Release',
},
'DebugD3D12': {
'build_dir': 'Bin/x64/Debug'
},
'ReleaseVK': {
'build_dir': 'Bin/x64/Release'
},
'DebugVK': {
'build_dir': 'Bin/x64/Debug'
}
}
DEFAULT_BUILD_CONFIG = 'ReleaseD3D12'
SOLUTION_FILE="Falcor.sln"
# Executables.
FALCOR_TEST_EXE = 'FalcorTest.exe'
MOGWAI_EXE = 'Mogwai.exe'
IMAGE_COMPARE_EXE = 'ImageCompare.exe'
else:
raise RuntimeError('Testing is only supported on Windows')
|
StarcoderdataPython
|
8082890
|
# Generated by Django 3.0.6 on 2020-05-27 22:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('gender', models.IntegerField(choices=[(1, 'Male'), (2, 'Female'), (3, 'Other')], default=3, verbose_name='gender')),
('bio', models.TextField(blank=True, max_length=500, verbose_name='biography')),
('avatar', models.ImageField(blank=True, upload_to='tmp/avatars', verbose_name='photo')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('skill_1', models.CharField(blank=True, max_length=30, verbose_name='first skill')),
('level_1', models.IntegerField(blank=True, choices=[(1, 'Beginner'), (2, 'Middle'), (3, 'Boss'), (4, 'The Best One')], default=1, verbose_name='first skill level')),
('skill_2', models.CharField(blank=True, max_length=30, verbose_name='second skill')),
('level_2', models.IntegerField(blank=True, choices=[(1, 'Beginner'), (2, 'Middle'), (3, 'Boss'), (4, 'The Best One')], default=1, verbose_name='second skill level')),
('skill_3', models.CharField(blank=True, max_length=30, verbose_name='third skill')),
('level_3', models.IntegerField(blank=True, choices=[(1, 'Beginner'), (2, 'Middle'), (3, 'Boss'), (4, 'The Best One')], default=1, verbose_name='third skill level')),
('skill_4', models.CharField(blank=True, max_length=30, verbose_name='fourth skill')),
('level_4', models.IntegerField(blank=True, choices=[(1, 'Beginner'), (2, 'Middle'), (3, 'Boss'), (4, 'The Best One')], default=1, verbose_name='fourth skill level')),
('plate_img_1', models.ImageField(blank=True, upload_to='tmp/plates', verbose_name='first image')),
('plate_text_1', models.TextField(blank=True, max_length=200, verbose_name='first text')),
('plate_img_2', models.ImageField(blank=True, upload_to='tmp/plates', verbose_name='second image')),
('plate_text_2', models.TextField(blank=True, max_length=200, verbose_name='second text')),
('plate_img_3', models.ImageField(blank=True, upload_to='tmp/plates', verbose_name='third image')),
('plate_text_3', models.TextField(blank=True, max_length=200, verbose_name='third text')),
('plate_img_4', models.ImageField(blank=True, upload_to='tmp/plates', verbose_name='fourth image')),
('plate_text_4', models.TextField(blank=True, max_length=200, verbose_name='fourth text')),
('template', models.IntegerField(choices=[(1, 'Square'), (2, 'Bricks'), (3, 'Ladder'), (4, 'Journal')], default=1, verbose_name='profile template')),
('finished', models.BooleanField(default=False, verbose_name='registration finished')),
('owner', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Code',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.CharField(max_length=6, verbose_name='encoded numeric part')),
('sort', models.IntegerField(choices=[(1, 'Personal'), (2, 'Enterprise'), (3, 'Self-Hosted')], verbose_name='code type')),
('code', models.CharField(max_length=8, verbose_name='invitation code')),
('url', models.CharField(max_length=150, verbose_name='invitation link')),
('qr', models.FileField(upload_to='qr_codes', verbose_name='invitation qr code')),
('joined', models.IntegerField(default=0, verbose_name='number of invited people')),
('owner', models.OneToOneField(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='invite_code', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='customuser',
name='invitation_code',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='invited_list', to='main.Code', verbose_name='joined using code'),
),
migrations.AddField(
model_name='customuser',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
|
StarcoderdataPython
|
6449985
|
<filename>mapproxy/test/system/test_mapserver.py
# This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import sys
import stat
import shutil
from io import BytesIO
import pytest
from mapproxy.request.wms import WMS111MapRequest
from mapproxy.compat.image import Image
from mapproxy.test.image import is_png
from mapproxy.test.system import SysTest
pytestmark = pytest.mark.skipif(
sys.platform == "win32", reason="CGI tests not ported for Windows"
)
@pytest.fixture(scope="module")
def config_file():
return "mapserver.yaml"
class TestMapServerCGI(SysTest):
@pytest.fixture(scope="class")
def additional_files(self, base_dir):
shutil.copy(
os.path.join(os.path.dirname(__file__), "fixture", "cgi.py"),
base_dir.strpath,
)
os.chmod(
base_dir.join("cgi.py").strpath, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR
)
base_dir.join("tmp").mkdir()
def setup(self):
self.common_map_req = WMS111MapRequest(
url="/service?",
param=dict(
service="WMS",
version="1.1.1",
bbox="-180,0,0,80",
width="200",
height="200",
layers="ms",
srs="EPSG:4326",
format="image/png",
styles="",
request="GetMap",
),
)
def test_get_map(self, app):
resp = app.get(self.common_map_req)
assert resp.content_type == "image/png"
data = BytesIO(resp.body)
assert is_png(data)
img = Image.open(data)
img = img.convert("RGB")
assert img.getcolors() == [(200 * 200, (255, 0, 0))]
|
StarcoderdataPython
|
8139238
|
<reponame>iribirii/daily_scripts
#!/usr/bin/env python3
# Author: <NAME>
# 01-12-2020
'''
##################
## Description: ##
##################
This script generates gaussian inputs from the selected xyz files.
############
## Usage: ##
############
xyztogjf file 'commands for gaussian'
'''
# Imports
import sys
# Open XYZ file and read it
file = sys.argv[1]
name = file.split('.')[0]
commands = sys.argv[2]
logfile = open(file, 'r')
text = logfile.readlines()
logfile.close()
ofile = open(name+'.gjf', "w+")
ofile.write('%nproc=40\n')
ofile.write('%mem=150GB\n')
ofile.write('# ' + commands + '\n')  # Gaussian route section taken from the command-line argument
ofile.write('\n')
ofile.write(file+'\n')
ofile.write('\n')
ofile.write('1 1\n')
for i in text[2:]:
ofile.write(''.join(i))
ofile.write('\n')
ofile.close()
|
StarcoderdataPython
|
6688007
|
<filename>archived/image_transfer/client/client.py
import socket
from PIL import Image
import numpy as np
port = 8001
host = 'localhost'
s = socket.socket()
s.connect((host,port))
imageFolder = "./images/"
imageFormat = ".png"
imageIndex = 0
n = 0
while True:
imagebytes = s.recv(307200*4)
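    # recv() may return a partial frame; only buffers of exactly 640*480*3 bytes (one full RGB frame) are decoded and saved below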
if imagebytes:
print(n)
n += 1
print ("received data")
print (len(imagebytes))
if (len(imagebytes) == 307200*3):
print("saving image")
data = np.frombuffer(imagebytes, dtype=np.uint8)
data.shape = (480,640,3)
filename = imageFolder + str(imageIndex) + imageFormat
imageIndex += 1
if (imageIndex == 20):
imageIndex = 0
img = Image.fromarray(data, 'RGB')
img.show()
img.save(filename)
|
StarcoderdataPython
|
3498697
|
def lucky_sum(a, b, c):
sm = 0
for n in (a, b, c):
if n != 13:
sm += n
else:
break
return sm
def close_far(a, b, c):
a_b_diff = abs(a - b)
a_c_diff = abs(a - c)
b_c_diff = abs(b - c)
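    # One of b or c must differ from a by at most 1 while the other differs from both
    # remaining values by 2 or more; the != between the two clauses acts as an exclusive-or.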
return (
(a_b_diff <= 1 and a_c_diff >= 2 and b_c_diff >= 2) !=
(a_c_diff <= 1 and a_b_diff >= 2 and b_c_diff >= 2)
)
def round_sum(a, b, c):
    # Round half up to the nearest 10 (e.g. 15 -> 20, 25 -> 30); avoids the banker's rounding of round()
    rounded_values = [(num + 5) // 10 * 10 for num in (a, b, c)]
return sum(rounded_values)
def no_teen_sum(a, b, c):
nums = (a, b, c)
return sum(fix_teen(n) for n in nums)
def fix_teen(n):
return 0 if n not in (15, 16) and 13 <= n <= 19 else n
def make_bricks(small, big, goal):
big_needed = min(big, goal // 5)
return goal - (big_needed * 5) <= small
def make_chocolate(small, big, goal):
if goal >= 5 * big:
remainder = goal - 5 * big
else:
remainder = goal % 5
if remainder <= small:
return remainder
return -1
|
StarcoderdataPython
|
1741725
|
"""Funções de suporte para Árvore Binária."""
def gerar_nova_sub_arvore(esquerda, direita):
"""Combina ambos os galhos para formar uma nova sub-árvore, se necessário.
Se o nodo a ser removido possui dois filhos, um será ser promovido ao espaço
do que está sendo removido, mas o outro galho não pode ser perdido, então deve
ser adicionado de volta à árvore.
"""
    if not esquerda:
        # Left branch is empty, the new subtree is simply the right one
        direita.nivel -= 1
        return direita
    elif not direita:
        # Right branch is empty, the new subtree is simply the left one
        esquerda.nivel -= 1
        return esquerda
    # Both sides have nodes: insert the right branch into the left one and return
    # the left branch as the new root.
esquerda.nivel -= 1
esquerda.adicionar(direita)
return esquerda
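# Illustrative sketch (not part of the original module): how gerar_nova_sub_arvore might be
# used when deleting a node that has two children. The Nodo class below is a hypothetical
# stand-in -- the real tree code is assumed to provide its own node type exposing `nivel`
# (level) and `adicionar` (insert) members, which is all this helper relies on.
if __name__ == '__main__':
    class Nodo(object):
        def __init__(self, valor, nivel=1):
            self.valor = valor
            self.nivel = nivel
            self.esquerda = None
            self.direita = None

        def adicionar(self, outro):
            # Naive insert by value, just enough for the illustration.
            if outro.valor < self.valor:
                if self.esquerda:
                    self.esquerda.adicionar(outro)
                else:
                    self.esquerda = outro
            else:
                if self.direita:
                    self.direita.adicionar(outro)
                else:
                    self.direita = outro

    filho_esq = Nodo(3, nivel=2)
    filho_dir = Nodo(8, nivel=2)
    # Removing the (implicit) parent that held both children: the left child is promoted
    # one level up and the right child is re-inserted under it.
    nova_raiz = gerar_nova_sub_arvore(filho_esq, filho_dir)
    print(nova_raiz.valor, nova_raiz.nivel)  # -> 3 1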
|
StarcoderdataPython
|
8118410
|
<reponame>andersfischernielsen/ROS-dependency-checker
#!/usr/bin/env python
#
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement, print_function
import threading
import sys
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
from cob_msgs.msg import AccessPoint
DIAG_NAME = 'Wifi Status (ddwrt)'
WARN_TIME = 30
ERROR_TIME = 60
def wifi_to_diag(msg):
stat = DiagnosticStatus()
stat.name = DIAG_NAME
stat.level = DiagnosticStatus.OK
stat.message = 'OK'
stat.values.append(KeyValue(key='ESSID', value=msg.essid))
stat.values.append(KeyValue(key='Mac Address', value=msg.macaddr))
stat.values.append(KeyValue(key='Signal', value=str(msg.signal)))
stat.values.append(KeyValue(key='Noise', value=str(msg.noise)))
stat.values.append(KeyValue(key='Sig/Noise', value=str(msg.snr)))
stat.values.append(KeyValue(key='Channel', value=str(msg.channel)))
stat.values.append(KeyValue(key='Rate', value=msg.rate))
stat.values.append(KeyValue(key='TX Power', value=msg.tx_power))
stat.values.append(KeyValue(key='Quality', value=str(msg.quality)))
return stat
def mark_diag_stale(diag_stat = None, error = False):
if not diag_stat:
diag_stat = DiagnosticStatus()
diag_stat.message = 'No Updates'
diag_stat.name = DIAG_NAME
else:
diag_stat.message = 'Updates Stale'
diag_stat.level = DiagnosticStatus.WARN
if error:
diag_stat.level = DiagnosticStatus.ERROR
return diag_stat
class WifiMonitor(object):
def __init__(self):
self._mutex = threading.Lock()
self._last_msg = None
self._last_update_time = None
self._start_time = rospy.get_time()
self._diag_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=50)
self._ddwrt_sub = rospy.Subscriber('ddwrt/accesspoint', AccessPoint, self._cb)
def _cb(self, msg):
with self._mutex:
self._last_msg = msg
self._last_update_time = rospy.get_time()
def publish_stats(self):
with self._mutex:
if self._last_msg:
ddwrt_stat = wifi_to_diag(self._last_msg)
update_diff = rospy.get_time() - self._last_update_time
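                # Flag the status as stale after WARN_TIME (30 s) and escalate to an error after ERROR_TIME (60 s) without an update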
if update_diff > WARN_TIME:
ddwrt_stat = mark_diag_stale(ddwrt_stat)
if (rospy.get_time() - self._last_update_time) > ERROR_TIME:
ddwrt_stat = mark_diag_stale(ddwrt_stat, True)
ddwrt_stat.values.append(KeyValue(key='Time Since Update', value=str(update_diff)))
else:
error_state = (rospy.get_time() - self._start_time) > ERROR_TIME
ddwrt_stat = mark_diag_stale(None, error_state)
ddwrt_stat.values.append(KeyValue(key='Time Since Update', value="N/A"))
msg = DiagnosticArray()
msg.header.stamp = rospy.get_rostime()
msg.status.append(ddwrt_stat)
self._diag_pub.publish(msg)
if __name__ == '__main__':
try:
rospy.init_node('ddwrt_diag')
except rospy.exceptions.ROSInitException:
print('Wifi monitor is unable to initialize node. Master may not be running.', file=sys.stderr)
sys.exit(2)
wifi_monitor = WifiMonitor()
rate = rospy.Rate(1.0)
try:
while not rospy.is_shutdown():
rate.sleep()
wifi_monitor.publish_stats()
except KeyboardInterrupt:
pass
    except Exception:
import traceback
traceback.print_exc()
sys.exit(0)
|
StarcoderdataPython
|
3291419
|
<gh_stars>0
"""
Per session GetMute() SetMute() using ISimpleAudioVolume.
"""
from __future__ import print_function
from pycaw.pycaw import AudioUtilities, ISimpleAudioVolume
def main():
sessions = AudioUtilities.GetAllSessions()
for session in sessions:
volume = session._ctl.QueryInterface(ISimpleAudioVolume)
print("volume.GetMute(): %s" % volume.GetMute())
volume.SetMute(1, None)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
8049761
|
from .checker import Checker
template_path = "example/template.json"
conf_valid_path = "example/config_valid.json"
conf_error_path = "example/config_with_error.json"
checker = Checker(template_path)
error = checker(conf_valid_path)
print(error)
error = checker(conf_error_path)
print(error)
|
StarcoderdataPython
|
343176
|
import numpy as np
import sympy as sp
from functools import singledispatch
import FIAT
from FIAT.polynomial_set import mis, form_matrix_product
import gem
from finat.finiteelementbase import FiniteElementBase
from finat.sympy2gem import sympy2gem
class FiatElement(FiniteElementBase):
"""Base class for finite elements for which the tabulation is provided
by FIAT."""
def __init__(self, fiat_element):
super(FiatElement, self).__init__()
self._element = fiat_element
@property
def cell(self):
return self._element.get_reference_element()
@property
def degree(self):
# Requires FIAT.CiarletElement
return self._element.degree()
@property
def formdegree(self):
return self._element.get_formdegree()
def entity_dofs(self):
return self._element.entity_dofs()
def entity_closure_dofs(self):
return self._element.entity_closure_dofs()
def space_dimension(self):
return self._element.space_dimension()
@property
def index_shape(self):
return (self._element.space_dimension(),)
@property
def value_shape(self):
return self._element.value_shape()
def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None):
'''Return code for evaluating the element at known points on the
reference element.
:param order: return derivatives up to this order.
:param ps: the point set.
:param entity: the cell entity on which to tabulate.
'''
space_dimension = self._element.space_dimension()
value_size = np.prod(self._element.value_shape(), dtype=int)
fiat_result = self._element.tabulate(order, ps.points, entity)
result = {}
# In almost all cases, we have
# self.space_dimension() == self._element.space_dimension()
# But for Bell, FIAT reports 21 basis functions,
# but FInAT only 18 (because there are actually 18
# basis functions, and the additional 3 are for
# dealing with transformations between physical
# and reference space).
index_shape = (self._element.space_dimension(),)
for alpha, fiat_table in fiat_result.items():
if isinstance(fiat_table, Exception):
result[alpha] = gem.Failure(self.index_shape + self.value_shape, fiat_table)
continue
derivative = sum(alpha)
table_roll = fiat_table.reshape(
space_dimension, value_size, len(ps.points)
).transpose(1, 2, 0)
exprs = []
for table in table_roll:
if derivative < self.degree:
point_indices = ps.indices
point_shape = tuple(index.extent for index in point_indices)
exprs.append(gem.partial_indexed(
gem.Literal(table.reshape(point_shape + index_shape)),
point_indices
))
elif derivative == self.degree:
# Make sure numerics satisfies theory
exprs.append(gem.Literal(table[0]))
else:
# Make sure numerics satisfies theory
assert np.allclose(table, 0.0)
exprs.append(gem.Zero(self.index_shape))
if self.value_shape:
beta = self.get_indices()
zeta = self.get_value_indices()
result[alpha] = gem.ComponentTensor(
gem.Indexed(
gem.ListTensor(np.array(
[gem.Indexed(expr, beta) for expr in exprs]
).reshape(self.value_shape)),
zeta),
beta + zeta
)
else:
expr, = exprs
result[alpha] = expr
return result
def point_evaluation(self, order, refcoords, entity=None):
'''Return code for evaluating the element at an arbitrary points on
the reference element.
:param order: return derivatives up to this order.
:param refcoords: GEM expression representing the coordinates
on the reference entity. Its shape must be
a vector with the correct dimension, its
free indices are arbitrary.
:param entity: the cell entity on which to tabulate.
'''
if entity is None:
entity = (self.cell.get_dimension(), 0)
entity_dim, entity_i = entity
# Spatial dimension of the entity
esd = self.cell.construct_subelement(entity_dim).get_spatial_dimension()
assert isinstance(refcoords, gem.Node) and refcoords.shape == (esd,)
# Dispatch on FIAT element class
return point_evaluation(self._element, order, refcoords, (entity_dim, entity_i))
@property
def mapping(self):
mappings = set(self._element.mapping())
if len(mappings) != 1:
return None
else:
result, = mappings
return result
@singledispatch
def point_evaluation(fiat_element, order, refcoords, entity):
raise AssertionError("FIAT element expected!")
@point_evaluation.register(FIAT.FiniteElement)
def point_evaluation_generic(fiat_element, order, refcoords, entity):
# Coordinates on the reference entity (SymPy)
esd, = refcoords.shape
Xi = sp.symbols('X Y Z')[:esd]
space_dimension = fiat_element.space_dimension()
value_size = np.prod(fiat_element.value_shape(), dtype=int)
fiat_result = fiat_element.tabulate(order, [Xi], entity)
result = {}
for alpha, fiat_table in fiat_result.items():
if isinstance(fiat_table, Exception):
result[alpha] = gem.Failure((space_dimension,) + fiat_element.value_shape(), fiat_table)
continue
# Convert SymPy expression to GEM
mapper = gem.node.Memoizer(sympy2gem)
mapper.bindings = {s: gem.Indexed(refcoords, (i,))
for i, s in enumerate(Xi)}
gem_table = np.vectorize(mapper)(fiat_table)
table_roll = gem_table.reshape(space_dimension, value_size).transpose()
exprs = []
for table in table_roll:
exprs.append(gem.ListTensor(table.reshape(space_dimension)))
if fiat_element.value_shape():
beta = (gem.Index(extent=space_dimension),)
zeta = tuple(gem.Index(extent=d)
for d in fiat_element.value_shape())
result[alpha] = gem.ComponentTensor(
gem.Indexed(
gem.ListTensor(np.array(
[gem.Indexed(expr, beta) for expr in exprs]
).reshape(fiat_element.value_shape())),
zeta),
beta + zeta
)
else:
expr, = exprs
result[alpha] = expr
return result
@point_evaluation.register(FIAT.CiarletElement)
def point_evaluation_ciarlet(fiat_element, order, refcoords, entity):
# Coordinates on the reference entity (SymPy)
esd, = refcoords.shape
Xi = sp.symbols('X Y Z')[:esd]
# Coordinates on the reference cell
cell = fiat_element.get_reference_element()
X = cell.get_entity_transform(*entity)(Xi)
# Evaluate expansion set at SymPy point
poly_set = fiat_element.get_nodal_basis()
degree = poly_set.get_embedded_degree()
base_values = poly_set.get_expansion_set().tabulate(degree, [X])
m = len(base_values)
assert base_values.shape == (m, 1)
base_values_sympy = np.array(list(base_values.flat))
# Find constant polynomials
def is_const(expr):
try:
float(expr)
return True
except TypeError:
return False
const_mask = np.array(list(map(is_const, base_values_sympy)))
# Convert SymPy expression to GEM
mapper = gem.node.Memoizer(sympy2gem)
mapper.bindings = {s: gem.Indexed(refcoords, (i,))
for i, s in enumerate(Xi)}
base_values = gem.ListTensor(list(map(mapper, base_values.flat)))
# Populate result dict, creating precomputed coefficient
# matrices for each derivative tuple.
result = {}
for i in range(order + 1):
for alpha in mis(cell.get_spatial_dimension(), i):
D = form_matrix_product(poly_set.get_dmats(), alpha)
table = np.dot(poly_set.get_coeffs(), np.transpose(D))
assert table.shape[-1] == m
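            # If every expansion function contributes either a constant value or an
            # all-zero coefficient column, the tabulation collapses to a plain literal.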
zerocols = np.isclose(abs(table).max(axis=tuple(range(table.ndim - 1))), 0.0)
if all(np.logical_or(const_mask, zerocols)):
vals = base_values_sympy[const_mask]
result[alpha] = gem.Literal(table[..., const_mask].dot(vals))
else:
beta = tuple(gem.Index() for s in table.shape[:-1])
k = gem.Index()
result[alpha] = gem.ComponentTensor(
gem.IndexSum(
gem.Product(gem.Indexed(gem.Literal(table), beta + (k,)),
gem.Indexed(base_values, (k,))),
(k,)
),
beta
)
return result
class Regge(FiatElement): # naturally tensor valued
def __init__(self, cell, degree):
super(Regge, self).__init__(FIAT.Regge(cell, degree))
class HellanHerrmannJohnson(FiatElement): # symmetric matrix valued
def __init__(self, cell, degree):
super(HellanHerrmannJohnson, self).__init__(FIAT.HellanHerrmannJohnson(cell, degree))
class ScalarFiatElement(FiatElement):
@property
def value_shape(self):
return ()
class Bubble(ScalarFiatElement):
def __init__(self, cell, degree):
super(Bubble, self).__init__(FIAT.Bubble(cell, degree))
class FacetBubble(ScalarFiatElement):
def __init__(self, cell, degree):
super(FacetBubble, self).__init__(FIAT.FacetBubble(cell, degree))
class CrouzeixRaviart(ScalarFiatElement):
def __init__(self, cell, degree):
super(CrouzeixRaviart, self).__init__(FIAT.CrouzeixRaviart(cell, degree))
class Lagrange(ScalarFiatElement):
def __init__(self, cell, degree):
super(Lagrange, self).__init__(FIAT.Lagrange(cell, degree))
class DiscontinuousLagrange(ScalarFiatElement):
def __init__(self, cell, degree):
super(DiscontinuousLagrange, self).__init__(FIAT.DiscontinuousLagrange(cell, degree))
class Serendipity(ScalarFiatElement):
def __init__(self, cell, degree):
super(Serendipity, self).__init__(FIAT.Serendipity(cell, degree))
class DPC(ScalarFiatElement):
def __init__(self, cell, degree):
super(DPC, self).__init__(FIAT.DPC(cell, degree))
class DiscontinuousTaylor(ScalarFiatElement):
def __init__(self, cell, degree):
super(DiscontinuousTaylor, self).__init__(FIAT.DiscontinuousTaylor(cell, degree))
class VectorFiatElement(FiatElement):
@property
def value_shape(self):
return (self.cell.get_spatial_dimension(),)
class RaviartThomas(VectorFiatElement):
def __init__(self, cell, degree):
super(RaviartThomas, self).__init__(FIAT.RaviartThomas(cell, degree))
class BrezziDouglasMarini(VectorFiatElement):
def __init__(self, cell, degree):
super(BrezziDouglasMarini, self).__init__(FIAT.BrezziDouglasMarini(cell, degree))
class BrezziDouglasFortinMarini(VectorFiatElement):
def __init__(self, cell, degree):
super(BrezziDouglasFortinMarini, self).__init__(FIAT.BrezziDouglasFortinMarini(cell, degree))
class Nedelec(VectorFiatElement):
def __init__(self, cell, degree):
super(Nedelec, self).__init__(FIAT.Nedelec(cell, degree))
class NedelecSecondKind(VectorFiatElement):
def __init__(self, cell, degree):
super(NedelecSecondKind, self).__init__(FIAT.NedelecSecondKind(cell, degree))
|
StarcoderdataPython
|
5185636
|
<reponame>hiraksarkar/BioBombe<filename>10.gene-expression-signatures/scripts/nbconverted/2.investigate-sex-signature-genes.py
#!/usr/bin/env python
# coding: utf-8
# # Investigating Sex Signature Features
#
# **<NAME>, 2019**
# In[1]:
import os
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn import preprocessing
sys.path.append("../8.gtex-interpret")
from scripts.utils import load_weight_matrix, apply_signature
# In[2]:
np.random.seed(123)
# In[3]:
get_ipython().run_line_magic('matplotlib', 'inline')
# ## Load and Process Gene Dictionary
# In[4]:
# Load curated gene names from versioned resource
commit = '<PASSWORD>'
url = 'https://raw.githubusercontent.com/cognoma/genes/{}/data/genes.tsv'.format(commit)
gene_df = pd.read_table(url)
symbol_to_entrez = dict(zip(gene_df.symbol,
gene_df.entrez_gene_id))
gene_df = gene_df.dropna(axis='rows', subset=['synonyms'])
gene_df.synonyms = gene_df.synonyms.str.split('|')
all_syn = (
gene_df.apply(lambda x: pd.Series(x.synonyms), axis=1)
.stack()
.reset_index(level=1, drop=True)
)
# Name the synonym series and join with rest of genes
all_syn.name = 'all_synonyms'
gene_with_syn_df = gene_df.join(all_syn)
# Remove rows that have redundant symbols in all_synonyms
gene_with_syn_df = (
gene_with_syn_df
# Drop synonyms that are duplicated - can't be sure of mapping
.drop_duplicates(['all_synonyms'], keep=False)
# Drop rows in which the symbol appears in the list of synonyms
.query('symbol not in all_synonyms')
)
# Create a synonym to entrez mapping and add to dictionary
synonym_to_entrez = dict(zip(gene_with_syn_df.all_synonyms,
gene_with_syn_df.entrez_gene_id))
symbol_to_entrez.update(synonym_to_entrez)
# Load gene updater
url = 'https://raw.githubusercontent.com/cognoma/genes/{}/data/updater.tsv'.format(commit)
updater_df = pd.read_table(url)
old_to_new_entrez = dict(zip(updater_df.old_entrez_gene_id,
updater_df.new_entrez_gene_id))
gene_df.entrez_gene_id = gene_df.entrez_gene_id.astype(str)
print(gene_df.shape)
gene_df.head()
# ## Load and Proces Sex Genes
#
# Using the resource `Sex-Associated Gene Database` (SAGD) ([Shi et al. 2018](https://doi.org/10.1093/nar/gky1040))
#
# Downloading from http://bioinfo.life.hust.edu.cn/SAGD#!/browse_gene
#
# Selecting human species, all tissues, all stages. The downloaded file is included in this repo.
# In[5]:
sex_gene_file = os.path.join("download", "browse_gene_9606.csv")
sex_gene_df = pd.read_csv(sex_gene_file)
# Translate the symbol column to entrez_gene_id
sex_gene_map = sex_gene_df.Symbol.replace(symbol_to_entrez)
sex_gene_map = sex_gene_map.replace(old_to_new_entrez)
sex_gene_df = sex_gene_df.assign(entrez_gene_id=sex_gene_map)
# Median collapse duplicate gene IDs across SAGD groups
sex_gene_df = (
sex_gene_df
.groupby(["Species", "Symbol", "entrez_gene_id"])
.median()
.sort_values(by="Padj")
.reset_index()
)
sex_gene_df.entrez_gene_id = sex_gene_df.entrez_gene_id.astype(str)
sex_gene_df = sex_gene_df.assign(neg_log_p=-1 * np.log10(sex_gene_df.Padj + 1e-300))
print(sex_gene_df.shape)
sex_gene_df.head()
# In[6]:
sex_gene_df.neg_log_p.hist(bins=100)
# ## Load Sex Signatures
# In[7]:
gtex_seed = '451283'
gtex_k = 200
gtex_feature = "nmf_111"
# In[8]:
# Load the gtex weight matrix containing the best sex feature
gtex_weight_df = (
load_weight_matrix(dataset='GTEX',
z_dim=gtex_k,
seed=gtex_seed)
.reset_index()
)
gtex_weight_df.gene_id = gtex_weight_df.gene_id.astype(str)
gtex_weight_df.head()
# In[9]:
# Align the weight matrix to the Sex Gene Database
gtex_sex_feature = (
gtex_weight_df
.merge(gene_df,
left_on="gene_id",
right_on="entrez_gene_id",
how="left")
.loc[:, gene_df.columns.tolist() + [gtex_feature]]
.assign(abs_value_feature = gtex_weight_df.loc[:, gtex_feature].abs().tolist())
)
gtex_sex_feature.entrez_gene_id = gtex_sex_feature.entrez_gene_id.astype(str)
gtex_sex_feature = (
gtex_sex_feature
.merge(sex_gene_df,
left_on="entrez_gene_id",
right_on="entrez_gene_id",
how="left")
.sort_values(by="abs_value_feature", ascending=False)
.dropna(subset=["entrez_gene_id", "Padj"])
.reset_index(drop=True)
)
print(gtex_sex_feature.shape)
# Show the top 10 genes
gtex_sex_feature.head(10)
# In[10]:
gtex_sex_feature.plot(kind="scatter", x=gtex_feature, y="neg_log_p")
# ## TCGA Sex Signature
# In[11]:
tcga_seed = '165158'
tcga_k = 200
tcga_feature = "ica_151"
# In[12]:
# Load the TCGA weight matrix containing the best sex feature
tcga_weight_df = (
load_weight_matrix(dataset='TCGA',
z_dim=tcga_k,
seed=tcga_seed)
.reset_index()
)
tcga_weight_df.gene_id = tcga_weight_df.gene_id.astype(str)
tcga_weight_df.head()
# In[13]:
# Align the weight matrix to the Sex Gene Database
tcga_sex_feature = (
tcga_weight_df
.merge(gene_df,
left_on="gene_id",
right_on="entrez_gene_id",
how="left")
.loc[:, gene_df.columns.tolist() + [tcga_feature]]
.assign(abs_value_feature = tcga_weight_df.loc[:, tcga_feature].abs().tolist())
)
tcga_sex_feature.entrez_gene_id = tcga_sex_feature.entrez_gene_id.astype(str)
tcga_sex_feature = (
tcga_sex_feature
.merge(sex_gene_df,
left_on="entrez_gene_id",
right_on="entrez_gene_id",
how="left")
.sort_values(by="abs_value_feature", ascending=False)
.dropna(subset=["entrez_gene_id", "Padj"])
.reset_index(drop=True)
)
print(tcga_sex_feature.shape)
tcga_sex_feature.head(20)
# In[14]:
tcga_sex_feature.plot(kind="scatter", x=tcga_feature, y="neg_log_p")
|
StarcoderdataPython
|
1830643
|
"""
A module to encapsulate the user experience logic
"""
from __future__ import with_statement
import os
import re
import subprocess
import sys
import time
import traceback
import chalk
from twisted.logger import globalLogPublisher
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from hendrix.contrib import SettingsError
from hendrix.deploy import base, cache
from hendrix.logger import hendrixObserver
from hendrix.mechanics.concurrency.exceptions import RedisException
from hendrix.options import cleanOptions
from .options import HendrixOptionParser
try:
from tiempo.conn import REDIS
from tiempo.locks import lock_factory
redis_available = True
except ImportError:
redis_available = False
class Reload(FileSystemEventHandler):
def __init__(self, options, *args, **kwargs):
super(Reload, self).__init__(*args, **kwargs)
self.reload, self.options = cleanOptions(options)
if not self.reload:
raise RuntimeError(
'Reload should not be run if --reload has not been passed to '
'the command as an option.'
)
self.process = subprocess.Popen(
['hx', 'start_reload'] + self.options
)
def on_any_event(self, event):
if event.is_directory:
return
ext = os.path.splitext(event.src_path)[1]
if ext == '.py':
self.process = self.restart()
chalk.eraser()
chalk.yellow("Detected changes, restarting...")
def restart(self):
self.process.kill()
process = subprocess.Popen(
['hx', 'start_reload'] + self.options
)
return process
def hendrixLauncher(action, options, with_tiempo=False):
"""
Decides which version of HendrixDeploy to use and then
launches it.
"""
if options['key'] and options['cert'] and options['cache']:
from hendrix.deploy import hybrid
HendrixDeploy = hybrid.HendrixDeployHybrid
elif options['key'] and options['cert']:
from hendrix.deploy import tls
HendrixDeploy = tls.HendrixDeployTLS
elif options['cache']:
HendrixDeploy = cache.HendrixDeployCache
else:
HendrixDeploy = base.HendrixDeploy
if with_tiempo:
deploy = HendrixDeploy(action='start', options=options)
deploy.run()
else:
deploy = HendrixDeploy(action, options)
deploy.run()
def assignDeploymentInstance(action, options):
try:
hendrixLauncher(action, options)
except Exception as e:
tb = sys.exc_info()[2]
msg = traceback.format_exc(tb)
chalk.red(msg, pipe=sys.stderr)
os._exit(1)
def logReload(options):
"""
    Encompasses all the logic for the reload observer.
"""
event_handler = Reload(options)
observer = Observer()
observer.schedule(event_handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
pid = os.getpid()
chalk.eraser()
chalk.green('\nHendrix successfully closed.')
os.kill(pid, 15)
observer.join()
exit('\n')
def launch(*args, **options):
"""
    launch acts on the user-specified action and options by executing
    HendrixDeploy.run
"""
action = args[0]
if options['reload']:
logReload(options)
else:
assignDeploymentInstance(action, options)
def findSettingsModule():
"Find the settings module dot path within django's manage.py file"
try:
with open('manage.py', 'r') as manage:
manage_contents = manage.read()
search = re.search(
r"([\"\'](?P<module>[a-z\.]+)[\"\'])", manage_contents
)
if search: # django version < 1.7
settings_mod = search.group("module")
else:
# in 1.7, manage.py settings declaration looks like:
# os.environ.setdefault(
# "DJANGO_SETTINGS_MODULE", "example_app.settings"
# )
search = re.search(
"\".*?\"(,\\s)??\"(?P<module>.*?)\"\\)$",
manage_contents, re.I | re.S | re.M
)
settings_mod = search.group("module")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_mod)
except IOError as e:
msg = (
str(e) + '\nPlease ensure that you are in the same directory '
'as django\'s "manage.py" file.'
)
raise IOError(chalk.red(msg), None, sys.exc_info()[2])
except AttributeError:
settings_mod = ''
return settings_mod
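# For reference, a hypothetical manage.py line that the Django >= 1.7 regex
# above is meant to capture (the module path is illustrative only):
#   os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")
# which would leave settings_mod set to "myproject.settings".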
def djangoVsWsgi(options):
# settings logic
if not options['wsgi']:
settings_mod = findSettingsModule()
user_settings = options['settings']
if not settings_mod and not user_settings:
msg = (
'\nEither specify:\n--settings mysettings.dot.path\nOR\n'
'export DJANGO_SETTINGS_MODULE="mysettings.dot.path"\nOR\n'
'in your manage.py file specify '
'os.environ.setdefault("DJANGO_SETTINGS_MODULE", '
'"mysettings.dot.path")'
)
raise SettingsError(chalk.red(msg), None, sys.exc_info()[2])
elif user_settings:
# if the user specified the settings to use then these take
# precedence
options['settings'] = user_settings
elif settings_mod:
options['settings'] = settings_mod
else:
try:
base.HendrixDeploy.importWSGI(options['wsgi'])
except ImportError:
raise ImportError("The path '%s' does not exist" % options['wsgi'])
return options
def exposeProject(options):
# sys.path logic
if options['pythonpath']:
project_path = options['pythonpath']
if not os.path.exists(project_path):
raise IOError("The path '%s' does not exist" % project_path)
sys.path.append(project_path)
else:
sys.path.append(os.getcwd())
def devFriendly(options):
# if the dev option is given then also set reload to true
# note that clean options will remove reload so to honor that we use get
# in the second part of the conditional
dev_mode = options['dev']
options['reload'] = True if dev_mode else options.get('reload', False)
options['loud'] = True if dev_mode else options['loud']
return options
def noiseControl(options):
# terminal noise/info logic
# allows the specification of the log file location
if not options['loud']:
log_path = options['log']
globalLogPublisher.addObserver(hendrixObserver(log_path))
return None
def subprocessLaunch():
"""
This function is called by the hxw script.
    It takes no arguments and launches a HendrixDeploy instance using the
    worker args stored in redis.
"""
if not redis_available:
raise RedisException("can't launch this subprocess without tiempo/redis.")
try:
action = 'start'
options = REDIS.get('worker_args')
assignDeploymentInstance(action='start', options=options)
except Exception:
chalk.red('\n Encountered an unhandled exception while trying to %s hendrix.\n' % action, pipe=sys.stderr)
raise
def main(args=None):
"The function to execute when running hx"
if args is None:
args = sys.argv[1:]
options, args = HendrixOptionParser.parse_args(args)
options = vars(options)
try:
action = args[0]
except IndexError:
HendrixOptionParser.print_help()
return
exposeProject(options)
options = djangoVsWsgi(options)
options = devFriendly(options)
redirect = noiseControl(options)
try:
launch(*args, **options)
except Exception:
chalk.red('\n Encountered an unhandled exception while trying to %s hendrix.\n' % action, pipe=sys.stderr)
raise
|
StarcoderdataPython
|
3419169
|
<reponame>dutxubo/nni
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pycuda.driver as cuda
import pycuda.autoinit # pylint: disable=unused-import
import tensorrt as trt
EXPLICIT_BATCH = 1
def GiB(val):
return val * 1 << 30
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
"""
        Pair a pagelocked host buffer with its corresponding device buffer.
Parameters
----------
host_mem : host memory
Memory buffers of host
device_mem : device memory
Memory buffers of device
"""
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def allocate_buffers(engine):
"""
Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
Parameters
----------
engine : tensorrt.ICudaEngine
An ICudaEngine for executing inference on a built network
Returns
-------
list
All input HostDeviceMem of an engine
list
All output HostDeviceMem of an engine
GPU bindings
Device bindings
GPU stream
A stream is a sequence of commands (possibly issued by different host threads) that execute in order
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs for full dimension networks.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference_v2(context, bindings, inputs, outputs, stream):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
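# A minimal usage sketch, not part of the original helpers: it assumes `engine`
# is an already-built tensorrt.ICudaEngine and `input_array` is a numpy array
# whose flattened size and dtype match the first input binding.
def _example_inference_flow(engine, input_array):
    # Allocate buffers and an execution context once per engine.
    inputs, outputs, bindings, stream = allocate_buffers(engine)
    context = engine.create_execution_context()
    # Copy the flattened input into the pagelocked host buffer.
    inputs[0].host[:input_array.size] = input_array.ravel()
    # Run inference and return the raw host-side output arrays.
    return do_inference_v2(context, bindings, inputs, outputs, stream)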
|
StarcoderdataPython
|
4804528
|
<reponame>KOSMAsubmm/kosma_gildas_dlc<gh_stars>0
from sicparse import OptionParser
#
import numpy as np
import matplotlib
#if options.plot != "interactive":
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
from scipy.optimize import leastsq, least_squares
import time
#
from scipy.interpolate import LSQUnivariateSpline,UnivariateSpline
import os
from scipy.optimize import leastsq
from scipy import interpolate
from datetime import datetime
import pandas as pd
# least_squares
import pyclass
# create logger
import logging
from sys import getsizeof
import pickle
#
import signal
import re
import pickle
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.tick_params(axis='both', which='major', labelsize=16)
def emit_colored_ansi(fn):
def new(*args):
levelno = args[1].levelno
if(levelno >= 50):
color = '\x1b[31m' # red
elif(levelno >= 40):
color = '\x1b[31m' # red
elif(levelno >= 30):
color = '\x1b[33m' # yellow
elif(levelno >= 20):
color = '\x1b[32m' # green
elif(levelno >= 10):
color = '\x1b[35m' # pink
else:
color = '\x1b[0m' # normal
args[1].levelname = color + args[1].levelname + '\x1b[0m' # normal
return fn(*args)
return new
def setup_logging(log_name="spline_fit",log_filename='/tmp/spline_fit.log',level="info"):
#
log_lookup = {}
log_lookup['info'] = logging.INFO
log_lookup['debug'] = logging.DEBUG
log_lookup['warning'] = logging.WARNING
log_lookup['error'] = logging.ERROR
#
module_logger = logging.getLogger('spline_fit')
# check if handlers are already present
if len(module_logger.handlers)>=2:
return module_logger
#
module_logger.setLevel(log_lookup[level])
# create file handler which logs even debug messages
fh = logging.FileHandler(log_filename)
fh.setLevel(log_lookup[level])
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(log_lookup[level])
# create formatter and add it to the handlers
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#log_format = "%(asctime)s: [%(levelname)s: %(filename)s:%(lineno)s - %(funcName)s() ] %(message)s"
#log_format = "%(asctime)s: [%(levelname)s: %(filename)s:%(lineno)s - %(funcName)s() ] %(message)s"
log_format = "[%(levelname)s]: %(filename)s:%(lineno)s - %(funcName)s() %(message)s"
formatter = logging.Formatter(log_format)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logging.StreamHandler.emit = emit_colored_ansi(logging.StreamHandler.emit)
# add the handlers to the logger
module_logger.addHandler(fh)
module_logger.addHandler(ch)
return module_logger
def error_func(p,x,y,spline):
return p*spline(x)-y
def error_func_offset(p,x,y,spline):
return p[1]+p[0]*spline(x)-y
# fit a shift in the if frequency
def error_func_shift(p,x,y,spline):
return p[0]*spline(x+p[1])-y
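# Minimal sketch (illustrative; mirrors the calls made further below): the
# residual functions above are intended to be handed to scipy's least_squares,
# e.g. for a pure scale fit and for a scale-plus-IF-shift fit:
#   fit = least_squares(error_func, x0=[1.0], args=(if_freq, intensity, spline))
#   scale = fit.x[0]
#   fit = least_squares(error_func_shift, x0=[1.0, 0.0],
#                       args=(if_freq, intensity, spline))
#   scale, shift = fit.x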
class Spectrum():
def __init__(self, class_env=True, kalibrate_env=False):
self.log = logging.getLogger('spline_fit')
if class_env:
self.import_class()
self.get_class_windows()
elif kalibrate_env:
self.import_kalibrate()
#
self.generate_if_freq_axis()
self.generate_windows_mask()
def import_class(self):
#
self.log.debug("generating spectrum container")
self.intensity = pyclass.gdict.ry.__sicdata__.copy()
self.x = pyclass.gdict.rx.__sicdata__.copy()
self.fres = pyclass.gdict.r.head.spe.fres.__sicdata__.tolist()
self.vres = pyclass.gdict.r.head.spe.vres.__sicdata__.tolist()
self.ref_channel = pyclass.gdict.r.head.spe.rchan.__sicdata__.tolist()
self.channels = pyclass.gdict.channels.__sicdata__.tolist()
self.xmin = pyclass.gdict.user_xmin.__sicdata__.tolist()
self.xmax = pyclass.gdict.user_xmax.__sicdata__.tolist()
self.lower_axis_unit = pyclass.gdict.set.las.unit.__sicdata__.tolist()
self.number = pyclass.gdict.number.__sicdata__.tolist()
self.telescope = pyclass.gdict.telescope.tolist()
self.scan = pyclass.gdict.scan.tolist()
self.subscan = pyclass.gdict.subscan.tolist()
self.line = pyclass.gdict.line.tolist()
self.source = pyclass.gdict.source.tolist()
self.bad = pyclass.gdict.r.head.spe.bad.tolist()
self.intensity_nan = self.intensity
self.intensity_nan[self.intensity==self.bad]=float('nan')
# check if user section there
if hasattr(pyclass.gdict.r, 'user'):
self.mission_id = pyclass.gdict.r.user.sofia.mission_id.tolist()
self.aot_id = pyclass.gdict.r.user.sofia.aot_id.tolist()
self.aor_id = pyclass.gdict.r.user.sofia.aor_id.tolist()
self.processing_steps = pyclass.gdict.r.user.sofia.processing_steps.tolist()
else:
self.log.warning("no user section, using defaults values for user section")
self.mission_id = "no user section"
self.aot_id = "no user section"
self.aor_id = "no user section"
self.processing_steps = "no user section"
def generate_if_freq_axis(self):
#
self.if_freq = np.abs(np.arange(len(self.intensity))*self.fres + self.fres/2)
def get_class_windows(self):
#
self.log.debug('getting class windows')
lower_range = pyclass.gdict.set.las.wind1.__sicdata__
upper_range = pyclass.gdict.set.las.wind2.__sicdata__
self.log.debug('got class windows')
#
self.windows = [window for window in zip(lower_range, upper_range) if window != (0.0,0.0)]
def generate_windows_mask(self):
windows_index = []
for window in self.windows:
window_index = ((self.x > min(window)) & (self.x < max(window)))
windows_index.append(list(window_index))
# mask for bad channels
windows_index.append((np.isnan(self.intensity_nan)))
#windows_index.append((self.intensity_nan))
#
self.crop_edge_if_range = 200.0
windows_index.append(((self.if_freq<self.crop_edge_if_range) | (self.if_freq>(max(self.if_freq)-self.crop_edge_if_range))))
#windows_index.append(((self.if_freq>self.crop_edge_if_range) | (self.if_freq<(max(self.if_freq)-self.crop_edge_if_range))))
# combime masks
self.mask = np.column_stack(tuple(windows_index)).any(axis=1)
class SplineFit:
def __init__(self,
spectrum,
spline_nodes=50,
fit_data_smoothness = 250,
fit_spline = False,
spline_catalog_filename=None,
store_spline_in_archive=False,
scale_spline_from_archive=False,
plot="none",
search_scan_range = 5,
output_plots_path = "./plots",
fit_type = "scale_spline"
):
#
#
self.crop_edge_if_range = 100.0 # ignore +/- if band edges
self.plot = plot
self.spline_nodes = spline_nodes
self.spectrum = spectrum
self.output_path = output_plots_path
self.fit_spline = fit_spline
self.fit_data_smoothness =fit_data_smoothness
self.spline_catalog_filename = spline_catalog_filename
self.store_spline_in_archive = store_spline_in_archive
self.scale_spline_from_archive = scale_spline_from_archive
self.search_scan_range = search_scan_range
self.plot_tag = options.plot_name_tag
self.fit_type = fit_type
self.catalog_columns = [
"telescope",
"scan",
"subscan",
"source",
"line",
"mission_id",
"aot_id",
"aor_id",
"processing_steps",
"spline",
"spline_type"
]
self.log = logging.getLogger('spline_fit')
if not os.path.exists(os.path.abspath(os.path.dirname(spline_catalog_filename))):
msg ="output spline folder not found {0}".format(os.path.dirname(spline_catalog_filename))
self.log.error(msg)
pyclass.message(pyclass.seve.e, "Spline fit", msg)
sys.exit()
if not isinstance(spectrum,Spectrum):
self.log.error("Input spectrum not a spectrum class")
return
#
def __call__(self):
#
self.crop_spectra()
self.smooth_spectrum()
self.load_catalog()
#
if self.scale_spline_from_archive:
self.find_best_match_spline_from_archive()
elif self.fit_spline:
try:
self.fit_fixed_node_spline()
except:
self.log.error("fixed node fit failed for {0}".format(self.spectrum.number))
#try:
# self.fit_variable_node_spline()
#except:
# self.log.error("variable node fit failed for {0}".format(self.spectrum.number))
#
def crop_spectra(self):
#
self.log.debug("cropping spectra")
#print np.where()
self.crop_index, = np.where((self.spectrum.if_freq<self.crop_edge_if_range) | (self.spectrum.if_freq>(max(self.spectrum.if_freq)-self.crop_edge_if_range)))
#self.spectrum.intensity[self.crop_index] = self.spectrum.bad
def smooth_spectrum(self):
if any(pyclass.gdict.rx):
self.log.debug("smoothing input spectra using class smooth routine")
if self.fit_data_smoothness == 0:
pyclass.comm("smooth box {0}".format(self.fit_data_smoothness))
self.intensity_smooth = pyclass.gdict.ry.copy()
smooth_fres = pyclass.gdict.r.head.spe.fres.__sicdata__.tolist()
self.if_freq_smooth = np.abs(np.arange(len(self.intensity_smooth))*smooth_fres + smooth_fres/2)
self.x_smooth = pyclass.gdict.rx.copy()
pyclass.comm("get number")
else:
self.log.debug("smoothing input spectra using numpy smooth routine")
self.if_freq_smooth, self.intensity_smooth = smooth(self.spectrum.if_freq,
self.spectrum.intensity,
self.fit_data_smoothness,
True,
self.spectrum.bad)
self.x_smooth, nan = smooth(self.spectrum.x,
self.spectrum.intensity,
self.fit_data_smoothness,
True,
self.spectrum.bad)
#
def generate_flat_spline(self):
channel_nb = 4000
steps=100
edge = 100
self.spline = LSQUnivariateSpline(x = np.arange(channel_nb),
y = np.ones(channel_nb),
t = np.arange(edge,channel_nb-edge,steps),
k = 3)
return self.store_spline_in_catalog(self.spline, spline_type="flat",return_row_only=True)
def fit_fixed_node_spline(self):
self.log.info("starting fit for spectra number: {0.spectrum.number},"\
"spline nodes: {0.spline_nodes}, "\
"smooth box: {0.fit_data_smoothness}".format(self))
node_channels = np.arange(3,self.spline_nodes-3)*\
((max(self.spectrum.if_freq)-min(self.spectrum.if_freq))/self.spline_nodes)
weight = []
weight.append(self.intensity_smooth==self.spectrum.bad)
weight.append(np.isnan(self.intensity_smooth))
w = np.column_stack(tuple(weight)).any(axis=1)
# check for n
#
# selected valid nodes positions
valid_nodes = np.where((np.min(self.if_freq_smooth[~w]) < node_channels) & (node_channels < np.max(self.if_freq_smooth[~w])))
#
self.spline_fixed = LSQUnivariateSpline(x=self.if_freq_smooth[~w],
y=self.intensity_smooth[~w],
t=node_channels[valid_nodes],
k=3)
#t=node_channels[valid_nodes],k=3)
self.spline_fit_fixed = self.spline_fixed(self.spectrum.if_freq)
self.spline_fit_fixed[self.crop_index] = self.spectrum.bad
#
#
self.spectrum.best_fit_spline = self.spline_fit_fixed
self.spectrum.corrected_intensity = self.spectrum.intensity_nan - self.spectrum.best_fit_spline
#
if self.store_spline_in_archive:
self.store_spline_in_catalog(self.spline_fixed, spline_type="fixed_grid")
def fit_variable_node_spline(self):
self.log.info("starting variable spline fit for spectra number: {0.spectrum.number}".format(self))
w = np.isnan(self.intensity_smooth)
bad = self.spectrum.intensity==self.spectrum.bad
self.spline_variable = UnivariateSpline(self.if_freq_smooth[~w],
self.intensity_smooth[~w])
self.spline_variable.set_smoothing_factor(0.1)
#
self.spline_fit_variable = self.spline_variable(self.spectrum.if_freq)
self.spline_fit_variable[self.crop_index] = self.spectrum.bad
self.spectrum.best_fit_spline = self.spline_fit_variable
self.spectrum.corrected_intensity = self.spectrum.intensity_nan - self.spectrum.best_fit_spline
#
if self.store_spline_in_archive:
self.store_spline_in_catalog(self.spline_fixed, spline_type="variable_grid")
def plot_spline_fit(self,fit,show=True,save="save_best"):
if type(fit) is pd.core.frame.DataFrame:
fit = fit.iloc[-1,:]
self.log.warning("two entries found for index: {0.name} plot last one".format(fit))
self.best_fit = fit
spline = fit.spline
scale_factor = fit.scale_factor
shift_factor = fit.shift_factor
shift_offset = fit.shift_offset
#
#fig,(ax_data,ax_res) = plt.subplots(2,1,figsize=(15,7))
fig = plt.figure(figsize=(10,5))
ax_data = fig.add_subplot(211)
ax_res = fig.add_subplot(212)
bad_data = self.spectrum.intensity==self.spectrum.bad
#
ax_data.plot(self.spectrum.x, self.spectrum.intensity_nan,lw=1, label="data")
ax_data.plot(self.spectrum.x[~self.spectrum.mask], self.spectrum.intensity_nan[~self.spectrum.mask], 'g',lw=1, label="data used for fit")
nan,smooth_spectra = smooth(self.spectrum.x,self.spectrum.intensity_nan,self.fit_data_smoothness,False,self.spectrum.bad)
ax_data.plot(self.spectrum.x, smooth_spectra, 'b',lw=1, label="")
#
ax_data.plot(self.spectrum.x, shift_offset + scale_factor*spline(self.spectrum.if_freq + shift_factor),
'r', lw=1, label="scaled spline ({0:3.2f})".format(scale_factor) )
ax_data.plot(self.spectrum.x, spline(self.spectrum.if_freq), 'r--', lw=1, label="master spline" )
#
index = []
for knot in spline.get_knots():
index.append(find_index_nearest(self.spectrum.if_freq,knot))
ax_data.plot(self.spectrum.x[index], shift_offset + scale_factor*spline(spline.get_knots()), 'ro')
ax_data.grid()
ax_data.legend(loc="upper left", ncol=2)
# residual
residual = self.spectrum.intensity - shift_offset - scale_factor*spline(self.spectrum.if_freq+shift_factor)
nan,smooth_residual = smooth(self.spectrum.x,residual,50,False,self.spectrum.bad)
ax_res.plot(self.spectrum.x[~bad_data], residual[~bad_data], lw=1, label="residual")
ax_res.plot(self.spectrum.x, smooth_residual, lw=1, label="smoothed residual")
ax_res.grid()
ax_res.legend(loc="upper left", ncol=2)
ax_res.set_xlabel("V$_{LSR}$ ($\mbox{km\,s}^{-1}$)")
ax_res.set_ylabel("T$_{mb}$ (K)")
ax_data.set_ylabel("T$_{mb}$ (K)")
#ax_data.set_xlabel("Velocity (km/s)")
#
xmin = pyclass.gdict.user_xmin.__sicdata__
xmax = pyclass.gdict.user_xmax.__sicdata__
ymin = pyclass.gdict.user_ymin.__sicdata__
ymax = pyclass.gdict.user_ymax.__sicdata__
#print ymax-ymin
if (ymax-ymin!=1.0):
ax_data.set_ylim((ymin, ymax))
ax_res.set_ylim((ymin, ymax))
title = "{1.spectrum.number};{1.spectrum.scan}:{1.spectrum.subscan}, scale-factor: {0.scale_factor:3.3f}, shift-factor: {0.shift_factor:3.3f}, offset-factor: {0.shift_offset:3.3f} \n chi-squared:{0.chi_squared:3.3f} {0.name}".format(fit,self)
title = title.replace("_","\_")
fig.suptitle(title)
fig.tight_layout()
#
if show==True:
plt.show(block=False)
if "save_best" in save:
filename = "{0.output_path}/{0.spectrum.number}_{0.spectrum.scan}_{0.spectrum.subscan}_{0.spectrum.telescope}_{0.fit_type}_sr_{0.search_scan_range}{0.plot_tag}".format(self)
self.log.info("saving {0}.png".format(filename))
#
if "with_pickle" in self.plot:
fig.tight_layout()
fig.suptitle("")
self.log.info("saving {0}.pkl".format(filename))
with open(filename+".pkl", 'wb') as f:
pickle.dump(fig, f)
fig.savefig(filename+".png")
plt.close(fig)
def find_best_match_spline_from_archive(self):
#
self.log.info("finding best match spline from archive for {0.spectrum.number} within {0.search_scan_range} scan numbers of {0.spectrum.number}".format(self))
if self.spline_catalog is None:
self.log.error("No spline archive loaded, exiting")
sys.exit(2)
self.fit_catalog = self.spline_catalog.copy()
# ]
self.fit_catalog = self.fit_catalog[((self.fit_catalog.scan > self.spectrum.scan-self.search_scan_range) & (self.fit_catalog.scan < self.spectrum.scan+self.search_scan_range)) &
(self.fit_catalog.telescope == self.spectrum.telescope) &
(self.fit_catalog.spline_type == "fixed_grid")
]
self.fit_catalog = pd.concat([self.fit_catalog,self.generate_flat_spline()])
self.fit_catalog['scale_factor'] = np.zeros(len(self.fit_catalog))
self.fit_catalog['shift_factor'] = np.zeros(len(self.fit_catalog))
self.fit_catalog['shift_offset'] = np.zeros(len(self.fit_catalog))
self.fit_catalog['chi_squared'] = np.zeros(len(self.fit_catalog))
self.fit_catalog['rms'] = np.zeros(len(self.fit_catalog))
self.fit_catalog['std'] = np.zeros(len(self.fit_catalog))
#print self.fit_catalog.columns
# check for duplicates, some duplicates in the catalog, mess up best fit selection
self.fit_catalog = self.fit_catalog.drop_duplicates(subset=self.index_parameters,keep="first")
#
#
x = self.spectrum.if_freq
y = self.spectrum.intensity_nan
self.log.debug("looping over {0} splines".format(len(self.fit_catalog)))
if self.fit_type == "scale_spline_fixed_shift":
shift_factors = [0.0,100.0]
else:
shift_factors = [0.0]
fitted_splines = []
for shift_factor in shift_factors:
for i,(index,row) in enumerate(self.fit_catalog.iterrows()):
name = row.name
self.log.debug("fitting ({0}/{1}):{2}".format(i,len(self.fit_catalog),index))
spline = row.spline
try:
if self.fit_type == "scale_spline":
popt = least_squares(error_func,x0=[0.0],args=(x[~self.spectrum.mask],y[~self.spectrum.mask],row.spline))
shift_factor = 0.0
shift_offset = 0.0
elif self.fit_type == "scale_spline_and_offset":
popt = least_squares(error_func_offset,x0=[0.0,-200.0],args=(x[~self.spectrum.mask],y[~self.spectrum.mask],row.spline))
shift_factor = 0.0
elif self.fit_type == "scale_spline_and_shift":
popt = least_squares(error_func_shift,x0=[0.0,100.0],args=(x[~self.spectrum.mask],y[~self.spectrum.mask],row.spline))
elif self.fit_type == "scale_spline_fixed_shift":
self.log.debug("fitted with {0} shift_factor".format(shift_factor))
popt = least_squares(error_func,x0=[0.0],args=(x[~self.spectrum.mask]+shift_factor,y[~self.spectrum.mask],row.spline))
except Exception as e:
self.log.error(e)
continue
if len(popt.x) == 1:
scale_factor = popt.x[0]
self.log.debug("fitted with {0} scale_factor".format(scale_factor))
shift_offset = 0.0
elif self.fit_type == "scale_spline_and_offset":
scale_factor = popt.x[0]
shift_factor = 0.0
shift_offset = popt.x[1]
self.log.debug("fitted with {0} shift_factor".format(shift_factor))
self.log.debug("fitted with {0} shift_offset".format(shift_offset))
elif len(popt.x) == 2:
scale_factor = popt.x[0]
shift_factor = popt.x[1]
shift_offset = 0.0
self.log.debug("fitted with {0} shift_factor".format(shift_factor))
self.log.debug("fitted with {0} scale_factor".format(scale_factor))
#
#plot_fitted_spline_in_gildas(pause=False)
residual = self.spectrum.intensity_nan - shift_offset - scale_factor*spline(self.spectrum.if_freq + shift_factor)
#chi_squared = np.nanmean((residual[~self.spectrum.mask])**2/(scale_factor*spline(self.spectrum.if_freq[~self.spectrum.mask]))**2)
chi_squared = np.nanmean((residual[~self.spectrum.mask])**2)
nan,smooth_residual = smooth(self.spectrum.if_freq,residual,self.fit_data_smoothness,False,self.spectrum.bad)
chi_squared = np.nanmean((smooth_residual[~self.spectrum.mask])**2)
std = np.nanstd(residual[~self.spectrum.mask])
rms = np.sqrt(np.nanmean(np.square(residual[~self.spectrum.mask])))
row.scale_factor = scale_factor
row.shift_factor = shift_factor
row.shift_offset = shift_offset
row.chi_squared = chi_squared
row.rms = rms
row.std = std
row.name = "{0}_shift_{1:3.1f}_offset_{2:3.1f}".format(name,shift_factor,shift_offset)
#
fitted_splines.append(row)
#
# add a polynomial order 1
poly_tables = []
if self.fit_type == 'scale_spline_and_offset':
poly_order = 0
else:
poly_order = 0
for poly_order in range(poly_order):
poly = np.poly1d(np.polyfit(x[~self.spectrum.mask],y[~self.spectrum.mask], poly_order))
channel_nb = 4000
steps=100
edge = 100
poly_spline = LSQUnivariateSpline(x = np.arange(channel_nb),
y = poly(np.arange(channel_nb)),
t = np.arange(edge,channel_nb-edge,steps),
k = 3)
residual = self.spectrum.intensity_nan - poly(self.spectrum.if_freq)
nan,smooth_residual = smooth(self.spectrum.if_freq,residual,self.fit_data_smoothness,False,self.spectrum.bad)
chi_squared = np.nanmean((smooth_residual[~self.spectrum.mask])**2)
poly_table = self.store_spline_in_catalog(poly_spline, spline_type="poly_{0}".format(poly_order),return_row_only=True)
poly_table["chi_squared"] = chi_squared
poly_table["scale_factor"] = [1.0]
poly_table["shift_factor"] = [0.0]
poly_table["shift_offset"] = [0.0]
poly_tables.append(poly_table)
#
#
self.fit_catalog = pd.DataFrame(fitted_splines)
if len(poly_tables) > 0:
poly_table = pd.concat(poly_tables)
self.fit_catalog = pd.concat([self.fit_catalog,poly_table])
#plt.scatter(np.arange(len(self.fit_catalog)), fitted_splines.chi_squared)
best_spline_idx = self.fit_catalog.chi_squared.idxmin()
self.best_fit_spline = self.fit_catalog.loc[best_spline_idx,:]
self.spectrum.best_fit_spline = self.best_fit_spline.shift_offset + self.best_fit_spline.scale_factor*self.best_fit_spline.spline(self.spectrum.if_freq + self.best_fit_spline.shift_factor)
# don't add intensity offset back in for high continuum sources
#self.spectrum.corrected_intensity = self.spectrum.intensity_nan - self.best_fit_spline.shift_offset - self.best_fit_spline.scale_factor*self.best_fit_spline.spline(self.spectrum.if_freq + self.best_fit_spline.shift_factor)
if ((self.fit_type == 'scale_spline_and_offset') and (self.best_fit_spline.spline_type=='flat')):
self.spectrum.corrected_intensity = self.spectrum.intensity_nan
#self.log.warning("doing nothing")
else:
self.spectrum.corrected_intensity = self.spectrum.intensity_nan - self.best_fit_spline.scale_factor*self.best_fit_spline.spline(self.spectrum.if_freq + self.best_fit_spline.shift_factor)
self.spectrum.corrected_intensity[np.isnan(self.spectrum.corrected_intensity)] = self.spectrum.bad
self.spectrum.processing_steps = self.best_fit_spline.processing_steps
self.log.info("best fit: {0}".format(self.best_fit_spline.processing_steps))
#
if self.plot=="interactive":
fig, ax = plt.subplots()
ax.scatter(self.fit_catalog.scan, self.fit_catalog.chi_squared, picker=True, label="spline fits")
ax.scatter(self.fit_catalog[self.fit_catalog.spline_type=="flat"].scan,
self.fit_catalog[self.fit_catalog.spline_type=="flat"].chi_squared,
c='r', marker="*", s = 100, label="Base 0")
ax.scatter(self.fit_catalog[self.fit_catalog.spline_type.str.contains("poly")].scan,
self.fit_catalog[self.fit_catalog.spline_type.str.contains("poly")].chi_squared,
c='k', marker="*", s = 100, label="Base 1")
ax.scatter([self.fit_catalog.scan.loc[best_spline_idx]],
[self.fit_catalog.chi_squared.loc[best_spline_idx]],
c='g', marker="*", s=100, label="Best fit")
ax.set_yscale("log")
plt.axvline(x=self.spectrum.scan, linewidth=2, color='r', alpha=0.5)
fig.suptitle("{0.scan}:{0.subscan} {0.number} {0.telescope}\n click on point to select fit".format(self.spectrum))
fig.canvas.mpl_connect('pick_event', self.plot_onpick)
#cid = fig.canvas.mpl_connect('button_press_event', self.onclick)
plt.legend()
plt.show()
elif "save_best" in self.plot:
self.plot_spline_fit(self.fit_catalog.loc[best_spline_idx,:],show=False,save=self.plot)
def plot_onpick(self,event):
#print event.__dict__, event.ind
for i in event.ind:
self.plot_spline_fit(self.fit_catalog.iloc[i])
def onclick(self,event):
print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
('double' if event.dblclick else 'single', event.button,
event.x, event.y, event.xdata, event.ydata))
def load_catalog(self):
if not os.path.exists(self.spline_catalog_filename):
self.log.warning("spline catalog {0.spline_catalog_filename} not found".format(self))
self.log.info("starting a fresh catalog")
#self.spline_catalog = pd.DataFrame(dict(zip(self.catalog_columns,[[] for i in range(len(self.catalog_columns))])))
self.spline_catalog = None
else:
self.log.debug("loading catalog {0.spline_catalog_filename}".format(self))
#self.spline_catalog = pd.read_pickle(self.spline_catalog_filename)
self.spline_catalog = pd.read_pickle(self.spline_catalog_filename)
def check_if_spline_in_catalog(self, row):
#
return
def reduce_spline_size(self,spline):
'''
        Drop excess data from the spline object that would otherwise
        blow up the catalog size.
'''
_data = []
for ele in spline._data:
if type(ele)==np.ndarray:
_data.append(ele[:2])
else:
_data.append(ele)
_data = tuple(_data)
spline.__setattr__("_data",_data)
def store_spline_in_catalog(self, spline,spline_type,return_row_only=False):
row_dict = {}
index_parameters = ["telescope", "scan", "subscan", "source", "line","spline_type","spline_nodes"]
self.index_parameters = index_parameters
# generate row
for param in self.catalog_columns:
param_value = getattr(self.spectrum,param,None)
if param_value is not None:
row_dict[param] = param_value
#
self.reduce_spline_size(spline)
row_dict["spline"] = spline
row_dict["spline_nodes"] = self.spline_nodes
row_dict["spline_type"] = spline_type
row_dict["time_added"] = datetime.now()
# check if spline already in catalog
index = "_".join([str(row_dict[param]).replace(" ","") for param in index_parameters])
found = re.findall("(\d+:\d+)",self.spectrum.processing_steps)
if len(found) == 2:
index += "_"
index += "_".join(found)
#
self.log.debug("{0}".format(index))
self.log.debug("adding spline to catalog")
row_table = pd.DataFrame(row_dict,index=[index])
if return_row_only:
return row_table
if self.spline_catalog is None:
self.spline_catalog = row_table
else:
#
if index in self.spline_catalog.index.values:
self.log.warning("spline {0}".format(index))
self.log.warning("already in catalog for, overwriting what was there")
self.spline_catalog = self.spline_catalog.drop(index)
self.spline_catalog = pd.concat([self.spline_catalog,row_table])
self.spline_catalog.to_pickle(self.spline_catalog_filename)
def find_index_nearest(array,value):
idx = (np.abs(array-value)).argmin()
return idx
def smooth(x, y, box_pts, class_like=True, bad_channel=-1.23456000e+10):
box = np.ones(box_pts)/box_pts
bad_index = y==bad_channel
y_smooth = np.convolve(y[~bad_index], box, mode="same")
#
if class_like:
x_smooth_grid = x[np.arange(0,len(y)-box_pts,box_pts)+int(box_pts/2)]
interpolate_y = interpolate.griddata(points=x[~bad_index],
values=y_smooth,
xi=x_smooth_grid)
#
return x_smooth_grid,interpolate_y
else:
return x,y_smooth
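# Tiny illustration of the class_like regridding (not taken from the original
# code): with box_pts=4 and 16 input channels, the output x grid keeps one
# point per box near the box centre (indices 2, 6, 10) and y is the boxcar-
# smoothed intensity interpolated onto that coarser grid.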
def compare_class_python_smooth():
x = pyclass.gdict.rx.__sicdata__
y = pyclass.gdict.ry.__sicdata__
# check smooth data
fig, (ax_smooth) = plt.subplots(1,1)
bad_index = y==pyclass.gdict.r.head.spe.bad.__sicdata__
ax_smooth.plot(x[~bad_index],y[~bad_index], label="raw data")
smooth_box = 100
try:
pyclass.comm("smooth box {0}".format(smooth_box))
except Exception as e:
        print(e)
ax_smooth.plot(pyclass.gdict.rx.__sicdata__,
pyclass.gdict.ry.__sicdata__,
'ro', ms=7, label="class smooth box {0}".format(smooth_box))
# smooth python way
smooth_x, smooth_y = smooth(x,y, smooth_box,
class_like=True,
bad_channel=pyclass.gdict.r.head.spe.bad.__sicdata__)
ax_smooth.plot(smooth_x, smooth_y, 'k+',
ms=3, label="numpy smooth box {0}".format(smooth_box)
)
#
plt.legend()
fig.show()
def handler(num, flame):
log = logging.getLogger('spline_fit')
log.info("!!ctrl+C!!")
pyclass.message(pyclass.seve.e, "Spline fit", "Ctrl+C pressed")
sys.exit()
def plot_fitted_spline_in_gildas(with_variable_nodes=True,pause=True):
pyclass.comm("set unit v")
pyclass.comm("pl")
#pyclass.comm("clear")
#pyclass.comm("box")
pyclass.comm("pen 0")
pyclass.comm("hist rx kosma%ry_orig /BLANKING 'r%head%spe%bad' 0")
pyclass.comm("pen 1")
pyclass.comm("hist rx kosma%spline_fit /BLANKING 'r%head%spe%bad' 0")
pyclass.comm("pen 2")
pyclass.comm("hist rx kosma%spline_corrected /BLANKING 'r%head%spe%bad' 0")
pyclass.comm("pen 3")
if pause:
time.sleep(2)
def main():
plot_choices = ["interactive","save_best","save_best_with_pickle","gildas_plot","none"]
parser = OptionParser()
parser.add_option("--fit_spline", dest="fit_spline", nargs=1, default=True)
# number of nodes per spectra
parser.add_option( "--spline_nodes",
dest="spline_nodes",
nargs=1,
type=int,
default=100,
help="number of nodes to for spline fit, default %default")
#
parser.add_option( "--smoothness_of_fit_data",
dest="smoothness_of_fit_data",
nargs=1,
type=int,
default=100,
help="number of channels to smooth data per fitting spline")
# logging options
parser.add_option( "--logging_level",
dest="logging_level",
nargs=1,
default="info",
choices = ["info", "debug", "warning", "error"],
help="set logging level %default" )
#
parser.add_option( "--show_plot", dest="show_plot", nargs=1, default=False,
help="show plot summarizing fit" )
parser.add_option("--spline_archive_filename", dest="spline_archive_filename", nargs=1, default="/tmp/spline_archive.csv",
help="file where splines templates are stored, stored in pandas table"
)
parser.add_option("--store_spline_in_archive", dest="store_spline_in_archive", action="store_true",
default=False, help="store fitted spline in the archive"
)
parser.add_option("--scale_spline_from_archive", dest="scale_spline_from_archive", action="store_true",
default=False, help="check which spline in the archive give the best fit to the data"
)
parser.add_option("--plot", dest="plot", default="none", help="plot fitted spline choices: {0}".format(",".join(plot_choices)), choices = plot_choices
)
parser.add_option("--search_scan_range", dest="search_scan_range", type=int,
default=5, help="range of scan numbers to look for spline"
)
parser.add_option("--output_plots_path", dest="output_plots_path", type=str,
default="./plots", help="path to save output plots to "
)
parser.add_option("--plots_tag", dest="plot_name_tag", type=str,
default="", help="additional string to identify different reduction"
)
parser.add_option("--spline_fit_type", dest="spline_fit_type",
default="scale_spline", choices = ["scale_spline_and_shift","scale_spline","scale_spline_fixed_shift", "scale_spline_and_offset"],
help="How to scale spline, either simply scale, or scale and shift frequency axis [+/- 150 MHz]"
)
#
signal.signal(signal.SIGINT, handler)
try:
global options
(options, args) = parser.parse_args()
except:
pyclass.message(pyclass.seve.e, "Spline", "Invalid option")
pyclass.sicerror()
return
#
#
options.output_plots_path = options.output_plots_path.replace("\"","")
if not os.path.exists(options.output_plots_path):
os.mkdir(options.output_plots_path)
# sample spacing
#
if (not pyclass.gotgdict()):
pyclass.get(verbose=False)
# check is there a spectrum loaded
if not hasattr(pyclass.gdict, "ry"):
pyclass.message(pyclass.seve.e, "Spline", "no spectra loaded")
pyclass.sicerror()
return
# set logging level
module_logger = setup_logging(log_name="spline_fit", log_filename='/tmp/spline_fit.log', level=options.logging_level)
# set debug level
log_lookup = {}
log_lookup['info'] = logging.INFO
log_lookup['debug'] = logging.DEBUG
log_lookup['warning'] = logging.WARNING
log_lookup['error'] = logging.ERROR
module_logger.setLevel(log_lookup[options.logging_level])
for logger_handle in module_logger.handlers:
logger_handle.setLevel(log_lookup[options.logging_level])
#
#compare_class_python_smooth()
if not hasattr(pyclass.gdict.r, 'user'):
pyclass.comm("set var user")
pyclass.comm("import sofia")
pyclass.comm("get {0}".format(pyclass.gdict.number.__sicdata__))
if not hasattr(pyclass.gdict, 'kosma'):
module_logging.info("creating kosma structure")
pyclass.comm("define structure kosma /global")
#
if not hasattr(pyclass.gdict.kosma, "ry_orig"):
#pyclass.comm("define structure kosma")
pyclass.comm("def double kosma%ry_orig /like rx")
#
spec = Spectrum()
#
# generata array
for var in ["spline_fit","spline_corrected","ry_orig"]:
#print "already exists {0}".format(var), hasattr(pyclass.gdict.kosma, var)
if not hasattr(pyclass.gdict.kosma, var):
module_logger.info("generating kosma%{0}".format(var))
pyclass.comm("def double kosma%{0} /like rx".format(var))
if len(getattr(pyclass.gdict.kosma,var)) != pyclass.gdict.channels.__sicdata__.tolist():
module_logger.info("deleting kosma%{0}".format(var))
pyclass.comm("delete /var kosma%{0}".format(var))
#time.sleep(1.5) # delete needs some time..
pyclass.comm("def double kosma%{0} /like rx".format(var))
setattr(pyclass.gdict.kosma, "ry_orig", spec.intensity )
if np.count_nonzero(~np.isnan(spec.intensity))<spec.channels/2.0:
module_logger.error("no data found")
return
#
if not hasattr(pyclass.gdict.kosma, "spline_corrected"):
#pyclass.comm("delete /var kosma%spline_corrected")
pyclass.comm("def double kosma%spline_corrected /like rx /global")
#return
spl_fit = SplineFit(spectrum = spec,
spline_nodes = options.spline_nodes,
fit_data_smoothness = options.smoothness_of_fit_data,
fit_spline = options.fit_spline,
spline_catalog_filename = options.spline_archive_filename.replace("\"",""),
store_spline_in_archive = options.store_spline_in_archive,
scale_spline_from_archive = options.scale_spline_from_archive,
plot = options.plot,
search_scan_range = options.search_scan_range,
output_plots_path = options.output_plots_path,
fit_type = options.spline_fit_type
)
spl_fit()
#
#setattr(pyclass.gdict.r.user.sofia,"processing_steps",spl_fit.best_fit.name)
# overplot on gildas
setattr(pyclass.gdict.kosma, "spline_fit", spec.best_fit_spline)
setattr(pyclass.gdict.kosma, "spline_corrected", spec.corrected_intensity)
if options.plot=="gildas_plot":
plot_fitted_spline_in_gildas()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
94936
|
<gh_stars>1-10
import sys
import os
from venv import create
import iotpackage.ModelTraining as mt
import iotpackage.FeatureSelection as fsec
import time
import pandas as pd
import numpy as np
import iotpackage.Utils as utils
from iotpackage.__vars import dictGroups, featureGroups
import argparse
import json
VERBOSE = 1
# Loads config for the experiment runs
def loadConfigFromPath(config_path):
with open(config_path, 'r') as f:
config_data = json.load(f)
return config_data
def loadConfig(config_name, config_dir=None):
if config_dir is None:
IOTBASE = os.getenv('IOTBASE')
if IOTBASE is None:
raise ValueError(f"Environment Variable 'IOTBASE' not set")
config_dir = os.path.join(IOTBASE, 'src', 'model_configs')
config_path = os.path.join(config_dir, config_name)
return loadConfigFromPath(config_path)
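# Minimal sketch of the expected layout (assumptions, not taken from the code):
# IOTBASE points at the repository root and named configs live under
# src/model_configs, e.g.
#   export IOTBASE=/path/to/repo
#   /path/to/repo/src/model_configs/MultiDatasetCombinedClassifier.json
# so that loadConfig('MultiDatasetCombinedClassifier.json') returns the parsed
# JSON dict.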
def getParentDir(path):
return os.path.split(path)[0]
def parentDirExists(path):
parentDir = getParentDir(path)
return os.path.exists(parentDir)
def createParentDir(path):
parentDir = getParentDir(path)
os.makedirs(parentDir)
return
def runFingerprintingDevicesExp(config=''):
if config != '':
config_data = loadConfigFromPath(config)
else:
config_data = loadConfig('FingerprintingDevicesExpConfig.json')
result_path = os.path.join(IOTBASE, config_data['output_path'])
if not parentDirExists(result_path): createParentDir(result_path)
if VERBOSE: print(f'Running FingerprintingDevicesExp')
if VERBOSE: print(f"result_path: {result_path}", flush=True)
fs = fsec.FeatureSelector( n_dict=config_data['n_dict'],
n_tls_tcp=config_data['n_dict'],
n_udp=config_data['n_udp'],
n_dns=config_data['n_dns'],
n_ntp=config_data['n_ntp'],
n_protocol=config_data['n_protocol'],
one_hot_encode=config_data['one_hot_encode'])
model = mt.FingerprintingDevicesExp(train_datasets=config_data['train_dataset_paths'], fs=fs, devices=config_data['devices'])
model.run(result_path, runs=config_data['runs'], features=config_data['features'], errors=config_data['errors'])
def runMultiDatasetCombinedClassifier(config=''):
if config != '':
config_data = loadConfigFromPath(config)
else:
config_data = loadConfig('MultiDatasetCombinedClassifier.json')
result_path = os.path.join(IOTBASE, config_data['output_path'])
if not parentDirExists(result_path): createParentDir(result_path)
if VERBOSE: print(f'Running MultiDatasetCombinedClassifier')
if VERBOSE: print(f"result_path: {result_path}", flush=True)
fs = fsec.FeatureSelector( simple_groups=config_data['simple_groups'],
dict_groups=config_data['dict_groups'],
n_dict=config_data['n_dict'],
n_tls_tcp=config_data['n_dict'],
n_udp=config_data['n_udp'],
n_dns=config_data['n_dns'],
n_ntp=config_data['n_ntp'],
n_protocol=config_data['n_protocol'],
one_hot_encode=config_data['one_hot_encode'])
model = mt.MultiDatasetCombinedClassifier(fs=fs, train_datasets=config_data['train_dataset_paths'], cv=config_data['cv'], label_col=config_data['label_col'])
model.run(result_path, errors=config_data['errors'], plot_cm=config_data['plot_cm'], runs=config_data['runs'], features=config_data['features'])
def runKnownUnknownClassifier(config=''):
if config != '':
config_data = loadConfigFromPath(config)
else:
config_data = loadConfig('KnownUnknownClassifier.json')
result_path = os.path.join(IOTBASE, config_data['output_path'])
if not parentDirExists(result_path): createParentDir(result_path)
if VERBOSE: print(f'Running KnownUnknownClassifier')
if VERBOSE: print(f"result_path: {result_path}", flush=True)
fs = fsec.FeatureSelector( simple_groups=config_data["simple_groups"],
dict_groups=config_data["dict_groups"],
n_dict=config_data['n_dict'],
n_tls_tcp=config_data['n_dict'],
n_udp=config_data['n_udp'],
n_dns=config_data['n_dns'],
n_ntp=config_data['n_ntp'],
n_protocol=config_data['n_protocol'],
one_hot_encode=config_data['one_hot_encode'])
model = mt.KnownUnknownClassifier(fs=fs, train_datasets=config_data['train_dataset_paths'], cv=config_data['cv'], label_col=config_data['label_col'])
model.run(result_path, runs=config_data['runs'], split_type=config_data['split_type'], non_iot_filter=config_data['non_iot_filter'], features=config_data['features'])
def runMultiDatasetCommonClassifier(config=''):
if config != '':
config_data = loadConfigFromPath(config)
else:
config_data = loadConfig('MultiDatasetCommonClassifier.json')
result_path = os.path.join(IOTBASE, config_data['output_path'])
if not parentDirExists(result_path): createParentDir(result_path)
if VERBOSE: print(f'Running MultiDatasetCommonClassifier')
if VERBOSE: print(f"result_path: {result_path}", flush=True)
fs = fsec.FeatureSelector( simple_groups=config_data['simple_groups'],
dict_groups=config_data['dict_groups'],
n_dict=config_data['n_dict'],
n_tls_tcp=config_data['n_tls_tcp'],
n_udp=config_data['n_udp'],
n_dns=config_data['n_dns'],
n_ntp=config_data['n_ntp'],
n_protocol=config_data['n_protocol'],
one_hot_encode=config_data['one_hot_encode'])
model = mt.MultiDatasetCommonClassifier( train_datasets=config_data['train_dataset_paths'],
test_datasets=config_data['test_dataset_paths'],
fs=fs,
label_col=config_data['label_col'])
model.run( result_path=result_path,
runs=config_data['runs'],
errors=config_data['errors'],
data_size=config_data['data_size'],
features=config_data['features'])
def runMultiDatasetCombinedClassifierIoTvsNonIoT(config=''):
if config != '':
config_data = loadConfigFromPath(config)
else:
config_data = loadConfig('MultiDatasetCombinedClassifierIoTvsNonIoT.json')
result_path = os.path.join(IOTBASE, config_data['output_path'])
if not parentDirExists(result_path): createParentDir(result_path)
if VERBOSE: print(f'Running MultiDatasetCombinedClassifierIoTvsNonIoT')
if VERBOSE: print(f"result_path: {result_path}", flush=True)
fs = fsec.FeatureSelector( simple_groups=config_data['simple_groups'],
dict_groups=config_data['dict_groups'],
n_dict=config_data['n_dict'],
n_tls_tcp=config_data['n_tls_tcp'],
n_udp=config_data['n_udp'],
n_dns=config_data['n_dns'],
n_ntp=config_data['n_ntp'],
n_protocol=config_data['n_protocol'],
one_hot_encode=config_data['one_hot_encode'])
model = mt.MultiDatasetCombinedClassifierIoTvsNonIoT( train_datasets=config_data['train_dataset_paths'],
test_datasets=config_data['test_dataset_paths'],
fs=fs,
label_col=config_data['label_col'],
cv=config_data['cv'],
print_details=config_data['print_details'])
model.run( result_path=result_path,
runs=config_data['runs'],
errors=config_data['errors'],
features=config_data['features'])
def runTargetVsNonTargetClassifier(config=''):
if config != '':
config_data = loadConfigFromPath(config)
else:
config_data = loadConfig('TargetVsNonTargetClassifier.json')
result_path = os.path.join(IOTBASE, config_data['output_path'])
if not parentDirExists(result_path): createParentDir(result_path)
if VERBOSE: print(f'Running TargetVsNonTargetClassifier')
if VERBOSE: print(f"result_path: {result_path}", flush=True)
fs = fsec.FeatureSelector( simple_groups=config_data['simple_groups'],
dict_groups=config_data['dict_groups'],
n_dict=config_data['n_dict'],
n_tls_tcp=config_data['n_tls_tcp'],
n_udp=config_data['n_udp'],
n_dns=config_data['n_dns'],
n_ntp=config_data['n_ntp'],
n_protocol=config_data['n_protocol'],
one_hot_encode=config_data['one_hot_encode'])
model = mt.TargetVsNonTargetClassifier( fs=fs,
train_datasets=config_data['train_dataset_paths'])
model.run(result_path, runs=config_data['runs'], features=config_data['features'], devices=config_data['devices'], min_balance=config_data['min_balance'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--combined", action="store_true", help="Run the combined classifier, aka multi dataset combined classifier")
group.add_argument("--common", action="store_true", help="Run the common classifier, aka multi dataset common classifier")
group.add_argument("--known-unknown", action="store_true", help="Run the known unknown classifier")
group.add_argument("--combined-iot-vs-noniot", action="store_true", help="Run the multi dataset combined iot vs non-iot classifier")
group.add_argument("--target", action="store_true", help="Run the target vs non target classifier")
group.add_argument("--fingerprint", action="store_true", help="Run the fingerprinting devices experiment")
parser.add_argument("--config", default="", type=str, help="Path to config file to use for this experiment")
args = parser.parse_args()
global IOTBASE
IOTBASE = os.getenv('IOTBASE')
if args.combined:
runMultiDatasetCombinedClassifier(args.config)
elif args.common:
runMultiDatasetCommonClassifier(args.config)
elif args.known_unknown:
runKnownUnknownClassifier(args.config)
elif args.combined_iot_vs_noniot:
runMultiDatasetCombinedClassifierIoTvsNonIoT(args.config)
elif args.target:
runTargetVsNonTargetClassifier(args.config)
elif args.fingerprint:
runFingerprintingDevicesExp(args.config)
else:
raise ValueError("unknown run type")
|
StarcoderdataPython
|
4902520
|
"""Tests for logger: model Comment."""
from django.test import TestCase
from geokey.core.models import LoggerHistory
from geokey.users.tests.model_factories import UserFactory
from geokey.projects.tests.model_factories import ProjectFactory
from geokey.categories.tests.model_factories import CategoryFactory
from geokey.contributions.tests.model_factories import (
LocationFactory,
ObservationFactory,
CommentFactory,
)
class LogCommentTest(TestCase):
"""Test model Comment."""
def setUp(self):
"""Set up test."""
self.user = UserFactory.create()
self.project = ProjectFactory.create(**{
'creator': self.user})
self.category = CategoryFactory.create(**{
'creator': self.user,
'project': self.project})
self.location = LocationFactory.create(**{
'creator': self.user})
self.observation = ObservationFactory.create(**{
'creator': self.user,
'location': self.location,
'project': self.project,
'category': self.category})
self.comment = CommentFactory.create(**{
'creator': self.user,
'commentto': self.observation})
def test_log_create(self):
"""Test when comment gets created."""
log_count_init = LoggerHistory.objects.count()
comment = CommentFactory.create(**{
'creator': self.user,
'commentto': self.observation})
log = LoggerHistory.objects.last()
log_count = LoggerHistory.objects.count()
self.assertNotEqual(log.user, {
'id': str(self.user.id),
'display_name': self.user.display_name})
self.assertEqual(log.project, {
'id': str(self.project.id),
'name': self.project.name})
self.assertEqual(log.usergroup, None)
self.assertEqual(log.category, {
'id': str(self.category.id),
'name': self.category.name})
self.assertEqual(log.field, None)
self.assertEqual(log.location, {
'id': str(self.location.id),
'name': self.location.name})
self.assertEqual(log.observation, {
'id': str(self.observation.id)})
self.assertEqual(log.comment, {
'id': str(comment.id)})
self.assertEqual(log.subset, None)
self.assertEqual(log.action, {
'id': 'created',
'class': 'Comment'})
self.assertEqual(log_count, log_count_init + 1)
self.assertEqual(log.historical, None)
def test_log_create_response(self):
"""Test when response gets created."""
log_count_init = LoggerHistory.objects.count()
response = CommentFactory.create(**{
'creator': self.user,
'commentto': self.observation,
'respondsto': self.comment})
log = LoggerHistory.objects.last()
log_count = LoggerHistory.objects.count()
self.assertNotEqual(log.user, {
'id': str(self.user.id),
'display_name': self.user.display_name})
self.assertEqual(log.project, {
'id': str(self.project.id),
'name': self.project.name})
self.assertEqual(log.usergroup, None)
self.assertEqual(log.category, {
'id': str(self.category.id),
'name': self.category.name})
self.assertEqual(log.field, None)
self.assertEqual(log.location, {
'id': str(self.location.id),
'name': self.location.name})
self.assertEqual(log.observation, {
'id': str(self.observation.id)})
self.assertEqual(log.comment, {
'id': str(response.id)})
self.assertEqual(log.subset, None)
self.assertEqual(log.action, {
'id': 'created',
'class': 'Comment',
'subaction': 'respond',
'comment_id': str(self.comment.id)})
self.assertEqual(log_count, log_count_init + 1)
self.assertEqual(log.historical, None)
def test_log_delete(self):
"""Test when comment gets deleted."""
log_count_init = LoggerHistory.objects.count()
self.comment.delete()
log = LoggerHistory.objects.last()
log_count = LoggerHistory.objects.count()
self.assertNotEqual(log.user, {
'id': str(self.user.id),
'display_name': self.user.display_name})
self.assertEqual(log.project, {
'id': str(self.project.id),
'name': self.project.name})
self.assertEqual(log.usergroup, None)
self.assertEqual(log.category, {
'id': str(self.category.id),
'name': self.category.name})
self.assertEqual(log.field, None)
self.assertEqual(log.location, {
'id': str(self.location.id),
'name': self.location.name})
self.assertEqual(log.observation, {
'id': str(self.observation.id)})
self.assertEqual(log.comment, {
'id': str(self.comment.id)})
self.assertEqual(log.subset, None)
self.assertEqual(log.action, {
'id': 'deleted',
'class': 'Comment',
'field': 'status',
'value': 'deleted'})
self.assertEqual(log_count, log_count_init + 1)
history = self.comment.history.get(pk=log.historical.get('id'))
self.assertEqual(history.id, self.comment.id)
def test_log_delete_nested(self):
"""Test when comment that has responses gets deleted."""
response = CommentFactory.create(**{
'creator': self.user,
'commentto': self.observation,
'respondsto': self.comment})
log_count_init = LoggerHistory.objects.count()
self.comment.delete()
log_count = LoggerHistory.objects.count()
self.assertEqual(log_count, log_count_init + 2)
logs = LoggerHistory.objects.all().order_by('-pk')[:2]
# Response gets deleted
self.assertNotEqual(logs[1].user, {
'id': str(self.user.id),
'display_name': self.user.display_name})
self.assertEqual(logs[1].project, {
'id': str(self.project.id),
'name': self.project.name})
self.assertEqual(logs[1].usergroup, None)
self.assertEqual(logs[1].category, {
'id': str(self.category.id),
'name': self.category.name})
self.assertEqual(logs[1].field, None)
self.assertEqual(logs[1].location, {
'id': str(self.location.id),
'name': self.location.name})
self.assertEqual(logs[1].observation, {
'id': str(self.observation.id)})
self.assertEqual(logs[1].comment, {
'id': str(response.id)})
self.assertEqual(logs[1].subset, None)
self.assertEqual(logs[1].action, {
'id': 'deleted',
'class': 'Comment',
'subaction': 'respond',
'comment_id': str(self.comment.id)})
self.assertEqual(logs[1].historical, None)
# Comment gets deleted
self.assertNotEqual(logs[0].user, {
'id': str(self.user.id),
'display_name': self.user.display_name})
self.assertEqual(logs[0].project, {
'id': str(self.project.id),
'name': self.project.name})
self.assertEqual(logs[0].usergroup, None)
self.assertEqual(logs[0].category, {
'id': str(self.category.id),
'name': self.category.name})
self.assertEqual(logs[0].field, None)
self.assertEqual(logs[0].location, {
'id': str(self.location.id),
'name': self.location.name})
self.assertEqual(logs[0].observation, {
'id': str(self.observation.id)})
self.assertEqual(logs[0].comment, {
'id': str(self.comment.id)})
self.assertEqual(logs[0].subset, None)
self.assertEqual(logs[0].action, {
'id': 'deleted',
'class': 'Comment',
'field': 'status',
'value': 'deleted'})
history = self.comment.history.get(pk=logs[0].historical.get('id'))
self.assertEqual(history.id, self.comment.id)
|
StarcoderdataPython
|
5169473
|
#
# Copyright (C) 2015 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os.path
import config
from SCons.Script import *
from autoconf import *
""" A list of files which are installed in the final RootFS """
rootfs_files = []
Export('rootfs_files')
def UseLibraries(env, libs = [], arch = None):
"""
Prepares a given environment, by adding library dependencies.
"""
# By default exclude host. If arch filter is set, apply it.
if (not arch and env['ARCH'] == 'host') or (arch and env['ARCH'] != arch):
return
# Loop all required libraries.
for lib in libs:
# Add them to the include and linker path.
env.Append(CPPPATH = [ '#lib/' + lib ])
env.Append(LIBPATH = [ '#' + env['BUILDROOT'] + '/lib/' + lib ])
env.Append(LIBS = [ lib ])
def UseServers(env, servers = []):
"""
Prepares a given environment by adding server dependencies
"""
if '#server' not in env['CPPPATH']:
env.Append(CPPPATH = [ '#server' ])
for serv in servers:
env.Append(CPPPATH = [ '#server/' + serv ])
def HostProgram(env, target, source):
if env['ARCH'] == 'host':
env.Program(target, source)
def TargetProgram(env, target, source, install_dir = None):
if env['ARCH'] != 'host':
env.Program(target, source)
env.TargetInstall(target, install_dir)
def TargetLibrary(env, lib, source):
if env['ARCH'] != 'host':
env.Library(lib, source)
def CopyStrFunc(target, source, env):
return " " + env.subst_target_source("COPY $SOURCE => $TARGET", 0, target, source)
def DirStrFunc(target):
return " MKDIR " + target
def TargetInstall(env, source, target = None):
if env['ARCH'] != 'host':
SCons.Tool.install.install_action.strfunction = CopyStrFunc
if not target:
target = '${ROOTFS}/' + Dir('.').srcnode().path
env.Install(target, source)
rootfs_files.append(str(target) + os.sep + os.path.basename(source))
def SubDirectories():
dir_list = []
dir_src = Dir('.').srcnode().abspath
if dir_src:
for f in os.listdir(dir_src):
if os.path.isdir(dir_src + os.sep + f):
dir_list.append(f)
SConscript( dirs = dir_list )
Export('SubDirectories')
# Create target, host and kernel environments.
host = Environment(tools = ["default", "phony", "test"],
toolpath = ["support/scons"])
host.AddMethod(HostProgram, "HostProgram")
host.AddMethod(TargetProgram, "TargetProgram")
host.AddMethod(TargetLibrary, "TargetLibrary")
host.AddMethod(UseLibraries, "UseLibraries")
host.AddMethod(UseServers, "UseServers")
host.AddMethod(TargetInstall, "TargetInstall")
host.Append(ROOTFS = '#${BUILDROOT}/rootfs')
host.Append(ROOTFS_FILES = [])
host.Append(bin = '${ROOTFS}/bin',
etc = '${ROOTFS}/etc',
server = '${ROOTFS}/server',
boot = '${ROOTFS}/boot')
target = host.Clone(tools = ["default", "bootimage", "iso", "binary", "linn", "phony", "test"],
toolpath = ["support/scons"])
# Apply configuration
config.initialize(target, host, ARGUMENTS)
config.write_header(target)
config.write_header(host)
# Enables verbose compilation command output.
if not target['VERBOSE']:
target['CXXCOMSTR'] = host['CXXCOMSTR'] = " CXX $TARGET"
target['CCCOMSTR'] = host['CCCOMSTR'] = " CC $TARGET"
target['ASCOMSTR'] = host['ASCOMSTR'] = " AS $TARGET"
target['ASPPCOMSTR'] = host['ASPPCOMSTR'] = " AS $TARGET"
target['ARCOMSTR'] = host['ARCOMSTR'] = " AR $TARGET"
target['RANLIBCOMSTR'] = host['RANLIBCOMSTR'] = " LIB $TARGET"
target['LINKCOMSTR'] = host['LINKCOMSTR'] = " LD $TARGET"
target['COPYSTR'] = host['COPYSTR'] = " COPY $SOURCE => $TARGET"
# Verify the configured CFLAGS.
if not GetOption('clean'):
CheckCCFlags(target)
CheckCXXFlags(target)
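# --- Illustrative usage sketch (added for clarity, not part of the original build script) ---
# A per-directory SConscript would typically consume the helpers defined above through
# the exported environments. The library and program names below are hypothetical
# examples, and this assumes 'target' is exported further down in the build setup.
#
#   Import('target')
#   target.UseLibraries(['libposix', 'libstd'])
#   target.UseServers(['filesystem'])
#   target.TargetProgram('myprogram', Glob('*.cpp'))
#   SubDirectories()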
|
StarcoderdataPython
|
4875151
|
from ..utils import DataikuException
from ..utils import DataikuUTF8CSVReader
from ..utils import DataikuStreamedHttpUTF8CSVReader
import json
import time
from .metrics import ComputedMetrics
from .ml import DSSMLTask
from .utils import DSSDatasetSelectionBuilder
class DSSAnalysisStepBuilder(object):
def __init__(self, step_type=None, step_name=None):
self.step = {'metaType':'PROCESSOR', 'type':step_type, 'name':step_name, 'params':{}}
def build(self):
"""Returns the built step dict"""
return self.step
def with_type(self, step_type):
"""Sets the step's type"""
self.step["type"] = step_type
return self
def with_name(self, step_name):
"""Sets the step's name"""
self.step["name"] = step_name
return self
class DSSAnalysisDefinition():
"""
Object to manipulate the definition of a visual analysis
"""
def __init__(self, analysis, acp):
self.analysis = analysis
self.acp = acp
def get_raw(self):
"""
Gets the raw dictionary of the visual analysis definition
"""
return self.acp
def get_raw_script(self):
"""
Gets the raw dictionary of visual analysis' script settings (including steps, sampling, ...)
"""
acp = self.get_raw()
if not 'script' in acp:
acp['script'] = {'steps':[]}
return acp['script']
def get_raw_script_steps(self):
"""
Gets the raw dictionary of visual analysis' script steps
"""
script = self.get_raw_script()
if not 'steps' in script:
script['steps'] = []
return script['steps']
def get_raw_script_sampling(self):
"""
Gets the raw dictionary of visual analysis' script sampling
"""
script = self.get_raw_script()
if not 'explorationSampling' in script:
script['explorationSampling'] = {}
return script['explorationSampling']
def save(self):
"""
Shortcut to :meth:`DSSAnalysis.set_definition()`
"""
self.analysis.set_definition(self)
def add_step(self, step):
"""
Add a step to the script
        :param object step: A :class:`DSSAnalysisStepBuilder` to build the settings of the step.
"""
steps = self.get_raw_script_steps()
if isinstance(step, DSSAnalysisStepBuilder):
steps.append(step.build())
else:
steps.append(step)
def set_script_sampling_selection(self, selection):
"""
Sets the sampling for the script
:param object selection: A :class:`DSSDatasetSelectionBuilder` to build the settings of the extract of the dataset.
"""
sampling = self.get_raw_script_sampling()
if isinstance(selection, DSSDatasetSelectionBuilder):
sampling['selection'] = selection.build()
else:
sampling['selection'] = selection
class DSSAnalysis(object):
"""A handle to interact with a DSS visual analysis"""
def __init__(self, client, project_key, analysis_id):
self.client = client
self.project_key = project_key
self.analysis_id = analysis_id
########################################################
# Analysis deletion
########################################################
def delete(self, drop_data=False):
"""
        Delete the visual analysis
        :param bool drop_data: Should the data of the analysis be dropped
"""
return self.client._perform_empty("DELETE", "/projects/%s/lab/%s/" % (self.project_key, self.analysis_id))
########################################################
# Analysis definition
########################################################
def get_definition(self):
"""
Get the definition of the analysis
:return: a DSSAnalysisDefinition object to interact with the settings
:rtype: :class:`dataikuapi.dss.analysis.DSSAnalysisDefinition`
"""
acp = self.client._perform_json("GET", "/projects/%s/lab/%s/" % (self.project_key, self.analysis_id))
return DSSAnalysisDefinition(self, acp)
def set_definition(self, definition):
"""
Set the definition of the analysis
Args:
definition: the definition, as a JSON object or a :class:`dataikuapi.dss.analysis.DSSAnalysisDefinition`.
You should only set a definition object that has been retrieved using the get_definition call.
"""
if isinstance(definition, DSSAnalysisDefinition):
acp = definition.get_raw()
else:
acp = definition
return self.client._perform_json("PUT", "/projects/%s/lab/%s/" % (self.project_key, self.analysis_id), body=acp)
########################################################
# ML
########################################################
def create_prediction_ml_task(self,
target_variable,
ml_backend_type="PY_MEMORY",
guess_policy="DEFAULT",
prediction_type=None,
wait_guess_complete=True):
"""Creates a new prediction task in this visual analysis lab
for a dataset.
:param string target_variable: Variable to predict
:param string ml_backend_type: ML backend to use, one of PY_MEMORY, MLLIB or H2O
:param string guess_policy: Policy to use for setting the default parameters. Valid values are: DEFAULT, SIMPLE_FORMULA, DECISION_TREE, EXPLANATORY and PERFORMANCE
:param string prediction_type: The type of prediction problem this is. If not provided the prediction type will be guessed. Valid values are: BINARY_CLASSIFICATION, REGRESSION, MULTICLASS
:param boolean wait_guess_complete: if False, the returned ML task will be in 'guessing' state, i.e. analyzing the input dataset to determine feature handling and algorithms.
You should wait for the guessing to be completed by calling
``wait_guess_complete`` on the returned object before doing anything
else (in particular calling ``train`` or ``get_settings``)
        :return: A :class:`dataikuapi.dss.ml.DSSMLTask` handle to the newly created ML task
"""
obj = {
"taskType" : "PREDICTION",
"targetVariable" : target_variable,
"backendType": ml_backend_type,
"guessPolicy": guess_policy
}
if prediction_type is not None:
obj["predictionType"] = prediction_type
ref = self.client._perform_json("POST", "/projects/%s/lab/%s/models/" % (self.project_key, self.analysis_id), body=obj)
mltask = DSSMLTask(self.client, self.project_key, self.analysis_id, ref["mlTaskId"])
if wait_guess_complete:
mltask.wait_guess_complete()
return mltask
def create_clustering_ml_task(self,
ml_backend_type="PY_MEMORY",
guess_policy="KMEANS",
wait_guess_complete=True):
"""Creates a new clustering task in a new visual analysis lab
for a dataset.
The returned ML task will be in 'guessing' state, i.e. analyzing
the input dataset to determine feature handling and algorithms.
You should wait for the guessing to be completed by calling
``wait_guess_complete`` on the returned object before doing anything
else (in particular calling ``train`` or ``get_settings``)
:param string ml_backend_type: ML backend to use, one of PY_MEMORY, MLLIB or H2O
:param string guess_policy: Policy to use for setting the default parameters. Valid values are: KMEANS and ANOMALY_DETECTION
:param boolean wait_guess_complete: if False, the returned ML task will be in 'guessing' state, i.e. analyzing the input dataset to determine feature handling and algorithms.
You should wait for the guessing to be completed by calling
``wait_guess_complete`` on the returned object before doing anything
else (in particular calling ``train`` or ``get_settings``)
"""
obj = {
"taskType" : "CLUSTERING",
"backendType": ml_backend_type,
"guessPolicy": guess_policy
}
ref = self.client._perform_json("POST", "/projects/%s/lab/%s/models/" % (self.project_key, self.analysis_id), body=obj)
mltask = DSSMLTask(self.client, self.project_key, self.analysis_id, ref["mlTaskId"])
if wait_guess_complete:
mltask.wait_guess_complete()
return mltask
def list_ml_tasks(self):
"""
List the ML tasks in this visual analysis
Returns:
the list of the ML tasks summaries, each one as a JSON object
"""
return self.client._perform_json("GET", "/projects/%s/lab/%s/models/" % (self.project_key, self.analysis_id))
def get_ml_task(self, mltask_id):
"""
Get a handle to interact with a specific ML task
Args:
mltask_id: the identifier of the desired ML task
Returns:
A :class:`dataikuapi.dss.ml.DSSMLTask` ML task handle
"""
return DSSMLTask(self.client, self.project_key, self.analysis_id, mltask_id)
# some basic steps
class DSSFormulaStepBuilder(DSSAnalysisStepBuilder):
def __init__(self, step_name=None):
super(DSSFormulaStepBuilder, self).__init__(step_type='CreateColumnWithGREL', step_name=step_name)
def with_output_column(self, column_name):
"""Sets the step's output column's name"""
self.step["params"]["column"] = column_name
return self
def with_error_column(self, column_name):
"""Sets the step's output column's name"""
self.step["params"]["errorColumn"] = column_name
return self
def with_expression(self, expression):
"""Sets the step's expression"""
self.step["params"]["expression"] = expression
return self
class AppliesToStepBuilder(DSSAnalysisStepBuilder):
def __init__(self, step_type=None, step_name=None):
super(AppliesToStepBuilder, self).__init__(step_type=step_type, step_name=step_name)
self.step["params"]["appliesTo"] = 'SINGLE_COLUMN'
def with_column_selection_mode(self, column_selection_mode):
"""Sets the step's column selection mode (SINGLE_COLUMN, COLUMNS, PATTERN, ALL)"""
self.step["params"]["appliesTo"] = column_selection_mode
return self
def with_columns(self, *column_names):
"""Sets the step's selected columns"""
self.step["params"]["columns"] = [c for c in column_names]
return self
def with_column_regex(self, regex):
"""Sets the step's column selection regular expression"""
self.step["params"]["appliesToPattern"] = regex
return self
def with_single_column_selection(self, column_name):
"""Sets the step's as applying to a single column"""
return self.with_column_selection_mode('SINGLE_COLUMN').with_columns(column_name)
    def with_multiple_column_selection(self, *column_names):
        """Sets the step as applying to several explicitly listed columns"""
        return self.with_column_selection_mode('COLUMNS').with_columns(*column_names)
def with_regex_column_selection(self, regex):
"""Sets the step's as applying to a single column"""
return self.with_column_selection_mode('PATTERN').with_column_regex(regex)
def with_all_column_selection(self, column_name):
"""Sets the step's as applying to all columns"""
return self.with_column_selection_mode('ALL')
class FilterAndFlagStepBuilder(AppliesToStepBuilder):
def __init__(self, step_type=None, step_name=None):
super(FilterAndFlagStepBuilder, self).__init__(step_type=step_type, step_name=step_name)
self.step["params"]["booleanMode"] = 'AND'
self.step["params"]["action"] = 'REMOVE_ROW'
def with_action(self, action):
"""Sets the step's action on match (KEEP_ROW, REMOVE_ROW, CLEAR_CELL, DONTCLEAR_CELL, FLAG)"""
self.step["params"]["action"] = action
return self
def with_boolean_mode(self, boolean_mode):
"""Sets the step's mode for combining matches in different columns (AND, OR)"""
self.step["params"]["booleanMode"] = boolean_mode
return self
def with_flag_column(self, column_name):
"""Sets the step's column for outputing the flag"""
self.step["params"]["flagColumn"] = column_name
return self
class FilterOnValueStepBuilder(FilterAndFlagStepBuilder):
def __init__(self, step_name=None):
super(FilterOnValueStepBuilder, self).__init__(step_type='FlagOnValue', step_name=step_name)
def with_values(self, *values):
"""Sets the step's flagged values"""
self.step["params"]["values"] = [v for v in values]
return self
def with_matching_mode(self, matching_mode):
"""Sets the step's matching_mode (FULL_STRING, SUBSTRING, PATTERN)"""
self.step["params"]["matchingMode"] = matching_mode
return self
def with_normalization_mode(self, normalization_mode):
"""Sets the step's normalization_mode (EXACT, LOWERCASE, NORMALIZED)"""
self.step["params"]["normalizationMode"] = normalization_mode
return self
class FilterOnBadTypeStepBuilder(FilterAndFlagStepBuilder):
def __init__(self, step_name=None):
super(FilterOnBadTypeStepBuilder, self).__init__(step_type='FilterOnBadType', step_name=step_name)
def with_meaning(self, meaning):
"""Sets the step's meaning to check"""
self.step["params"]["type"] = meaning
return self
class RemoveRowsStepBuilder(AppliesToStepBuilder):
def __init__(self, step_name=None):
super(RemoveRowsStepBuilder, self).__init__(step_type='RemoveRowsOnEmpty', step_name=step_name)
def with_meaning(self, keep):
"""Sets the step's behavior when an empty value is found : True=keep, False=drop (default)"""
self.step["params"]["keep"] = keep
return self
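# --- Illustrative usage sketch (added for clarity, not part of the module) ---
# The step builders above are meant to be combined with DSSAnalysisDefinition.add_step().
# The project handle, analysis id and column name below are hypothetical, and obtaining
# the analysis through a project object is an assumption about the surrounding client API.
#
#   analysis = project.get_analysis("myAnalysisId")      # assumed DSSProject helper
#   definition = analysis.get_definition()
#   step = (FilterOnValueStepBuilder(step_name="drop empty ids")
#           .with_single_column_selection("customer_id")
#           .with_values("")
#           .with_action("REMOVE_ROW"))
#   definition.add_step(step)
#   definition.save()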
|
StarcoderdataPython
|
1760544
|
from ..geometry import Position, Size
from . import toolkit
class Stack(toolkit.Container, toolkit.Widget):
"""Free form container where all children are stacked on top of each other.
    Widgets are rendered in FIFO order and each widget uses the whole panel.
Will overdraw previous ones if they overlap.
"""
def layout_content(self, manager, parent, panel, z_order):
for child in self.children:
widget = manager.create_child(parent)
z_order = child.layout(manager, widget, panel, z_order+1)
return z_order
def calc_sizes(available_size, sizes):
if all(sizes):
return sizes
# Calc size for fixed size elements
reserved = sum(size for size in sizes if size)
left = available_size - reserved
    # Now calc default size for elements without fixed size
dynamic_num = len([size for size in sizes if not size])
default_size = left // dynamic_num
# TODO: What about modulo?
return [size or default_size for size in sizes]
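# Worked example (added for clarity): calc_sizes(10, [3, 0, 0]) reserves 3 cells for the
# fixed element, leaving 7 for the two dynamic ones, so each gets 7 // 2 = 3 and the
# result is [3, 3, 3]; the leftover cell is the modulo issue flagged in the TODO above.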
class Row(toolkit.Container, toolkit.Widget):
"""Horizontal container.
    Widgets are rendered in FIFO order from left to right.
align=LEFT |AA BB CC |
align=RIGHT | AA BB CC|
align=CENTER | AA BB CC |
"""
def __init__(self, content=None, *, align):
super().__init__(content=content, align=align)
@property
def width(self):
if self._width:
return self._width
widths = [child.width for child in self.children]
if 0 in widths:
return 0
return sum(widths)
@property
def height(self):
if self._height:
return self._height
heights = [child.height for child in self.children]
if 0 in heights:
return 0
heights.append(0)
return max(heights)
def layout_content(self, manager, parent, panel, z_order):
z_orders = [z_order, ]
position = Position.ZERO
widths = [child.width for child in self.children]
calc_widths = calc_sizes(panel.width, widths)
for i, child in enumerate(self.children):
widget = manager.create_child(parent)
size = Size(calc_widths[i], child.height or panel.height)
subpanel = panel.create_panel(position, size)
child_z_order = child.layout(manager, widget, subpanel, z_order+1)
z_orders.append(child_z_order or 0)
position += Position(calc_widths[i], 0)
return max(z_orders)
# TODO: ???
# class JustifiedRow:
# # |AA BB CC|
# pass
class List(toolkit.Container, toolkit.Widget):
"""Vertical container.
    Widgets are rendered in FIFO order from top to bottom.
"""
def __init__(self, content=None, *, align):
super().__init__(content=content, align=align)
@property
def width(self):
if self._width:
return self._width
widths = [child.width for child in self.children]
if 0 in widths:
return 0
widths.append(0)
return max(widths)
@property
def height(self):
if self._height:
return self._height
heights = [child.height for child in self.children]
if 0 in heights:
return 0
return sum(heights)
def layout_content(self, manager, parent, panel, z_order):
z_orders = [z_order, ]
position = Position.ZERO
heights = [child.height for child in self.children]
calc_heights = calc_sizes(panel.height, heights)
for i, child in enumerate(self.children):
widget = manager.create_child(parent)
size = Size(child.width or panel.width, calc_heights[i])
subpanel = panel.create_panel(position, size)
child_z_order = child.layout(manager, widget, subpanel, z_order+1)
z_orders.append(child_z_order or 0)
position += Position(0, calc_heights[i])
return max(z_orders)
class Split(toolkit.Container, toolkit.Widget):
"""Container that renders widgets on each side of splitted panel."""
def __init__(self, content=None, *, left=None, right=None, top=None, bottom=None):
super().__init__(content=content)
self.left = left
self.right = right
self.top = top
self.bottom = bottom
def layout_content(self, manager, parent, panel, z_order):
z_orders = []
subpanels = panel.split(self.left, self.right, self.top, self.bottom)
for i, child in enumerate(self.children):
if child:
widget = manager.create_child(parent)
child_z_order = child.layout(manager, widget, subpanels[i], z_order+1)
z_orders.append(child_z_order or 0)
if i >= 2:
break
return z_orders and max(z_orders) or z_order
|
StarcoderdataPython
|
9726757
|
# <NAME>, ИУ7-12
# Defense assignment (text processing)
# Find the sentences in which every word consists of alternating
# consonant and vowel letters.
sogl = 'бвгджзйклмнпрстфхцчшщъьБВГДЖЗЙКЛМНПРСТФХЦЧШЩЪЬ'
sogl = list(sogl[:])
gl = 'аеёиоуыэюяАЕЁИОУЫЭЮЯ'
gl = list(gl[:])
razd = '.?!'
razd = list(razd[:])
#print(gl,sogl,razd)
def splitp(s):
k = []
z = ''
for i in s:
if i in razd:
z += i
k.append(z)
z = ''
else:
z += i
return k
def isCh(s):
if len(s) == 0:
return True
flag = (s[0] in sogl)
for i in s:
if (i not in gl) and (i not in sogl):
continue
if i in gl and flag:
return False
if i in sogl and not flag:
return False
flag = not flag
return True
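# Illustrative check (added for clarity): isCh('мама') is True because consonants and
# vowels strictly alternate, while isCh('окно') is False because 'к' and 'н' are
# adjacent consonants.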
def printbw(s,n):
k = 0
for i in s.split():
k += len(i)
if k >= n or len(i)>n:
k = 0
print()
print(i,end = ' ')
print()
text = '<NAME>. А я сижу на парте. Инжу надо закрыть.\
А сколько у меня баллов по теоринфе? Много! Или мало? Давайте подпишем ещё \
строчку - так интереснее! Ну хорошо, эту покажу. Напишу-ка я ещё.'
print('Original text:')
printbw(text,60)
text = list(splitp(text))
print('\nSplit into sentences:')
for i in text:
print(i.lstrip())
v = []
print('\nSentences in which every word consists of alternating consonant/vowel letters:')
for i in text:
flag = True
for j in i.split():
flag *= isCh(j)
if flag:
v.append(i.lstrip())
for i in v:
printbw(i,60)
|
StarcoderdataPython
|
185331
|
from __future__ import absolute_import
from logging import getLogger
logger = getLogger("gui_builder.fields")
import traceback
from .widgets import wx_widgets as widgets
try:
unicode
except NameError:
unicode = str
class UnboundField(object):
creation_counter = 0
_GUI_FIELD = True
def __init__(self, field, *args, **kwargs):
self.field = field
self.args = args
self.kwargs = kwargs
self.extra_callbacks = []
UnboundField.creation_counter += 1
self.creation_counter = UnboundField.creation_counter
def bind(self, parent=None, name=None, **kwargs):
kwargs.update(self.kwargs)
return self.field(
bound_name=name,
parent=parent,
extra_callbacks=self.extra_callbacks,
*self.args,
**kwargs
)
def add_callback(self, trigger=None):
if not isinstance(trigger, str):
self.kwargs["callback"] = trigger
return trigger
def add_callback_decorator(function):
self.extra_callbacks.append((trigger, function))
return function
return add_callback_decorator
class GUIField(object):
widget_type = None
__autolabel__ = False
widget_args = ()
widget_kwargs = {}
callback = None
extra_callbacks = None
default_value = None
def __new__(cls, *args, **kwargs):
if "parent" in kwargs or kwargs.get("top_level_window"):
return super(GUIField, cls).__new__(cls)
else:
return UnboundField(cls, *args, **kwargs)
def __init__(
self,
widget_type=None,
label=None,
parent=None,
bound_name=None,
callback=None,
default_value=None,
default_focus=False,
extra_callbacks=None,
*args,
**kwargs
):
if widget_type is None:
widget_type = self.widget_type
widget_kwargs = {}
widget_args = []
widget_kwargs.update(self.widget_kwargs)
self.widget_kwargs = widget_kwargs
widget_args.extend(self.widget_args)
self.widget_args = widget_args
logger.debug(
"Field: %r. widget_args: %r. widget_kwargs: %r."
% (self, self.widget_args, self.widget_kwargs)
)
if callback is None:
callback = self.callback
if default_value is None:
default_value = self.default_value
self.widget_type = widget_type
super(GUIField, self).__init__()
self.control_label = label
self.widget_args.extend(args)
self.parent = None
if parent is not None:
self.bind(parent, bound_name)
self.widget_kwargs.update(kwargs)
self.callback = callback
self.default_value = default_value
self.default_focus = default_focus
self.widget = None
if extra_callbacks is not None:
if self.extra_callbacks is None:
self.extra_callbacks = []
self.extra_callbacks = list(self.extra_callbacks)
self.extra_callbacks.extend(extra_callbacks)
def bind(self, parent, name=None):
logger.debug(
"Binding field %r to parent %r with name %r" % (self, parent, name)
)
self.parent = parent
self.bound_name = name
return self
@property
def label(self):
if self.control_label is not None:
return self.control_label
if self.__autolabel__ and self.bound_name:
return self.bound_name.replace("_", " ").title()
def render(self, **runtime_kwargs):
"""Creates this field's widget."""
if self.widget_type is None:
raise RuntimeError("Must set a widget_type for %r" % self)
widget_kwargs = self.widget_kwargs
if self.label is not None:
widget_kwargs["label"] = self.label
if not hasattr(self.parent, "widget"):
widget_kwargs["parent"] = self.parent
else:
if self.parent is not None:
logger.debug(
"The parent of this field is %r and parent of this widget is %r"
% (self.parent, self.parent.widget)
)
if self.parent.widget is None:
logger.warning(
"Parent provided without a rendered widget. Traceback follows:\n%s"
% traceback.format_stack()
)
widget_kwargs["parent"] = self.parent.widget
if self.callback is not None:
widget_kwargs["callback"] = self.callback
logger.debug("Passed in runtime kwargs: %r" % runtime_kwargs)
widget_kwargs.update(runtime_kwargs)
logger.debug(
"Rendering field %r with widget type %r, and widget_kwargs:\n%r"
% (self, self.widget_type, widget_kwargs)
)
try:
self.widget = self.widget_type(
field=self, *self.widget_args, **widget_kwargs
)
except Exception as e:
logger.exception("Error creating widget.")
raise RuntimeError(
"Unable to create widget with type %r" % self.widget_type,
traceback.format_exc(e),
e,
)
self.widget.render()
self.register_extra_callbacks()
def register_extra_callbacks(self):
"""Picks up extra callbacks defined on the field's class and registers them at render time."""
if self.extra_callbacks is None:
return
for callback_set in self.extra_callbacks:
if len(callback_set) == 1:
                callback_set = [None] + list(callback_set)
self.register_callback(*callback_set)
def register_callback(self, trigger=None, callback=None):
"""Registers a callback, I.E. an event handler, to a certain trigger (event). If the callback is not provided it is assumed to be this field's default callback. If a trigger is not provided, assumes the trigger is this field's widget's default event type"""
logger.debug(
"Registering callback %r with trigger %r to field %r"
% (callback, trigger, self)
)
self.widget.register_callback(trigger, callback)
def unregister_callback(self, trigger, callback):
"""Unregisters a callback from a trigger"""
logger.debug(
"Unregistering callback %r with trigger %r from field %r"
% (callback, trigger, self)
)
self.widget.unregister_callback(trigger, callback)
def bind_event(self, event, callback):
return self.widget.bind_event(event, callback)
def unbind_event(self, event, callback=None):
return self.widget.unbind_event(event, callback)
def is_focused(self):
"""Returns a boolean indicating if this field is currently focused."""
return self.widget.is_focused()
def set_focus(self):
"""Sets focus to this field."""
self.widget.set_focus()
def populate(self, value):
"""this is to provide a common abstraction for getting data into controls. It will take the most common form that data holds in an application and turn it into something this widget can deal with."""
self.set_value(value)
def set_default_value(self):
if self.default_value is None:
return
default = self.default_value
if hasattr(default, "__unicode__"):
self.populate(default)
return
while callable(default):
default = default(self)
logger.debug("Setting default value of field %r to %r" % (self, default))
self.populate(default)
def can_be_focused(self):
return self.widget_type.can_be_focused()
def disable(self):
"""Disables this field, I.E. makes it unuseable."""
self._reset_last_enabled_descendant()
return self.widget.disable()
def enable(self):
"""Enables this field, making it useable."""
self._reset_last_enabled_descendant()
return self.widget.enable()
def set_enabled(self, enabled):
"""A method to enable/disable this field based on the truthyness of the passed in value"""
if enabled:
self.enable()
else:
self.disable()
def _reset_last_enabled_descendant(self):
next_field = self
while next_field is not None:
if (
hasattr(next_field, "_last_enabled_descendant")
and next_field._last_enabled_descendant is not None
):
next_field._last_enabled_descendant = None
if hasattr(next_field.parent, "widget"):
next_field = next_field.parent
else:
break
def is_enabled(self):
return self.widget.enabled
def freeze(self):
self.widget.freeze()
def thaw(self):
self.widget.thaw()
def hide(self):
"""Hides this field"""
return self.widget.hide()
def show(self):
"""Shows this field, perhaps after it has been hidden"""
return self.widget.show()
def get_first_ancestor(self):
parent = self
current = None
while parent is not None:
current = parent
parent = parent.parent
return current
def is_shown(self):
"""Returns a boolean. If it is False, this control is hidden. If it is true, it is not."""
return self.widget.is_shown()
def destroy(self):
"""Destroys the visual counterpart of this field."""
self.widget.destroy()
logger.debug("Destroyed widget for field %r" % self)
def __del__(self):
logger.debug("Automatically destroying %r" % self)
if self.widget is None:
return
self.destroy()
self.widget = None
def display(self):
"""Display's this field's widget on the screen."""
self.widget.display()
def display_modal(self):
self.widget.display_modal()
def get_label(self):
"""Returns this field's current label."""
return self.widget.get_label()
def set_label(self, label):
"""Given a string, sets this field's label to it."""
return self.widget.set_label(label)
def set_accessible_label(self, label):
self.widget.set_accessible_label(label)
def get_value(self):
"""Returns the contents of this field."""
return self.widget.get_value()
def set_value(self, value):
"""Sets the contents of this field."""
return self.widget.set_value(value)
def get_default_value(self):
return self.default_value
class Text(GUIField):
"""A text field"""
widget_type = widgets.Text
def set_default_value(self):
super(Text, self).set_default_value()
self.select_all()
def append(self, text):
"""Appends text to this text field."""
self.widget.append(text)
def write(self, text):
"""Writes the provided text to this text field at its current position"""
self.widget.write(text)
def select_range(self, start, end):
"""Selects the text in this control from the position specified by start to the position specified by end"""
self.widget.select_range(start, end)
def get_insertion_point(self):
"""Returns the current insertion point, a zero-based index representing the user's position into the text contained in this field"""
return self.widget.get_insertion_point()
def set_insertion_point(self, insertion_point):
"""Sets the insertion point, the 0-based index representing the user's position in this field."""
self.widget.set_insertion_point(insertion_point)
def get_length(self):
"""Returns the length of text contained within this control."""
return self.widget.get_length()
def get_line(self, line_number):
"""Returns the line number of the currently-focused line in this field."""
return self.widget.get_line(line_number)
def get_number_of_lines(self):
"""Returns the total number of lines of text contained in this field."""
return self.widget.get_number_of_lines()
def get_insertion_point_from_x_y(self, x, y):
"""Returns the line and column numbers of the given index into this contents of this text field"""
return self.widget.get_insertion_point_from_x_y(x, y)
def get_x_y_from_insertion_point(self, insertion_point):
"""Given a line and column number, returns the 0-based index of the specified character in the contents of this field"""
return self.widget.get_x_y_from_insertion_point(insertion_point)
def select_all(self):
"""Selects all text in this text field"""
self.select_range(0, self.get_length())
def clear(self):
"""Removes all text from this text field."""
return self.widget.clear()
class IntText(Text):
"""This text field will only allow the input of numbers."""
widget_type = widgets.IntText
class Button(GUIField):
"""A standard button"""
widget_type = widgets.Button
def make_default(self):
"""Called before rendering, sets this to be the default button in a dialog"""
return self.widget.make_default()
class CheckBox(GUIField):
"""A standard Check Box"""
widget_type = widgets.CheckBox
class ButtonSizer(GUIField):
widget_type = widgets.ButtonSizer
class ChoiceField(GUIField):
"""A base class defining the methods available on choice fields."""
def __init__(self, default_index=0, choices=None, *args, **kwargs):
super(ChoiceField, self).__init__(*args, **kwargs)
self.default_index = default_index
if choices is None:
choices = []
self.choices = [unicode(i) for i in choices]
def render(self, **runtime_kwargs):
runtime_kwargs.setdefault("choices", self.choices)
super(ChoiceField, self).render(**runtime_kwargs)
def populate(self, value):
self.set_items(value)
def set_items(self, items):
self.widget.set_items(items)
def set_default_value(self):
super(ChoiceField, self).set_default_value()
self.set_default_index()
def get_default_choice(self):
if self.choices:
return self.choices[self.default_index]
def get_choice(self):
return self.widget.get_choice()
def get_items(self):
return self.widget.get_items()
def set_items(self, items):
return self.widget.set_items(items)
def delete_item(self, item):
return self.widget.delete_item(item)
def clear(self):
return self.widget.clear()
def get_index(self):
return self.widget.get_index()
def set_index(self, index):
self.default_index = index
return self.widget.set_index(index)
def set_default_index(self):
if self.get_count():
self.set_index(self.default_index)
def find_index(self, item):
for num, current_item in enumerate(self.get_items()):
if item == current_item:
return num
raise ValueError("%r not in %r" % (item, self))
def set_index_to_item(self, item):
index = self.find_index(item)
self.set_index(index)
def insert_item(self, index, item):
return self.widget.insert_item(index, item)
def update_item(self, index, new_item):
return self.widget.update_item(index, new_item)
def get_count(self):
return self.widget.get_count()
def get_item(self, index):
return self.widget.get_item(index)
def set_item(self, index, item):
return self.widget.set_item(index, item)
def set_value(self, value):
self.set_items(value)
class ComboBox(ChoiceField):
"""An Edit Combo Box. Pass read_only=True to the constructor for a combo box."""
widget_type = widgets.ComboBox
def select_all(self):
return self.widget.select_all()
class ListBox(ChoiceField):
"""A standard list box."""
widget_type = widgets.ListBox
class RadioButtonGroup(ChoiceField):
"""A group of choices, expressed as radio buttons."""
widget_type = widgets.RadioBox
class ListViewColumn(GUIField):
widget_type = widgets.ListViewColumn
class Slider(GUIField):
"""A moveable slider."""
widget_type = widgets.Slider
def get_page_size(self):
"""Returns the number representing how many units this control will skip when the user presses page up/down."""
return self.widget.get_page_size()
def set_page_size(self, page_size):
"""Sets the number representing how many units this control will skip when the user presses page up/down."""
return self.widget.set_page_size(page_size)
def set_line_size(self, value):
self.widget.set_line_size(value)
def get_line_size(self):
return self.widget.get_line_size()
class FilePicker(GUIField):
widget_type = widgets.FilePicker
class MenuItem(GUIField):
"""An item in a menu which is not a submenu."""
widget_type = widgets.MenuItem
def check(self):
"""Check this menu item."""
self.widget.check()
def uncheck(self):
"""Uncheck this menu item."""
self.widget.uncheck()
def set_checked(self, checked):
"""Pass in a boolean representing whether or not this menu item should be checked."""
if checked:
self.check()
else:
self.uncheck()
def set_enabled(self, enabled):
if enabled:
self.enable()
else:
self.disable()
def set_as_mac_about_menu_item(self):
"""Indicate to OS X that this is the About... item in the help menu"""
self.widget.set_as_mac_about_menu_item()
def set_as_mac_exit_menu_item(self):
"""Indicate to OS X that clicking this menu item will exit the application"""
self.widget.set_as_mac_exit_menu_item()
def set_as_mac_preferences_menu_item(self):
"""Indicate to OS X that clicking this menu item will invoke the application's preferences"""
self.widget.set_as_mac_preferences_menu_item()
class StatusBar(GUIField):
"""A status bar."""
widget_type = widgets.StatusBar
class Link(GUIField):
"""A hyperlink"""
widget_type = widgets.Link
class StaticText(GUIField):
"""Static text"""
widget_type = widgets.StaticText
class DatePicker(GUIField):
widget_type = widgets.DatePicker
def set_range(self, start, end):
"""Sets the minimum and maximum dates that can be picked in this control"""
self.widget.set_range(start, end)
class TreeView(GUIField):
"""A treeview"""
widget_type = widgets.TreeView
def add_root(self, text=None, image=None, selected_image=None, data=None):
return self.widget.add_root(
text, image=image, selected_image=selected_image, data=data
)
def get_root_item(self):
return self.widget.get_root_item()
def append_item(
self, parent=None, text=None, image=None, selected_image=None, data=None
):
if parent is None:
return self.add_root(
text=text, image=image, selected_image=selected_image, data=data
)
return self.widget.append_item(
parent=parent,
text=text,
image=image,
selected_image=selected_image,
data=data,
)
def clear(self):
"""Deletes all items out of this tree view"""
self.widget.clear()
def delete(self, item):
self.widget.delete(item)
def get_selection(self):
return self.widget.get_selection()
def select_item(self, item):
self.widget.select_item(item)
def get_data(self, item):
return self.widget.get_data(item)
def set_item_has_children(self, item, val):
self.widget.set_item_has_children(item, val)
class ProgressBar(GUIField):
widget_type = widgets.ProgressBar
class ToolBarItem(GUIField):
widget_type = widgets.ToolBarItem
class Image(GUIField):
widget_type = widgets.StaticBitmap
def load_image(self, image):
return self.widget.load_image(image)
class SpinBox(GUIField):
widget_type = widgets.SpinBox
def set_min(self, min):
self.widget.set_min(min)
def set_max(self, max):
        self.widget.set_max(max)
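# --- Illustrative usage sketch (added for clarity, not part of the module) ---
# Fields instantiated without a parent come back as UnboundField instances, which the
# wider gui_builder framework later binds to a container and renders. The dialog class
# and the "clicked" trigger name below are hypothetical and only show the
# declaration / callback pattern.
#
#   class ExampleDialog(object):            # real containers come from gui_builder's forms
#       name = Text(label="Name", default_value=u"")
#       ok = Button(label="OK")
#
#       @ok.add_callback("clicked")         # assumed trigger name
#       def on_ok(*args, **kwargs):
#           pass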
|
StarcoderdataPython
|
1784164
|
<filename>SC001 (beginner)/SC001_Assignment4/mirror_lake.py
"""
File: mirror_lake.py
----------------------------------
This file reads in mt-rainier.jpg and
makes a new image that creates a mirror
lake vibe by placing an inverse image of
mt-rainier.jpg below the original one.
"""
from simpleimage import SimpleImage
def reflect(filename):
"""
:param filename: str, allow users to input a file path
:return: SimpleImage, return an image (a SimpleImage object) back to users
"""
img = SimpleImage(filename)
blank_img = SimpleImage.blank(img.width, img.height * 2)
for x in range(img.width):
for y in range(img.height):
p = img.get_pixel(x, y)
bp_up = blank_img.get_pixel(x, y)
bp_down = blank_img.get_pixel(x, img.height * 2 - 1 - y)
bp_up.red = p.red
bp_up.green = p.green
bp_up.blue = p.blue
bp_down.red = p.red
bp_down.green = p.green
bp_down.blue = p.blue
return blank_img
def main():
"""
This program allows user to mirror a photo vertically.
"""
original_mt = SimpleImage('images/mt-rainier.jpg')
original_mt.show()
reflected = reflect('images/mt-rainier.jpg')
reflected.show()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6473299
|
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup(
packages=['interfaces'],
package_dir={'': 'test'},
)
setup(**setup_args)
|
StarcoderdataPython
|
4844853
|
<reponame>pd-Shah/FlaskRecycle
from app import ma
from app.models import Task
class TaskSchema(ma.ModelSchema):
class Meta:
model = Task
task_schema = TaskSchema()
tasks_schema = TaskSchema(many=True)
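# Illustrative usage sketch (added for clarity, not part of the module); the Task query
# below is hypothetical, and depending on the installed marshmallow version dump() may
# return the data directly or a MarshalResult wrapper.
#
#   task = Task.query.first()
#   single = task_schema.dump(task)
#   many = tasks_schema.dump(Task.query.all())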
|
StarcoderdataPython
|
3234955
|
from os import mkdir, walk, remove
from os.path import exists, join as joinpath
from pickle import PicklingError, UnpicklingError
from collections import namedtuple
from redlib.api.py23 import pickledump, pickleload
from logging import getLogger
from . import const
log = getLogger(__name__)
AutocompInfo = namedtuple('AutocompInfo', ['command', 'access', 'version'])
class DataStoreError(Exception):
FILE_NOT_FOUND = 0
def __init__(self, msg, reason=None):
super(DataStoreError, self).__init__(msg)
self.reason = reason
class DataStore:
pickle_protocol = 2
def __init__(self):
self.check_dir()
def check_dir(self, data=True, autocomp=False, script=False, create=True):
if not exists(const.data_dir):
self.create_dir(const.data_dir, create=create)
if autocomp and not exists(const.autocomp_dir):
self.create_dir(const.autocomp_dir, create=create)
if script and not exists(const.script_dir):
self.create_dir(const.script_dir, create=create)
def create_dir(self, path, create=True):
if create:
mkdir(path)
else:
raise DataStoreError('%s does not exist'%path)
def save_optiontree(self, ot, cmdname):
self.check_dir(data=False, autocomp=True)
with open(joinpath(const.autocomp_dir, cmdname), 'wb') as f:
try:
pickledump(ot, f, protocol=self.pickle_protocol, fix_imports=True)
except PicklingError as e:
print(e)
raise DataStoreError('unable to save option tree')
def load_optiontree(self, cmdname, filepath=None):
filepath = filepath or joinpath(const.autocomp_dir, cmdname)
if not exists(filepath):
filepath = joinpath(const.root_autocomp_dir, cmdname)
if not exists(filepath):
raise DataStoreError('unable to load option tree')
try:
with open(filepath, 'rb') as f:
try:
data = pickleload(f, fix_imports=True)
except UnpicklingError as e:
log.error(str(e))
#ot_version = data[0]
#if ot_version > self.ot_version:
# raise DataStoreError('cannot load greater ot_version, %s > %s'%(version, self.version))
if type(data) == list: # older version (1.0)
return data[1]
else:
return data
except IOError as e:
raise DataStoreError(e)
def remove_optiontree(self, cmdname, exc=False):
filepath = joinpath(const.autocomp_dir, cmdname)
return self.remove_file(filepath, exc=exc)
def remove_all_optiontrees(self):
for name in self.list_optiontree():
self.remove_optiontree(name)
def remove_file(self, filepath, exc=False):
if exists(filepath):
try:
remove(filepath)
return True
except OSError as e:
if exc:
raise DataStoreError(e)
else:
return False
else:
raise DataStoreError('%s not found'%filepath, reason=DataStoreError.FILE_NOT_FOUND)
def list_autocomp_commands(self):
autocomp_list = []
def add_to_list(cmd, access, dirpath):
version = self.load_optiontree(cmd, filepath=joinpath(dirpath, cmd)).prog_version # exc
i = (filter(lambda i : i.command == cmd, autocomp_list) or [None])[0]
if i is None:
autocomp_list.append(AutocompInfo(cmd, [access], [version]))
else:
i.access.append(access)
i.version.append(version)
for _, _, files in walk(const.autocomp_dir):
for f in files:
add_to_list(f, 'user', const.autocomp_dir)
for _, _, files in walk(const.root_autocomp_dir):
for f in files:
add_to_list(f, 'all', const.root_autocomp_dir)
return autocomp_list
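# --- Illustrative usage sketch (added for clarity, not part of the module) ---
# Typical round-trip: persist an option tree for a command and load it back. The
# command name and the 'tree' object are hypothetical; the option tree itself comes
# from the surrounding autocomplete framework.
#
#   store = DataStore()
#   store.save_optiontree(tree, 'mycmd')
#   tree_again = store.load_optiontree('mycmd')
#   store.remove_optiontree('mycmd', exc=True)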
|
StarcoderdataPython
|
364819
|
# Copyright (c) 2013, <NAME> and Contributors
# See license.txt
import frappe
import unittest
test_records = frappe.get_test_records('Jasper Reports')
class TestJasperReports(unittest.TestCase):
pass
|
StarcoderdataPython
|
5114688
|
<reponame>esdc-esac-esa-int/pyesasky
"""
pyesasky setup
"""
import json
from pathlib import Path
from os.path import join as pjoin
from jupyter_packaging import (
wrap_installers,
npm_builder,
get_data_files,
install_npm
)
import setuptools
HERE = Path(__file__).parent.resolve()
# The name of the project
name = "pyesasky"
lab_path = (HERE / name / "labextension")
nb_path = (HERE /name / 'nbextension')
# Representative files that should exist after a successful build
jstargets = [
pjoin(nb_path, 'index.js'),
pjoin(HERE, 'lib', 'plugin.js'),
pjoin(HERE, 'lib', 'extension.js'),
]
# Representative files that should exist after a successful build
ensured_targets = [
str(lab_path / "package.json"),
str(lab_path / "static/style.js"),
pjoin(nb_path, 'index.js'),
pjoin(HERE, 'lib', 'plugin.js'),
pjoin(HERE, 'lib', 'extension.js'),
]
labext_name = "pyesasky"
package_data_spec = {
'pyesasky': [
'nbextension/static/*.*js*'
]
}
data_files_spec = [
("share/jupyter/nbextensions/pyesasky", str(nb_path), "**"),
("share/jupyter/labextensions/pyesasky", str(lab_path), "**"),
("share/jupyter/labextensions/pyesasky", str(HERE), "install.json"),
('etc/jupyter/nbconfig/notebook.d' , pjoin(HERE, 'jupyter.d', 'notebook.d'), 'pyesasky.json'),
('etc/jupyter/jupyter_notebook_config.d' , pjoin(HERE, 'jupyter.d', 'jupyter_notebook_config.d'), 'pyesasky.json'),
]
post_develop = npm_builder(
build_cmd="install:extension", source_dir="src", build_dir=lab_path
)
# pre_develop = install_npm(HERE, npm=["yarn"], build_cmd='build:extensions')
cmdclass = wrap_installers(post_develop=post_develop, ensured_targets=ensured_targets)
long_description = (HERE / "README.md").read_text()
# Get the package info from package.json
pkg_json = json.loads((HERE / "package.json").read_bytes())
setup_args = dict(
name=name,
version=pkg_json["version"],
url=pkg_json["homepage"],
author=pkg_json["author"]["name"],
author_email=pkg_json["author"]["email"],
description=pkg_json["description"],
license=pkg_json["license"],
long_description=long_description,
long_description_content_type="text/markdown",
cmdclass=cmdclass,
data_files=get_data_files(data_files_spec),
packages=setuptools.find_packages(),
install_requires=[
'ipywidgets>=7.6.3',
'ipykernel>=5.0.0',
'requests>=2.5.1'
],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Framework :: Jupyter",
],
)
if __name__ == "__main__":
setuptools.setup(**setup_args)
|
StarcoderdataPython
|
9693878
|
<filename>peon/window.py
from types import (ItemTypes, InventoryTypes, ENCHANT_ITEMS)
import time
import logging
import fastmc.proto
from textwrap import dedent
log = logging.getLogger(__name__)
class Window(object):
def __init__(self, window_id, action_num_counter, send_queue,
proto, recv_condition, slots=None, _type=None, title=None):
self._id = window_id
self.slots = SlotList([])
if slots is not None:
for slot in slots:
slot = Slot(slot) if slot is not None else None
self.slots.append(slot)
self._type = None if _type is None else _type
self.title = None if title is None else title
self.properties = {}
self.cursor_slot = None
self._action_num_counter = action_num_counter
self._send_queue = send_queue
self._proto = proto
self._recv_condition = recv_condition
self._confirmations = {}
self._click_handlers = {
(0, 0): self._left_click,
(1, 0): self._shift_left_click,
(4, 1): self._control_q,
}
def __repr__(self):
slot_strings = []
for index, slot in enumerate(self.slots):
description = InventoryTypes.get_slot_description(
self._type, index)
slot_strings.append(
' {}, # {} {}'.format(str(slot),
index,
description))
templ = dedent('''\
Window(id={}, slots=[
{}
])''')
return templ.format(self._id, '\n'.join(slot_strings))
def __contains__(self, _type):
return _type in self.slots
def index(self, _type):
return self.slots.index(_type)
def count(self, _type):
return self.slots.count(_type)
def window_index(self, _type):
return self.slots.window_index(_type)
def set_slot(self, index, slot):
if slot is None:
self.slots[index] = None
else:
self.slots[index] = Slot(slot)
def set_cursor_slot(self, slot):
self.cursor_slot = None if slot is None else Slot(slot)
def set_slots(self, slots):
self.slots = SlotList([])
for slot in slots:
slot = Slot(slot) if slot is not None else None
self.slots.append(slot)
def get_property(self, _property):
return self.properties.get(_property)
def find(self, term):
slots = []
for index, slot in enumerate(self.slots):
if slot is not None and term in slot.name:
slots.append((index, slot))
return slots
def set_property(self, _property, value):
self.properties.update({_property: value})
@property
def custom_inventory(self):
if len(self.slots) > 35:
return SlotList(self.slots[:-36])
@property
def player_inventory(self):
if len(self.slots) > 35:
return SlotList(self.slots[-36:], start=len(self.slots) - 36)
@property
def main_inventory(self):
if len(self.slots) > 35:
return SlotList(self.slots[-36:-9], start=len(self.slots) - 36)
@property
def held(self):
if len(self.slots) > 9:
return SlotList(self.slots[-9:], start=len(self.slots) - 9)
def click(self, slot_num, button=0, mode=0):
action_num = self._action_num_counter.next()
if slot_num == -999:
slot = None
else:
slot = self.slots[slot_num]
cursor_slot = self.cursor_slot
log.debug('Sending click window. slot_num: %d action_num: %d',
slot_num, action_num)
log.debug('cursor: %s', str(self.cursor_slot))
log.debug('slot: %s', str(slot))
fastmc_slot = None if slot is None else slot.as_fastmc()
self._send(self._proto.PlayServerboundClickWindow.id,
window_id=self._id,
slot=slot_num,
button=button,
action_num=action_num,
mode=mode,
clicked_item=fastmc_slot)
if not self._wait_for(lambda: action_num in self._confirmations,
timeout=5):
log.error('Did not get confirmation')
return False
if not self._confirmations.get(action_num):
log.error('Transaction rejected: %d', action_num)
return False
log.debug('Confirmation received for %d: %s', action_num,
str(self._confirmations.get(action_num)))
if (mode, button) in self._click_handlers:
return self._click_handlers[(mode, button)](slot_num, cursor_slot,
slot)
def shift_click(self, slot_num):
return self.click(slot_num, mode=1)
    def drop_click(self, slot_num):
        return self.click(slot_num, button=1, mode=4)
def _left_click(self, slot_num, cursor_slot, slot):
if slot_num == -999:
self.cursor_slot = None
else:
self.slots[slot_num] = cursor_slot
self.cursor_slot = slot
return True
def _shift_left_click(self, slot_num, cursor_slot, slot):
# TODO validate what happens to clicked items base on current inventory
return True
def _control_q(self, slot_num, cursor_slot, slot):
return True
def swap_slots(self, slot_num_a, slot_num_b):
for num in [slot_num_a, slot_num_b, slot_num_a]:
if not self.click(num):
return False
return True
def _send(self, packet_id, **kwargs):
self._send_queue.put((packet_id, kwargs))
def _wait_for(self, what, timeout=10):
start = time.time()
with self._recv_condition:
while not what() and time.time() - start < timeout:
self._recv_condition.wait(timeout=1)
return what()
def get_slot(self, slot_num):
if slot_num > len(self.slots):
return None
return self.slots[slot_num]
def get_slot_count(self, slot_num):
slot = self.get_slot(slot_num)
if slot is None:
return 0
return slot.count
def get_enchantables(self, types=None):
return self.slots.get_enchantables(types=types)
def get_enchanted(self, types=None):
return self.slots.get_enchanted(types=types)
class SlotList(list):
def __init__(self, *args, **kwargs):
self.start = kwargs.get('start', 0)
list.__init__(self, *args)
def __contains__(self, _type):
return (
(_type is None and None in [s for s in self]) or
self._get_name(_type) in [s.name for s in self if s is not None]
)
@staticmethod
def _get_name(_type):
if isinstance(_type, basestring):
return _type
elif isinstance(_type, tuple):
item_id, damage = _type
return ItemTypes.get_name(item_id, damage)
elif isinstance(_type, int):
return ItemTypes.get_name(_type, None)
def index(self, _type, relative=False):
name = self._get_name(_type)
for index, slot in enumerate(self):
if slot is not None and slot.name == name:
if relative:
return index
else:
return index + self.start
def count(self, _type):
name = self._get_name(_type)
count = 0
for index, slot in enumerate(self):
if slot is not None and slot.name == name:
count += slot.count
return count
def get_enchantables(self, types=None):
if types is None:
types = ENCHANT_ITEMS
slot_nums = []
for index, slot in enumerate(self):
if slot is None or slot.name not in types:
continue
if not slot.has_data():
slot_nums.append(index + self.start)
return slot_nums
def get_enchanted(self, types=None):
if types is None:
types = ENCHANT_ITEMS
slot_nums = []
for index, slot in enumerate(self):
if slot is None or slot.name not in types:
continue
if slot.has_data():
slot_nums.append(index + self.start)
return slot_nums
class Slot(object):
def __init__(self, slot):
self.item_id = slot.item_id
self.count = slot.count
self.damage = slot.damage
self.nbt = slot.nbt
def __repr__(self):
return 'Slot(item_name="{}", count={}, damage={}, has_data={})'.format(
self.name, self.count, self.damage, self.has_data())
def __eq__(self, _type):
if _type is None:
return False
elif isinstance(_type, Slot):
return _type.name == self.name
return self._get_name(_type) == self.name
@staticmethod
def _get_name(_type):
if isinstance(_type, basestring):
return _type
elif isinstance(_type, tuple):
item_id, damage = _type
return ItemTypes.get_name(item_id, damage)
elif isinstance(_type, int):
return ItemTypes.get_name(_type, None)
@property
def name(self):
return ItemTypes.get_name(self.item_id, self.damage)
def has_data(self):
return bool(self.nbt)
def as_fastmc(self):
return fastmc.proto.Slot(
self.item_id, self.count, self.damage, self.nbt)
|
StarcoderdataPython
|
8012943
|
<reponame>genyrosk/gym-chess
from gym_chess.gym_chess import ChessEngine # rust module
from gym_chess.envs import ChessEnvV0, ChessEnvV1, ChessEnvV2 # envs
from gym.envs.registration import register # to register envs
register(
id="ChessVsRandomBot-v0",
entry_point="gym_chess.envs:ChessEnvV0",
kwargs={"opponent": "random"},
)
register(
id="ChessVsSelf-v0",
entry_point="gym_chess.envs:ChessEnvV0",
kwargs={"opponent": "none"},
# max_episode_steps=100,
# reward_threshold=.0, # optimum = .0
)
register(
id="ChessVsRandomBot-v1",
entry_point="gym_chess.envs:ChessEnvV1",
kwargs={"opponent": "random"},
)
register(
id="ChessVsSelf-v1",
entry_point="gym_chess.envs:ChessEnvV1",
kwargs={"opponent": "none"},
)
register(
id="ChessVsRandomBot-v2",
entry_point="gym_chess.envs:ChessEnvV2",
kwargs={"opponent": "random"},
)
register(
id="ChessVsSelf-v2",
entry_point="gym_chess.envs:ChessEnvV2",
kwargs={"opponent": "none"},
)
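# Illustrative usage sketch (added for clarity, not part of the module): once this
# package is imported, the ids registered above can be instantiated through gym. The
# loop below is the generic gym interaction pattern, not code from this package.
#
#   import gym
#   import gym_chess  # noqa: F401 - triggers the register() calls above
#
#   env = gym.make("ChessVsSelf-v1")
#   observation = env.reset()
#   observation, reward, done, info = env.step(env.action_space.sample())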
|
StarcoderdataPython
|
9712356
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import modin.utils
# Note: classes below are used for purely testing purposes - they
# simulate real-world use cases for _inherit_docstring
class BaseParent:
def method(self):
"""ordinary method (base)"""
def base_method(self):
"""ordinary method in base only"""
@property
def prop(self):
"""property"""
@staticmethod
def static():
"""static method"""
@classmethod
def clsmtd(cls):
"""class method"""
class BaseChild(BaseParent):
"""this is class docstring"""
def method(self):
"""ordinary method (child)"""
def own_method(self):
"""own method"""
def no_overwrite(self):
"""another own method"""
F = property(method)
@pytest.fixture(scope="module")
def wrapped_cls():
@modin.utils._inherit_docstrings(BaseChild)
class Wrapped:
def method(self):
pass
def base_method(self):
pass
def own_method(self):
pass
def no_overwrite(self):
"""not overwritten doc"""
@property
def prop(self):
return None
@staticmethod
def static():
pass
@classmethod
def clsmtd(cls):
pass
F = property(method)
return Wrapped
def _check_doc(wrapped, orig):
assert wrapped.__doc__ == orig.__doc__
if isinstance(wrapped, property):
assert wrapped.fget.__doc_inherited__
else:
assert wrapped.__doc_inherited__
def test_doc_inherit_clslevel(wrapped_cls):
_check_doc(wrapped_cls, BaseChild)
def test_doc_inherit_methods(wrapped_cls):
_check_doc(wrapped_cls.method, BaseChild.method)
_check_doc(wrapped_cls.base_method, BaseParent.base_method)
_check_doc(wrapped_cls.own_method, BaseChild.own_method)
assert wrapped_cls.no_overwrite.__doc__ != BaseChild.no_overwrite.__doc__
assert not getattr(wrapped_cls.no_overwrite, "__doc_inherited__", False)
def test_doc_inherit_special(wrapped_cls):
_check_doc(wrapped_cls.static, BaseChild.static)
_check_doc(wrapped_cls.clsmtd, BaseChild.clsmtd)
def test_doc_inherit_props(wrapped_cls):
assert type(wrapped_cls.method) == type(BaseChild.method) # noqa: E721
_check_doc(wrapped_cls.prop, BaseChild.prop)
_check_doc(wrapped_cls.F, BaseChild.F)
def test_doc_inherit_prop_builder():
def builder(name):
return property(lambda self: name)
class Parent:
prop = builder("Parent")
@modin.utils._inherit_docstrings(Parent)
class Child(Parent):
prop = builder("Child")
assert Parent().prop == "Parent"
assert Child().prop == "Child"
|
StarcoderdataPython
|
1692815
|
<gh_stars>0
from random import choice
class RandomWalk:
"""A class to generate a random walk."""
def __init__(self, num_points: int = 5000):
"""Initialize attributes of a walk."""
self.num_points = num_points
# All walks start at (0, 0).
self.x_values = [0]
self.y_values = [0]
def get_step(self, step_range: int):
"""Calculate the direction and distance of the following step."""
direction = choice([1, -1])
distance = choice(range(step_range + 1))
step = direction * distance
return step
def fill_walk(self):
"""Calculate the points in the walk."""
# Keep taking steps until reaches the desired length.
while len(self.x_values) < self.num_points:
# Decide which directions to go and how far to go in that direction
x_step = self.get_step(8)
y_step = self.get_step(8)
# Reject moves that go nowhere
if x_step == 0 and y_step == 0:
continue
# Calculate the new position
x = self.x_values[-1] + x_step
y = self.y_values[-1] + y_step
self.x_values.append(x)
self.y_values.append(y)
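if __name__ == "__main__":
    # Illustrative usage (added for clarity, not part of the original module):
    # generate a walk and plot it. matplotlib is an assumed dependency of the
    # caller, not of this module.
    import matplotlib.pyplot as plt

    rw = RandomWalk(num_points=1000)
    rw.fill_walk()
    plt.scatter(rw.x_values, rw.y_values, s=5)
    plt.show()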
|
StarcoderdataPython
|
1980369
|
<reponame>Chromico/bk-base
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from dataflow.batch.handlers.processing_job_info import ProcessingJobInfoHandler
from dataflow.modeling.settings import PARSED_TASK_TYPE, TABLE_TYPE
from dataflow.pizza_settings import BASE_FLOW_URL, MLSQL_NODE_LABEL
from dataflow.shared.log import modeling_logger as logger
SUB_TASK_PREFIX = "subtask_"
def generate_workflow_config(job_id):
job_info_list = ProcessingJobInfoHandler.get_proc_job_info_by_prefix(job_id)
job_id_list = []
for job_info in job_info_list:
job_id_list.append(job_info.job_id)
    # Sort the job ids, then move the first one to the end
job_id_list = sorted(job_id_list)
first_item = job_id_list.pop(0)
job_id_list.append(first_item)
logger.info(job_id_list)
return generate_workflow_config_by_list(job_id_list, run_mode="product")
def generate_workflow_config_by_list(job_id_list, run_mode="product"):
    # Build the full workflow config from the job list and its topology
workflow_config = {"params": {}, "config": {}, "node": {}}
url = BASE_FLOW_URL
url = url.rstrip("/").rstrip("flow")
for job_index in range(0, len(job_id_list)):
job_id = job_id_list[job_index]
        # Current workflow limitation: node names must not start with a digit, so add a prefix
sub_task_name = "{}_{}".format(SUB_TASK_PREFIX, job_id)
task_run_info = {
"job_id": job_id,
"job_type": "spark_mllib",
"run_mode": run_mode,
"api_url": {"base_dataflow_url": url},
"jobnavi": {},
}
sub_task_info = {
"node_type": "subtask",
"type_id": "spark_mllib",
"children": [],
"subtask_info": json.dumps(task_run_info),
"node_label": MLSQL_NODE_LABEL,
}
workflow_config["node"][sub_task_name] = sub_task_info
child_jobid_list = []
if job_index + 1 < len(job_id_list):
child_jobid_list.append(job_id_list[job_index + 1])
for child_job_id in child_jobid_list:
child_sub_task_name = "{}_{}".format(SUB_TASK_PREFIX, child_job_id)
state_name = "state_{}_{}".format(sub_task_name, child_sub_task_name)
state_info = {
"node_type": "state",
"state_type": "depend_rule",
"children": [child_sub_task_name],
"state_content": {"all_finished": [sub_task_name]},
}
sub_task_info["children"].append(state_name)
workflow_config["node"][state_name] = state_info
return workflow_config
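# Illustrative shape of the result of generate_workflow_config_by_list(["j1", "j2"]),
# where "j1" and "j2" are hypothetical job ids:
#   "subtask__j1"                      subtask node, children: ["state_subtask__j1_subtask__j2"]
#   "state_subtask__j1_subtask__j2"    depend_rule node: start "subtask__j2" once "subtask__j1" is all_finished
#   "subtask__j2"                      subtask node, children: []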
def generate_complete_config(model_config, input_info, output_info, window_info, model_params):
    # The model_config we receive contains a header and a series of transforms.
    # We need to replace:
    # 1. the input content of the source section (input, fields, etc.)
    # 2. the processor.args content of the first transform
    # 3. the sink content, parsed out of the last transform
    # Replace the placeholders in the config first
model_config_string = json.dumps(model_config)
model_config_string = model_config_string.replace("__TRANSFORM_ID__", output_info["name"])
model_config_string = model_config_string.replace("__DATA_SOURCE_ID__", input_info["name"])
new_model_config = json.loads(model_config_string)
    # todo: if a node ends up with several jobs later, loop here and process each job's config
    # todo: the current handling is simplified because there is no job-splitting logic yet
    # Replace the content of the source section
new_model_config["source"][input_info["name"]]["input"] = input_info["info"]
new_model_config["source"][input_info["name"]]["fields"] = input_info["fields"]
new_model_config["source"][input_info["name"]]["window"] = window_info
    # Fetch the first and the last transform
head, tail = fetch_head_and_tail(new_model_config["transform"])
logger.info("head:" + json.dumps(head))
logger.info("tail:" + json.dumps(tail))
    # Replace the parameter info in the first transform
    # with the user-supplied parameter values
    # Handle the interpreter info first
if head["task_type"] == PARSED_TASK_TYPE.MLSQL_QUERY.value:
head_interpreter = head["interpreter"]
for interpreter_item in head_interpreter:
interpreter_value_list = head_interpreter[interpreter_item]["value"]
new_interpreter_value_list = []
for item in interpreter_value_list:
if item in model_params:
new_interpreter_value_list.append(model_params[item])
head_interpreter[interpreter_item]["value"] = new_interpreter_value_list
        # Then handle the remaining args
processor = head["processor"]
processor_args = processor["args"]
for arg in processor_args:
if arg in model_params and arg not in head_interpreter:
                # Note: the interpreter case has already been handled above
processor_args[arg] = model_params[arg]
else:
processor = head["processor"]
processor_args = processor["args"]
select_list = ""
for item in model_params:
select_list = select_list + "{} as {},".format(model_params[item], item)
        # Strip the trailing comma
select_list = select_list.rstrip(",")
table_name = input_info["name"]
table_name = table_name[table_name.find("_") + 1 : len(table_name)] + "_" + table_name[0 : table_name.find("_")]
sub_query_sql = "(select {select_list} from {table})".format(select_list=select_list, table=table_name)
logger.info("sub query sql:" + sub_query_sql)
processor_args["sql"] = processor_args["format_sql"].replace("__DATA_SOURCE_INPUT_SQL__", sub_query_sql)
    # Then replace the fields taken directly from the source table
head_transform_fields = head["fields"]
for field in head_transform_fields:
if field["origin"] and field["origin"] in model_params:
field["origin"] = model_params[field["origin"]]
    # Generate the sink content from output_fields (which also come from the last transform's fields)
sink_info = {
"description": output_info["alias"],
"fields": output_info["fields"],
"type": "data",
"id": output_info["name"],
"name": output_info["name"],
"output": {
"type": "hdfs",
"mode": "overwrite",
"format": "parquet",
"table_type": TABLE_TYPE.RESULT_TABLE.value,
},
}
new_model_config["sink"] = {output_info["name"]: sink_info}
return new_model_config
def fetch_head_and_tail(transform_config):
"""
    Walk the parents declared in each transform and return the transform that has
    no internal dependency (head) and the transform that nothing else depends on (tail).
@param transform_config:
{
'transform_1': {
'parents':['transform_2']
},
'transform_2': {}
}
@return:
    (transform_2, transform_1), i.e. (head, tail)
"""
    # Record the "internal" dependencies among these transforms, i.e. ignoring anything outside this set
transform_depdence_map = {}
transform_id_map = {transform_config[transform_id]["id"]: transform_id for transform_id in transform_config}
for transform_id in transform_config:
if transform_id in transform_depdence_map:
depdence_info = transform_depdence_map[transform_id]
else:
depdence_info = {"parents": set(), "sons": set()}
transform_depdence_map[transform_id] = depdence_info
        # The logic below is a bit convoluted: the dict key of a transform and the id field inside it differ, so transform_id_map maps between the two
parents = transform_config[transform_id]["parents"]
for parent in parents:
if parent in transform_id_map:
                    # actual_transform_id is the real id, i.e. the id field inside the transform instance
actual_transform_id = parent
                    # origin_transform_id is the dict key under which that transform entity is stored
origin_transform_id = transform_id_map[actual_transform_id]
                    # First update the parents info of the current node
depdence_info["parents"].add(origin_transform_id)
                    # Also update the sons info of the corresponding parent
if origin_transform_id in transform_depdence_map:
parent_dependence_info = transform_depdence_map[origin_transform_id]
else:
parent_dependence_info = {"parents": set(), "sons": set()}
transform_depdence_map[origin_transform_id] = parent_dependence_info
parent_dependence_info["sons"].add(transform_id)
logger.info(transform_depdence_map)
head = None
tail = None
for transform_id in transform_depdence_map:
if not transform_depdence_map[transform_id]["parents"]:
            # has no parent dependencies
head = transform_config[transform_id]
if not transform_depdence_map[transform_id]["sons"]:
            # is not depended on by any other transform
tail = transform_config[transform_id]
return head, tail
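# Illustrative example with hypothetical ids: given
#   {"t_a": {"id": "a", "parents": ["b"]}, "t_b": {"id": "b", "parents": []}}
# "t_b" has no internal parent and nothing depends on "t_a", so the call returns
# (head, tail) == (transform_config["t_b"], transform_config["t_a"]).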
|
StarcoderdataPython
|
3335002
|
# demonstrate the logging api in Python
# use the built-in logging module
import logging
def main():
# Use basicConfig to configure logging
# this is only executed once, subsequent calls to
# basicConfig will have no effect
logging.basicConfig(level=logging.DEBUG,
filemode="w",
filename="output.log")
# Try out each of the log levels
logging.debug("This is a debug-level log message")
logging.info("This is an info-level log message")
logging.warning("This is a warning-level message")
logging.error("This is an error-level message")
logging.critical("This is a critical-level message")
# Output formatted string to the log
logging.info("Here's a {} variable and an int: {}".format("string", 10))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9771633
|
import random
import re
import db
def preCleanKasvinimi(kasvi: str):
eliRe = r'(.+) eli (.+)'
parantheseRe = r'(.+) \((.+)\)'
latinLineRe = r'(.+) \- (.+)'
strOut = kasvi
m = re.match(eliRe, strOut)
if m:
strOut = m[2]
m = re.match(parantheseRe, strOut)
if m:
strOut = m[1]
m = re.match(latinLineRe, strOut)
if m:
strOut = m[2]
return strOut
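# Illustrative examples: preCleanKasvinimi("rauduskoivu eli Betula pendula") returns
# "Betula pendula", and preCleanKasvinimi("valkoapila (Trifolium repens)") returns "valkoapila".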
def levenshteinDistance(kasvi: str, name: str):
"""Calculate Levenshtein Distance between two strings. Pseudocode from Wikipedia,
translated into python.
Added a preprocessing step to increase fun matches by removing latin/alternative names from
the plant name strings before comparison.
"""
t = preCleanKasvinimi(kasvi).lower()
s = name.lower()
# create two work vectors of integer distances
n = len(t)
m = len(s)
v0 = [0]*(n+1)
v1 = [0]*(n+1)
# initialize v0 (the previous row of distances)
# this row is A[0][i]: edit distance for an empty s
# the distance is just the number of characters to delete from t
for i in range(n+1):
v0[i] = i
for i in range(m):
# calculate v1 (current row distances) from the previous row v0
# first element of v1 is A[i+1][0]
# edit distance is delete (i+1) chars from s to match empty t
v1[0] = i + 1
# use formula to fill in the rest of the row
for j in range(n):
# calculating costs for A[i+1][j+1]
deletionCost = v0[j + 1] + 1
insertionCost = v1[j] + 1
if s[i] == t[j]:
substitutionCost = v0[j]
else:
substitutionCost = v0[j] + 1
v1[j + 1] = min(deletionCost, insertionCost, substitutionCost)
# copy v1 (current row) to v0 (previous row) for next iteration
# since data in v1 is always invalidated, a swap without copy could be more efficient
v0 = []+v1
# after the last swap, the results of v1 are now in v0
return v0[n]
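# For example, levenshteinDistance("kitten", "sitting") == 3, the classic textbook case.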
def findKasvinimi(kasvinimet: list, first_name: str = "", last_name: str = None, n: int = 13):
"""Find kasvinimi based on user name, using a word-similarity metric.
Parameters
----------
    kasvinimet: list - list of kasvinimet (plant names) to search from
first_name: str - First name of user
last_name: str - Last name of user
n: int - Number of matches to randomize output from. Larger value
gives more unique names for one person, but possibly less matching for their name.
"""
if last_name:
name = first_name + " " + last_name
else:
name = first_name
sortedNimet = sorted(kasvinimet, key=(lambda a: levenshteinDistance(a[0], name)))
    kasviNimi = sortedNimet[random.randint(0, n - 1)]
return kasviNimi[0]
if __name__ == '__main__':
first_name = input("First Name: ")
last_name = input("Last Name: ")
print(findKasvinimi(db.readKasvinimet(), first_name=first_name, last_name=last_name))
|
StarcoderdataPython
|
9685992
|
<reponame>JanWeldert/freeDOM
"""
llh client:
packages messages, sends them to the llh service, and interprets replies
provides synchronous and asynchronous interfaces
"""
from __future__ import absolute_import, division, print_function
__author__ = "<NAME>"
import uuid
import numpy as np
import zmq
class LLHClient:
    __slots__ = [
"_max_hypos_per_batch",
"_max_obs_per_batch",
"_n_hypo_params",
"_n_hit_features",
"_n_evt_features",
"_sock",
]
def __init__(self, ctrl_addr, conf_timeout=-1):
""" loads configuration info from service listening
at ctrl_addr """
with zmq.Context.instance().socket(zmq.REQ) as req_sock:
req_sock.setsockopt(zmq.LINGER, 0)
req_sock.setsockopt(zmq.RCVTIMEO, conf_timeout)
req_sock.connect(ctrl_addr)
req_sock.send_string("conf")
conf = req_sock.recv_json()
self._init_from_conf(**conf)
@property
def max_obs_per_batch(self):
return self._max_obs_per_batch
@property
def max_hypos_per_batch(self):
return self._max_hypos_per_batch
def request_eval(self, hit_data, evt_data, theta, req_id=""):
"""Asynchronous llh eval request
Note: Batch sizes for asynchronous requests are currently limited to max_hypos_per_batch
Synchronous requests are unlimited in size (see eval_llh)
Replies to asynchronous requests are retrieved using LLHClient.recv()
Parameters
----------
hit_data : numpy.ndarray, or something convertible to one
table of hit features
evt_data : numpy.ndarray, or something convertible to one
event-level features
theta: numpy.ndarray, or something convertible to one
hypothesis params
req_id : optional
Converted to str, and returned as such
"""
hit_data, evt_data, theta = self._prepare_and_check_buffers(
hit_data, evt_data, theta
)
n_hypos = theta.size / self._n_hypo_params
n_hits = hit_data.size / self._n_hit_features
if n_hypos > self._max_hypos_per_batch:
raise RuntimeError(
f"Asynchronous requests are limited to {self.max_hypos_per_batch} hypotheses "
f"per req, but {n_hypos:.0f} were requested!"
)
if n_hits * n_hypos > self._max_obs_per_batch:
raise RuntimeError(
f"Asynchronous requests are limited to {self._max_obs_per_batch} total pulses "
f"per req, but {n_hits*n_hypos:.0f} were requested!"
)
# send a req_id string for development and debugging
req_id_bytes = str(req_id).encode()
self._sock.send_multipart([req_id_bytes, hit_data, evt_data, theta])
def eval_llh(self, hit_data, evt_data, theta, timeout=None):
"""Synchronous llh evaluation, blocking until llh is ready.
Batch size is unlimited for synchronous requests
(although it may take a while to get your reply)
.. warning:: Do not use while asynchronous requests are in progress.
Parameters
----------
hit_data : numpy.ndarray, or something convertible to one
table of hit features
evt_data : numpy.ndarray, or something convertible to one
event-level features
theta: numpy.ndarray, or something convertible to one
hypothesis params
timeout : int, optional
Wait for a reply up to `timeout` milliseconds
Raises
------
RuntimeError
On reaching timeout or failure of internal message uuid check
"""
hit_data, evt_data, theta = self._prepare_and_check_buffers(
hit_data, evt_data, theta
)
n_hypos = theta.size / self._n_hypo_params
n_hits = hit_data.size / self._n_hit_features
# split into multiple requests if necessary
if (
n_hypos > self._max_hypos_per_batch
or n_hits * n_hypos > self._max_obs_per_batch
):
if n_hits > self._max_obs_per_batch:
raise ValueError(
"Current LLH service only supports events with up to "
f"{self._max_obs_per_batch} pulses!"
)
if n_hits * self._max_hypos_per_batch <= self._max_obs_per_batch:
hypos_per_split = self._max_hypos_per_batch
else:
hypos_per_split = int(self._max_obs_per_batch / n_hits)
split_step = hypos_per_split * self._n_hypo_params
theta_splits = np.split(
theta, np.arange(split_step, theta.size, split_step)
)
else:
theta_splits = [theta]
req_ids = []
for theta_split in theta_splits:
req_id = uuid.uuid4().hex
req_ids.append(req_id)
self.request_eval(hit_data, evt_data, theta_split, req_id=req_id)
llhs = np.empty(int(n_hypos), dtype=np.float32)
llh_view = llhs
for req_id in req_ids:
reply = self.recv(timeout)
if reply is None:
raise RuntimeError("No reply from LLH service!")
if reply["req_id"] != req_id:
raise RuntimeError("uuid mismatch!")
llh_split = reply["llh"]
llh_view[: llh_split.size] = llh_split
llh_view = llh_view[llh_split.size :]
assert llh_view.size == 0
if n_hypos == 1:
return llhs[0]
else:
return llhs
def recv(self, timeout=None):
"""
attempt to retrieve a reply from the LLH service
returns None if no reply is available
Parameters
----------
timeout : int, optional
Wait for a reply up to `timeout` milliseconds
"""
if self._sock.poll(timeout, zmq.POLLIN) != 0:
req_id, llh = self._sock.recv_multipart()
return dict(req_id=req_id.decode(), llh=np.frombuffer(llh, np.float32))
return None
def _init_socket(self, req_addr):
# pylint: disable=no-member
ctxt = zmq.Context.instance()
sock = ctxt.socket(zmq.DEALER)
sock.connect(req_addr)
self._sock = sock
self._sock.setsockopt(zmq.LINGER, 0)
def _init_from_conf(
self, req_addr, batch_size, n_hypo_params, n_hit_features, n_evt_features
):
self._init_socket(req_addr)
self._max_hypos_per_batch = batch_size["n_hypos"]
self._max_obs_per_batch = batch_size["n_observations"]
self._n_hypo_params = n_hypo_params
self._n_hit_features = n_hit_features
self._n_evt_features = n_evt_features
def _prepare_and_check_buffers(self, hit_data, evt_data, theta):
""" validates hit_data, evt_data, and theta.
Converts them to contiguous, flat arrays of type np.float32 if they are not already """
hit_data, evt_data, theta = (
self._as_flat_float_array(arr) for arr in (hit_data, evt_data, theta)
)
if hit_data.size % self._n_hit_features != 0:
raise ValueError(
f"hit_data.size must be divisible by the number of observation features ({self._n_hit_features})"
)
if evt_data.size != self._n_evt_features:
raise ValueError(
f"evt_data.size must be equal to the number of event-level features ({self._n_evt_features})"
)
if theta.size % self._n_hypo_params != 0:
raise ValueError(
f"theta.size must be divisible by the number of hypothesis parameters ({self._n_hypo_params})"
)
return hit_data, evt_data, theta
@staticmethod
def _as_flat_float_array(arr):
if (
not isinstance(arr, np.ndarray)
or not arr.flags.c_contiguous
or arr.dtype != np.float32
):
arr = np.ascontiguousarray(arr, dtype=np.float32)
return arr.reshape(arr.size,)
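# Illustrative usage sketch. The control address below is hypothetical; a running
# LLH service must be listening there for this to work.
if __name__ == "__main__":
    client = LLHClient(ctrl_addr="tcp://127.0.0.1:5555", conf_timeout=1000)
    dummy_hits = np.zeros((10, client._n_hit_features), dtype=np.float32)
    dummy_evt = np.zeros(client._n_evt_features, dtype=np.float32)
    dummy_theta = np.zeros(client._n_hypo_params, dtype=np.float32)
    print(client.eval_llh(dummy_hits, dummy_evt, dummy_theta, timeout=1000))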
|
StarcoderdataPython
|
8031335
|
# -*- coding: utf-8 -*-
import unittest
import tensorflow as tf
from madoka.utils import tfhelper
from madoka.utils.tfcompat import GLOBAL_VARIABLES_KEY
class VariablesTestCase(unittest.TestCase):
"""Unit tests for variables utility."""
def test_selector(self):
"""Test `VariableSelector()`."""
def get_var(name, trainable=False, collections=None):
collections = collections or []
if trainable:
collections.append(tf.GraphKeys.TRAINABLE_VARIABLES)
collections.append(GLOBAL_VARIABLES_KEY)
return tf.get_variable(name, initializer=0, dtype=tf.int32,
trainable=trainable, collections=collections)
def check(sel, varlist):
mismatch_msg = '\n {%s} != {%s}' % (
', '.join(sorted(v.name for v in sel.select())),
', '.join(sorted(v.name for v in varlist))
)
mismatch_msg += '\ncaused by:\n %s' % sel
            self.assertEqual(set(sel.select()), varlist, msg=mismatch_msg)
with tf.Graph().as_default():
v1 = get_var('v1')
v2 = get_var('v2', trainable=True)
v3 = get_var('v3', trainable=True,
collections=[tfhelper.GraphKeys.TRAINING_STATES])
v4 = get_var('v4', collections=[tfhelper.GraphKeys.TRAINING_STATES,
'something'])
v5 = get_var('v5', collections=[tfhelper.GraphKeys.TRAINER_SLOTS,
'something'])
from madoka.utils.tfhelper import VariableSelector as vs
# test individual selectors.
check(vs.all(), {v1, v2, v3, v4, v5})
check(vs.list([v1, v3, v4]), {v1, v3, v4})
check(vs.collection('something'), {v4, v5})
check(vs.trainable(), {v2, v3})
check(vs.training_states(), {v3, v4})
check(vs.trainer_slots(), {v5})
# test composition of selectors.
check(
vs.training_states() & vs.trainable() & vs.list([v1, v3, v4]),
{v3}
)
check(
vs.training_states() & (vs.trainable() & vs.list([v1, v3, v4])),
{v3}
)
check(
vs.trainable() | vs.training_states() | vs.trainer_slots(),
{v2, v3, v4, v5}
)
check(
vs.trainable() | (vs.training_states() | vs.trainer_slots()),
{v2, v3, v4, v5}
)
check(
vs.trainable() + vs.training_states() + vs.trainer_slots(),
{v2, v3, v4, v5}
)
check(
vs.trainable() + (vs.training_states() + vs.trainer_slots()),
{v2, v3, v4, v5}
)
check(-vs.trainable(), {v1, v4, v5})
check(--vs.trainable(), {v2, v3})
check(
(vs.trainable() | vs.training_states()) & -vs.list([v1, v4]),
{v2, v3}
)
check(
vs.all() - vs.trainable() - vs.training_states(),
{v1, v5}
)
check(
vs.all() - (vs.trainable() + vs.training_states()),
{v1, v5}
)
check(
vs.all() - (vs.trainable() - vs.training_states()),
{v1, v3, v4, v5}
)
check(-vs.trainable() - vs.trainer_slots(), {v1, v4})
check((-vs.trainable()) & vs.trainer_slots(), {v5})
check(
(vs.list([v1, v2, v3]) - vs.training_states()) -
(vs.trainable() - vs.training_states()),
{v1}
)
|
StarcoderdataPython
|
1983562
|
<reponame>ChauffeurPrive/nestor-api
"""Unit test for logger class"""
from unittest import TestCase
from unittest.mock import patch
from nestor_api.utils.logger import Logger
class TestLogger(TestCase):
@patch("nestor_api.utils.logger.logging", autospec=True)
def test_logger_debug(self, logging_mock):
Logger.debug({"user_id": 1234}, "Found user")
logging_mock.debug.assert_called_once_with("%s %s", "Found user", '{"user_id": 1234}')
@patch("nestor_api.utils.logger.logging", autospec=True)
def test_logger_debug_with_no_context(self, logging_mock):
Logger.debug(message="Found user")
logging_mock.debug.assert_called_once_with("Found user")
@patch("nestor_api.utils.logger.logging", autospec=True)
def test_logger_info(self, logging_mock):
Logger.info({"user_id": 1234}, "Found user")
logging_mock.info.assert_called_once_with("%s %s", "Found user", '{"user_id": 1234}')
@patch("nestor_api.utils.logger.logging", autospec=True)
def test_logger_info_with_no_context(self, logging_mock):
Logger.info(message="Found user")
logging_mock.info.assert_called_once_with("Found user")
@patch("nestor_api.utils.logger.logging", autospec=True)
def test_logger_warn(self, logging_mock):
Logger.warn({"user_id": 1234}, "Found user")
logging_mock.warning.assert_called_once_with("%s %s", "Found user", '{"user_id": 1234}')
@patch("nestor_api.utils.logger.logging", autospec=True)
def test_logger_warn_with_no_context(self, logging_mock):
Logger.warn(message="Found user")
logging_mock.warning.assert_called_once_with("Found user")
@patch("nestor_api.utils.logger.logging", autospec=True)
def test_logger_error(self, logging_mock):
Logger.error({"user_id": 1234}, "Found user")
logging_mock.error.assert_called_once_with(
"%s %s", "Found user", '{"user_id": 1234}', exc_info=True
)
@patch("nestor_api.utils.logger.logging", autospec=True)
def test_logger_error_with_no_context(self, logging_mock):
Logger.error(message="Found user")
logging_mock.error.assert_called_once_with("Found user", exc_info=True)
|
StarcoderdataPython
|
300657
|
<reponame>kmonsoor/embedX<filename>setup.py
from codecs import open
from os import path
from setuptools import setup, find_packages
# bringing in __version__ data
exec(open('embedx/version.py').read())
# Load the README.md to `long_description`
try:
from pypandoc import convert
long_description = convert('README.md', 'rst')
except(OSError, IOError, ImportError):
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# finally
setup(
name='embedx',
packages=find_packages(),
version=__version__,
url='https://github.com/kmonsoor/embedX',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Generate responsive, embeddable HTML/JS code from URL of online content',
keywords=['embed', 'html', 'javascript', 'embeddable', 'code generation', 'from url'],
platforms='any',
long_description=long_description,
install_requires=[],
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
StarcoderdataPython
|
319037
|
from cliff.command import Command
class ModelUploadCommand(Command):
def get_parser(self, prog_name):
parser = super(ModelUploadCommand, self).get_parser(prog_name)
parser.add_argument('file', action='store', help='Model file to upload')
parser.add_argument('name', action='store', help="Model name to use")
parser.add_argument('format', action='store', help="Model format to use")
return parser
def take_action(self, parsed_args):
self.app.stdout.write("Uploading model: {} {} {}".format(parsed_args.file,
parsed_args.name,
parsed_args.format))
desc = "Uploaded by mcenter-cli"
self.app.mclient.upload_model(parsed_args.name, parsed_args.file, parsed_args.format, description=desc)
|
StarcoderdataPython
|
6618586
|
<reponame>leetcode-pp/leetcode-pp1<gh_stars>10-100
from typing import List
class Solution:
def judgePoint24(self, nums: List[int]) -> bool:
permutations = self.permuteUnique(nums)
for permutation in permutations:
if self.compute(permutation):
return True
return False
def compute(self, nums: List[float]) -> bool:
if len(nums) == 1:
return abs(nums[0] - 24) <= 0.00001
for i in range(len(nums) - 1):
# compute possible result from + - * /
tmp = []
tmp.append(nums[i] + nums[i + 1])
tmp.append(nums[i] - nums[i + 1])
tmp.append(nums[i] * nums[i + 1])
if nums[i + 1] != 0:
tmp.append(nums[i] / nums[i + 1])
for num in tmp:
new_list = nums[:]
new_list[i] = num
new_list.pop(i + 1)
if self.compute(new_list):
return True
return False
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
permutations = []
nums.sort()
tmp = []
visited = [False] * len(nums)
self.backtracking(nums, tmp, visited, permutations)
return permutations
def backtracking(
self, nums: List[int], tmp: List[float], visited: List[bool], perm: List[int],
) -> None:
if len(nums) == len(tmp):
perm.append(tmp[:])
return
for i in range(len(nums)):
if visited[i]:
continue
if i > 0 and nums[i] == nums[i - 1] and not visited[i - 1]:
continue
visited[i] = True
tmp.append(nums[i])
self.backtracking(nums, tmp, visited, perm)
visited[i] = False
tmp.pop()
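if __name__ == "__main__":
    # Quick illustrative check using the well-known LeetCode 679 examples.
    solution = Solution()
    print(solution.judgePoint24([4, 1, 8, 7]))  # True, e.g. (8 - 4) * (7 - 1) = 24
    print(solution.judgePoint24([1, 2, 1, 2]))  # False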
|
StarcoderdataPython
|
11267086
|
"""
This defines a single elimination 'Tournament' object.
"""
import math
import itertools
from single_elimination.match import Match
from single_elimination.participant import Participant
class Tournament:
"""
This is a single-elimination tournament where each match is between 2 competitors.
It takes in a list of competitors, which can be strings or any type of Python object,
but they should be unique. They should be ordered by a seed, with the first entry being the most
skilled and the last being the least. They can also be randomized before creating the instance.
Optional options dict fields:
"""
def __init__(self, competitors_list, options={}):
assert len(competitors_list) > 1
self.__matches = []
next_higher_power_of_two = int(math.pow(2, math.ceil(math.log2(len(competitors_list)))))
winners_number_of_byes = next_higher_power_of_two - len(competitors_list)
incoming_participants = list(map(Participant, competitors_list))
incoming_participants.extend([None] * winners_number_of_byes)
while len(incoming_participants) > 1:
half_length = int(len(incoming_participants)/2)
first = incoming_participants[0:half_length]
last = incoming_participants[half_length:]
last.reverse()
next_round_participants = []
for participant_pair in zip(first, last):
if participant_pair[1] is None:
next_round_participants.append(participant_pair[0])
elif participant_pair[0] is None:
next_round_participants.append(participant_pair[1])
else:
match = Match(participant_pair[0], participant_pair[1])
next_round_participants.append(match.get_winner_participant())
self.__matches.append(match)
incoming_participants = next_round_participants
self.__winner = incoming_participants[0]
def __iter__(self):
return iter(self.__matches)
def get_active_matches(self):
"""
Returns a list of all matches that are ready to be played.
"""
return [match for match in self.get_matches() if match.is_ready_to_start()]
def get_matches(self):
"""
Returns a list of all matches for the tournament.
"""
return self.__matches
def get_active_matches_for_competitor(self, competitor):
"""
Given the string or object of the competitor that was supplied
when creating the tournament instance,
returns a list of Matches that they are currently playing in.
"""
matches = []
for match in self.get_active_matches():
competitors = [participant.get_competitor() for participant in match.get_participants()]
if competitor in competitors:
matches.append(match)
return matches
def get_winners(self):
"""
Returns None if the winner has not been decided yet,
and returns a list containing the single victor otherwise.
"""
if len(self.get_active_matches()) > 0:
return None
return [self.__winner.get_competitor()]
def add_win(self, match, competitor):
"""
Set the victor of a match, given the competitor string/object and match.
"""
match.set_winner(competitor)
|
StarcoderdataPython
|
213518
|
#!/usr/bin/env python
import time
import rospy
from osr_msgs.msg import Commands, Encoder, Status
from roboclaw_wrapper import MotorControllers
global mutex
mutex = False
motorcontrollers = MotorControllers()
def callback(cmds):
global mutex
rospy.loginfo(cmds)
while mutex:
time.sleep(0.001)
#print "cmds are being buffered"
mutex = True
# PUT THIS BACK IN
motorcontrollers.cornerToPosition(cmds.corner_motor)
for i in range(6):
# PUT THIS BACK IN
#motorcontrollers.sendMotorDuty(i,cmds.drive_motor[i])
motorcontrollers.sendSignedDutyAccel(i,cmds.drive_motor[i])
pass
mutex = False
def shutdown():
print "killing motors"
motorcontrollers.killMotors()
if __name__ == "__main__":
rospy.init_node("motor_controller")
rospy.loginfo("Starting the motor_controller node")
rospy.on_shutdown(shutdown)
sub = rospy.Subscriber("/robot_cmds",Commands,callback)
enc_pub = rospy.Publisher("/encoder", Encoder, queue_size =1)
status_pub = rospy.Publisher("/status", Status, queue_size =1)
rate = rospy.Rate(5)
status = Status()
enc = Encoder()
enc.abs_enc =[1000]*4
enc.abs_enc_angles =[-100]*4
status.battery = 0
status.temp =[0]*5
status.current =[0]*10
status.error_status =[0]*5
counter = 0
while not rospy.is_shutdown():
while mutex:
time.sleep(0.001)
mutex = True
enc.abs_enc = motorcontrollers.getCornerEnc()
#mc_data.abs_enc_angles = motorcontrollers.getCornerEncAngle()
if (counter >= 10):
status.battery = motorcontrollers.getBattery()
status.temp = motorcontrollers.getTemp()
status.current = motorcontrollers.getCurrents()
status.error_status = motorcontrollers.getErrors()
status_pub.publish(status)
counter = 0
mutex = False
enc_pub.publish(enc)
counter += 1
rate.sleep()
rospy.spin()
|
StarcoderdataPython
|
5149040
|
from __future__ import absolute_import
from functools import partial
from .lr_finder import LRFinder
from .adamw import AdamW as AdamW_my
from .radam import RAdam, PlainRAdam
from .sgdw import SGDW
from .schedulers import LinearLR, ExponentialLR
from .rmsprop import RMSprop
from .lookahead import Lookahead
from torch import optim
try:
from apex.optimizers import FusedSGD
from apex.optimizers import FusedAdam
from apex.optimizers import FusedNovoGrad
HAS_APEX = True
except ModuleNotFoundError:
HAS_APEX = False
# 2e-5 is the lowest epsilon that saves from overflow in fp16
def optimizer_from_name(optim_name):
optim_name = optim_name.lower()
if optim_name == "sgd":
return optim.SGD
elif optim_name == "sgdw":
return SGDW
elif optim_name == "adam":
return partial(optim.Adam, eps=2e-5)
elif optim_name == "adamw":
return partial(AdamW_my, eps=2e-5)
elif optim_name == "adamw_gc":
        # in this implementation eps is inside sqrt so it can be smaller
return partial(AdamW_my, center=True, eps=1e-7)
elif optim_name == "rmsprop":
        # in this implementation eps is inside sqrt so it can be smaller
return partial(RMSprop, eps=1e-7)
elif optim_name == "radam":
return partial(RAdam, eps=2e-5)
elif optim_name in ["fused_sgd", "fusedsgd"] and HAS_APEX:
return FusedSGD
elif optim_name in ["fused_adam", "fusedadam"] and HAS_APEX:
return partial(FusedAdam, eps=2e-5)
elif optim_name in ["fused_novograd", "fusednovograd", "novograd"] and HAS_APEX:
return partial(FusedNovoGrad, eps=2e-5)
else:
raise ValueError(f"Optimizer {optim_name} not found or apex is not installed.")
|
StarcoderdataPython
|
9740454
|
<reponame>RoastVeg/cports
pkgname = "desktop-file-utils"
pkgver = "0.26"
pkgrel = 0
build_style = "meson"
hostmakedepends = ["meson", "pkgconf"]
makedepends = ["libglib-devel"]
triggers = ["/usr/share/applications"]
pkgdesc = "Utilities to manage desktop entries"
maintainer = "q66 <<EMAIL>>"
license = "GPL-2.0-or-later"
url = "https://freedesktop.org/wiki/Software/desktop-file-utils"
source = f"$(FREEDESKTOP_SITE)/{pkgname}/releases/{pkgname}-{pkgver}.tar.xz"
sha256 = "b26dbde79ea72c8c84fb7f9d870ffd857381d049a86d25e0038c4cef4c747309"
|
StarcoderdataPython
|
217342
|
"""
XML pysud module.
Provides classes to deal with xml data files.
"""
import xml.etree.ElementTree as etree
import pysud
class XMLParser():
"""
Abstract class to provide basic xml files handling functionality.
Attributes:
xml_file: A valid xml file path.
xml_root: ElementTree Root class object.
"""
def __init__(self, xml_file):
self.xml_file = xml_file
self.xml_root = None
def parse_file(self):
pass
class ConfigXMLParser(XMLParser):
""" This class handles all config.xml file parsing. """
def parse_file(self):
"""
Parses main configuration xml.
Returns:
A dictionary containing configuration values.
(This configuration values should be mapped to
a pysud.Game class object instance variables)
"""
tree = etree.parse(self.xml_file)
self.xml_root = tree.getroot()
config = dict()
config['ENABLE_JOURNAL'] = self.xml_root.find('ENABLE_JOURNAL').text
config['ENABLE_SAVEGAME'] = self.xml_root.find('ENABLE_SAVEGAME').text
config['HELP_TEXT'] = self.xml_root.find('HELP_TEXT').text
config['INVALID_CMD_TEXT'] = self.xml_root.find('INVALID_CMD_TEXT').text
config['PROMPT_TEXT'] = self.xml_root.find('PROMPT_TEXT').text
config['GAME_START_TEXT'] = self.xml_root.find('GAME_START_TEXT').text
config['GAME_RESUME_TEXT'] = self.xml_root.find('GAME_RESUME_TEXT').text
config['GAME_END_TEXT'] = self.xml_root.find('GAME_END_TEXT').text
config['PLAYER_DEFAULT_NAME'] = self.xml_root.find('PLAYER_DEFAULT_NAME').text
config['ROOM_ITEMS_STR'] = self.xml_root.find('ROOM_ITEMS_STR').text
config['PLAYER_SCORE_STR'] = self.xml_root.find('PLAYER_SCORE_STR').text
config['PLAYER_INVENTORY_STR'] = self.xml_root.find('PLAYER_INVENTORY_STR').text
config['PLAYER_STATS_STR'] = self.xml_root.find('PLAYER_STATS_STR').text
return config
class RoomsXMLParser(XMLParser):
""" This class handles all rooms.xml file parsing. """
def parse_file(self):
"""
Parses rooms definition file.
Returns:
A python List holding pysud.Room class objects
"""
tree = etree.parse(self.xml_file)
self.xml_root = tree.getroot()
rooms_data = dict()
for room_tag in self.xml_root:
temp = self.__parse_room(room_tag)
rooms_data[temp[0].get_id()] = temp
return self.__link_rooms(rooms_data)
def __link_rooms(self, rooms_data):
""" Associates each rooms with its transitions.
Args:
rooms_data: a python Dictionary composed by:
key : a pysud.Room Id
value : a pair ( Room class object, transition List for that room )
Returns:
A python List holding pysud.Room class objects
"""
res = list()
for p in rooms_data.values():
if p[1] != None:
for transition_stub in p[1]:
destination_room = rooms_data.get(transition_stub.destination)[0]
commands = transition_stub.get_commands()
p[0].add_transition(commands, destination_room)
res.append(p[0])
return res
def __parse_room(self, room_tag):
"""
Parses a (single) room xml tag.
Args:
room_tag: The room xml tag to parse
Returns:
A python Tuple containing:
a pysud.rooms object
a python list
"""
room_description = None
room_name = room_tag.get('name')
room_id = room_tag.get('id')
room_transitions = None
for element in room_tag.getchildren():
if element.tag == 'description':
room_description = element.text
elif element.tag == 'transitions':
# self.print_transitions(element, room_id)
room_transitions = self.__parse_transition(element, room_id)
else:
pass # invalid tag found
room = pysud.Room(room_name, room_description, room_id)
        return room, room_transitions
def __parse_transition(self, transitions_tag, room_origin_id):
l = list()
# iterates over a given room destinations:
for transition in transitions_tag.getchildren():
room_destination_id = transition.get('destination')
ts = TransitionStub(room_origin_id, room_destination_id)
# iterates over a single destination alias commands:
for command in transition.getchildren():
ts.add_command(command.text)
l.append(ts)
return l
def print_transitions(self, transitions_tag, room_origin_id):
print('from ', room_origin_id, ' you can go to:')
# iterates over a given room destinations:
for transition in transitions_tag.getchildren():
dest = transition.get('destination')
print('\t', dest)
# iterates over a single destination alias commands:
for com in transition.getchildren():
print('\t\t with commands:', com.text)
class TransitionStub:
""" Internal class to hold a single transition data.
An object instance of this class is used exclusively to temporarily hold
parsed data for a room transition."""
def __init__(self, origin_room_id , destination_room_id):
self.origin = origin_room_id
self.destination = destination_room_id
self.__commands = list()
def add_command(self, command):
self.__commands.append(command)
def get_commands(self):
return self.__commands
# @DEBUG - For testing purposes only:
if __name__ == '__main__':
CFG_XML_FILE = 'config.xml'
ROOMS_XML_FILE = 'rooms.xml'
C = ConfigXMLParser(CFG_XML_FILE)
RP = RoomsXMLParser (ROOMS_XML_FILE)
R = RP.parse_file()
E = RP.xml_root
|
StarcoderdataPython
|
178554
|
<gh_stars>0
#imports to start with app.py
from flask import Flask, render_template
import requests
#instantiating the app to be called later
app = Flask(__name__)
#creating the root(home) page
@app.route('/')
#function to populate your main page
def home():
""" Make sure your folder holding templates is called templates """
return render_template('home.html')
#make sure you get that starting slash
#puppy is a page and the name is a variable you will pass in
@app.route('/puppy/<title>')
def create_puppy(title):
""" The name variable gets passed into the function """
#the full response from pulling the information from API
response = requests.get('https://dog.ceo/api/breeds/image/random')
#extracting the json data with the key in brackets
image = response.json()['message']
return render_template('puppy.html', name=title, image=image)
#this stays here until you get an init going
if __name__ == '__main__':
app.run()
|
StarcoderdataPython
|
6497514
|
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
_BATCH_NORM_DECAY = 0.9
_BATCH_NORM_EPSILON = 1e-05
_LEAKY_RELU = 0.1
_ANCHORS = [(10, 13), (16, 30), (33, 23),
(30, 61), (62, 45), (59, 119),
(116, 90), (156, 198), (373, 326)]
@tf.contrib.framework.add_arg_scope
def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):
"""
Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('NHWC' or 'NCHW').
mode: The mode for tf.pad.
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if kwargs['data_format'] == 'NCHW':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end],
[pad_beg, pad_end]],
mode=mode)
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]], mode=mode)
return padded_inputs
def _conv2d_fixed_padding(inputs, filters, kernel_size, strides=1):
if strides > 1:
inputs = _fixed_padding(inputs, kernel_size)
inputs = slim.conv2d(inputs, filters, kernel_size, stride=strides,
padding=('SAME' if strides == 1 else 'VALID'))
return inputs
def _get_size(shape, data_format):
if len(shape) == 4:
shape = shape[1:]
return shape[1:3] if data_format == 'NCHW' else shape[0:2]
def _detection_layer(inputs, num_classes, anchors, img_size, data_format):
num_anchors = len(anchors)
predictions = slim.conv2d(inputs, num_anchors * (5 + num_classes), 1,
stride=1, normalizer_fn=None,
activation_fn=None,
biases_initializer=tf.zeros_initializer())
shape = predictions.get_shape().as_list()
grid_size = _get_size(shape, data_format)
dim = grid_size[0] * grid_size[1]
bbox_attrs = 5 + num_classes
if data_format == 'NCHW':
predictions = tf.reshape(
predictions, [-1, num_anchors * bbox_attrs, dim])
predictions = tf.transpose(predictions, [0, 2, 1])
predictions = tf.reshape(predictions, [-1, num_anchors * dim, bbox_attrs])
stride = (img_size[0] // grid_size[0], img_size[1] // grid_size[1])
anchors = [(a[0] / stride[0], a[1] / stride[1]) for a in anchors]
box_centers, box_sizes, confidence, classes = tf.split(
predictions, [2, 2, 1, num_classes], axis=-1)
box_centers = tf.nn.sigmoid(box_centers)
confidence = tf.nn.sigmoid(confidence)
grid_x = tf.range(grid_size[0], dtype=tf.float32)
grid_y = tf.range(grid_size[1], dtype=tf.float32)
a, b = tf.meshgrid(grid_x, grid_y)
x_offset = tf.reshape(a, (-1, 1))
y_offset = tf.reshape(b, (-1, 1))
x_y_offset = tf.concat([x_offset, y_offset], axis=-1)
x_y_offset = tf.reshape(tf.tile(x_y_offset, [1, num_anchors]), [1, -1, 2])
box_centers = box_centers + x_y_offset
box_centers = box_centers * stride
anchors = tf.tile(anchors, [dim, 1])
box_sizes = tf.exp(box_sizes) * anchors
box_sizes = box_sizes * stride
detections = tf.concat([box_centers, box_sizes, confidence], axis=-1)
classes = tf.nn.sigmoid(classes)
predictions = tf.concat([detections, classes], axis=-1)
return predictions
def _upsample(inputs, out_shape, data_format='NCHW'):
# tf.image.resize_nearest_neighbor accepts input in format NHWC
if data_format == 'NCHW':
inputs = tf.transpose(inputs, [0, 2, 3, 1])
if data_format == 'NCHW':
new_height = out_shape[2]
new_width = out_shape[3]
else:
new_height = out_shape[1]
new_width = out_shape[2]
inputs = tf.image.resize_nearest_neighbor(inputs, (new_height, new_width))
# back to NCHW if needed
if data_format == 'NCHW':
inputs = tf.transpose(inputs, [0, 3, 1, 2])
inputs = tf.identity(inputs, name='upsampled')
return inputs
def yolo_v4(inputs, num_classes, is_training=False, data_format='NCHW', reuse=False):
"""
Creates YOLO v3 model.
:param inputs: a 4-D tensor of size [batch_size, height, width, channels].
Dimension batch_size may be undefined. The channel order is RGB.
:param num_classes: number of predicted classes.
:param is_training: whether is training or not.
:param data_format: data format NCHW or NHWC.
:param reuse: whether or not the network and its variables should be reused.
:return:
"""
# it will be needed later on
img_size = inputs.get_shape().as_list()[1:3]
# transpose the inputs to NCHW
if data_format == 'NCHW':
inputs = tf.transpose(inputs, [0, 3, 1, 2])
# normalize values to range [0..1]
inputs = inputs / 255
# set batch norm params
batch_norm_params = {
'decay': _BATCH_NORM_DECAY,
'epsilon': _BATCH_NORM_EPSILON,
'scale': True,
'is_training': is_training,
'fused': None, # Use fused batch norm if possible.
}
with slim.arg_scope([slim.conv2d, slim.batch_norm, _fixed_padding], data_format=data_format, reuse=reuse):
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params,
biases_initializer=None,
activation_fn=lambda x:x* tf.math.tanh(tf.math.softplus(x))):
inputs = _conv2d_fixed_padding(inputs, 32, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 64, 3, strides=2)
route1 = inputs
inputs = _conv2d_fixed_padding(inputs, 64, 1, strides=1)
route2 = inputs
inputs = route1
inputs = _conv2d_fixed_padding(inputs, 64, 1, strides=1)
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 32, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 64, 3, strides=1)
inputs = inputs + shortcut
inputs = _conv2d_fixed_padding(inputs, 64, 1, strides=1)
route8 = inputs
inputs = tf.concat([route8, route2], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 64, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 3, strides=2)
route11 = inputs
inputs = _conv2d_fixed_padding(inputs, 64, 1, strides=1)
route12 = inputs
inputs = route11
inputs = _conv2d_fixed_padding(inputs, 64, 1, strides=1)
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 64, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 64, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 64, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 64, 3, strides=1)
inputs = inputs + shortcut
inputs = _conv2d_fixed_padding(inputs, 64, 1, strides=1)
route21 = inputs
inputs = tf.concat([route21, route12], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=2)
route24 = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
route25 = inputs
inputs = route24
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 3, strides=1)
inputs = inputs + shortcut
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
route52 = inputs
inputs = tf.concat([route52, route25], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
route54 = inputs
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=2)
route55 = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
route56 = inputs
inputs = route55
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = inputs + shortcut
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
route83 = inputs
inputs = tf.concat([route83, route56], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
route85 = inputs
inputs = _conv2d_fixed_padding(inputs, 1024, 3, strides=2)
route86 = inputs
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
route87 = inputs
inputs = route86
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=1)
inputs = inputs + shortcut
shortcut = inputs
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=1)
inputs = inputs + shortcut
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
route102 = inputs
inputs = tf.concat([route102, route87], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 1024, 1, strides=1)
# Set activation_fn and parameters for conv2d, batch_norm.
with slim.arg_scope([slim.conv2d, slim.batch_norm, _fixed_padding], data_format=data_format, reuse=reuse):
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params,
biases_initializer=None,
activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=_LEAKY_RELU)):
with tf.variable_scope('yolo-v4'):
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 1024, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
route107 = inputs
maxpool108 = slim.max_pool2d(inputs, 5, 1, 'SAME')
inputs = route107
maxpool110 = slim.max_pool2d(inputs, 9, 1, 'SAME')
inputs = route107
maxpool112 = slim.max_pool2d(inputs, 13, 1, 'SAME')
inputs = tf.concat([maxpool112, maxpool110, maxpool108, route107], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 1024, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
route116 = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _upsample(inputs, route85.get_shape().as_list(), data_format)
route118 = inputs
inputs = route85
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
route120 = inputs
inputs = tf.concat([route120, route118], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
route126 = inputs
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _upsample(inputs, route54.get_shape().as_list(), data_format)
route128 = inputs
inputs = route54
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
route130 = inputs
inputs = tf.concat([route130, route128], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 128, 1, strides=1)
route136 = inputs
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=1)
detect_1 = _detection_layer(inputs, num_classes, _ANCHORS[0:3], img_size, data_format)
detect_1 = tf.identity(detect_1, name='detect_1')
inputs = route136
inputs = _conv2d_fixed_padding(inputs, 256, 3, strides=2)
route141 = inputs
inputs = tf.concat([route141, route126], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 256, 1, strides=1)
route147 = inputs
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=1)
detect_2 = _detection_layer(inputs, num_classes, _ANCHORS[3:6], img_size, data_format)
detect_2 = tf.identity(detect_2, name='detect_2')
inputs = route147
inputs = _conv2d_fixed_padding(inputs, 512, 3, strides=2)
route152 = inputs
inputs = tf.concat([route152, route116], axis=1 if data_format == 'NCHW' else 3)
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 1024, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 1024, 3, strides=1)
inputs = _conv2d_fixed_padding(inputs, 512, 1, strides=1)
inputs = _conv2d_fixed_padding(inputs, 1024, 3, strides=1)
detect_3 = _detection_layer(inputs, num_classes, _ANCHORS[6:9], img_size, data_format)
detect_3 = tf.identity(detect_3, name='detect_3')
detections = tf.concat([detect_1, detect_2, detect_3], axis=1)
detections = tf.identity(detections, name='detections')
return detections
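# Illustrative usage sketch (assumptions: TF1 graph mode, 416x416 RGB input, 80 classes):
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, [None, 416, 416, 3], name='images')
    outputs = yolo_v4(images, num_classes=80, data_format='NHWC')
    # `outputs` has shape [batch, total_anchor_boxes, 5 + num_classes]
    print(outputs)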
|
StarcoderdataPython
|
6658619
|
<gh_stars>1-10
import json
import logging
import os
import sys
from running_modes.configurations.general_configuration_envelope import GeneralConfigurationEnvelope
from running_modes.configurations.logging.create_model_log_configuration import CreateModelLoggerConfiguration
class CreateModelLogger:
def __init__(self, configuration: GeneralConfigurationEnvelope):
self.configuration = configuration
self._log_config = CreateModelLoggerConfiguration(**self.configuration.logging)
self._common_logger = self._setup_logger(name="create_model_logger")
def log_message(self, message: str):
self._common_logger.info(message)
def log_out_input_configuration(self):
file = os.path.join(self._log_config.logging_path, "input.json")
jsonstr = json.dumps(self.configuration, default=lambda x: x.__dict__, sort_keys=True, indent=4,
separators=(',', ': '))
with open(file, 'w') as f:
f.write(jsonstr)
def _setup_logger(self, name, level=logging.INFO):
logging.getLogger(name).addHandler(logging.NullHandler())
handler = logging.StreamHandler(stream=sys.stderr)
formatter = logging.Formatter(
fmt="%(asctime)s: %(module)s.%(funcName)s +%(lineno)s: %(levelname)-8s %(message)s",
datefmt="%H:%M:%S"
)
handler.setFormatter(formatter)
logger = logging.getLogger(name)
if not logger.handlers:
logger.setLevel(level)
logger.addHandler(handler)
return logger
|
StarcoderdataPython
|
7094
|
<gh_stars>1-10
"""
Authorization Utilities
"""
from shared.models.user_entities import User
from shared.service.jwt_auth_wrapper import JWTAuthManager
manager = JWTAuthManager(oidc_vault_secret="oidc/rest",
object_creator=lambda claims, assumed_role, user_roles: User(
first_name=claims["given_name"],
last_name=claims["family_name"],
school=assumed_role,
email=claims['email']
))
AUTH_USER = manager.auth_header()
|
StarcoderdataPython
|
12847901
|
<reponame>NehzUx/autodl
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by: <NAME>, <NAME>, <NAME>
"""An example of code submission for the AutoDL challenge.
It implements 3 compulsory methods ('__init__', 'train' and 'test') and
an attribute 'done_training' for indicating if the model will not proceed more
training due to convergence or limited time budget.
To create a valid submission, zip model.py together with other necessary files
such as Python modules/packages, pre-trained weights, etc. The final zip file
should not exceed 300MB.
"""
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.autograd import Variable
import datetime
import logging
import numpy as np
import os
import sys
import time
import torch.utils.data as data_utils
import torch
import torch.nn as nn
import torchvision
import tensorflow as tf
# seeding randomness for reproducibility
np.random.seed(42)
torch.manual_seed(1)
# PyTorch Model class
class TorchModel(nn.Module):
def __init__(self, input_shape, output_dim):
        ''' 3D CNN model whose number of CNN layers depends on the input size '''
super(TorchModel, self).__init__()
self.conv = torch.nn.Sequential()
cnn_ch = 16
        if input_shape[1] == 1:  # sequence length is 1 (single frame), so no temporal convolution
self.conv.add_module('cnn1', nn.Conv3d(input_shape[0], cnn_ch, (1,3,3)))
else:
self.conv.add_module('cnn1', nn.Conv3d(input_shape[0], cnn_ch, 3))
self.conv.add_module('pool1', nn.MaxPool3d(2,2))
i = 2
while True:
self.conv.add_module('cnn{}'.format(i),
nn.Conv3d(cnn_ch * (i-1), cnn_ch * i, (1,3,3)))
self.conv.add_module('pool{}'.format(i), nn.MaxPool3d(2,2))
i += 1
n_size, out_len = self.get_fc_size(input_shape)
            # stop adding CNN layers once the flattened size for the Linear layer drops below 1000
            # or the spatial output becomes too small for another (1,3,3) convolution
            if n_size < 1000 or out_len[3] < 3 or out_len[4] < 3:
                break
fc_size, _ = self.get_fc_size(input_shape)
self.fc = nn.Linear(fc_size, output_dim)
def forward_cnn(self, x):
x = self.conv(x)
return x
def get_fc_size(self, input_shape):
''' function to get the size for Linear layers
with given number of CNN layers
'''
sample_input = Variable(torch.rand(1, *input_shape))
output_feat = self.forward_cnn(sample_input)
out_shape = output_feat.shape
n_size = output_feat.data.view(1, -1).size(1)
return n_size, out_shape
def forward(self, x):
x = self.forward_cnn(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
# PyTorch Dataset to get data from tensorflow Dataset.
class TFDataset(torch.utils.data.Dataset):
def __init__(self, dataset, session, num_samples):
super(TFDataset, self).__init__()
self.dataset = dataset
self.session = session
self.num_samples = num_samples
self.next_element = None
self.reset()
def reset(self):
dataset = self.dataset
iterator = dataset.make_one_shot_iterator()
self.next_element = iterator.get_next()
return self
def __len__(self):
return self.num_samples
def __getitem__(self, index):
session = self.session if self.session is not None else tf.Session()
try:
example, label = session.run(self.next_element)
except tf.errors.OutOfRangeError:
self.reset()
example, label = session.run(self.next_element)
return example.transpose(3,0,1,2), label
class Model():
def __init__(self, metadata):
"""
Args:
metadata: an AutoDLMetadata object. Its definition can be found in
AutoDL_ingestion_program/dataset.py
"""
# Attribute necessary for ingestion program to stop evaluation process
self.done_training = False
self.metadata_ = metadata
# Getting details of the data from meta data
self.output_dim = self.metadata_.get_output_size()
self.num_examples_train = self.metadata_.size()
row_count, col_count = self.metadata_.get_matrix_size(0)
channel = self.metadata_.get_num_channels(0)
sequence_size = self.metadata_.get_sequence_size()
self.num_train = self.metadata_.size()
test_metadata_filename = self.metadata_.get_dataset_name()\
.replace('train', 'test') + '/metadata.textproto'
self.num_test = [int(line.split(':')[1]) for line
in open(test_metadata_filename, 'r').readlines()
if 'sample_count' in line][0]
# Getting the device available
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Device Found = ', self.device,
'\nMoving Model and Data into the device...')
# Attributes for preprocessing
self.default_image_size = (112,112)
self.default_num_frames = 15
self.default_shuffle_buffer = 100
if row_count == -1 or col_count == -1 :
row_count = self.default_image_size[0]
col_count = self.default_image_size[1]
if sequence_size == -1: sequence_size = self.default_num_frames
self.input_shape = (channel, sequence_size, row_count, col_count)
print('\n\nINPUT SHAPE = ', self.input_shape)
# getting an object for the PyTorch Model class for Model Class
# use CUDA if available
self.pytorchmodel = TorchModel(self.input_shape, self.output_dim)
print('\nPyModel Defined\n')
print(self.pytorchmodel)
self.pytorchmodel.to(self.device)
# PyTorch Optimizer and Criterion
self.criterion = nn.BCEWithLogitsLoss()
self.optimizer = torch.optim.Adam(self.pytorchmodel.parameters(), lr=1e-2)
# Attributes for managing time budget
# Cumulated number of training steps
self.birthday = time.time()
self.total_train_time = 0
self.cumulated_num_steps = 0
self.estimated_time_per_step = None
self.total_test_time = 0
self.cumulated_num_tests = 0
self.estimated_time_test = None
self.trained = False
# PYTORCH
# Critical number for early stopping
self.num_epochs_we_want_to_train = 100
# no of examples at each step/batch
self.train_batch_size = 30
self.test_batch_size = 30
# Tensorflow sessions to get the data from TFDataset
self.train_session = tf.Session()
self.test_session = tf.Session()
def train(self, dataset, remaining_time_budget=None):
"""Train this algorithm on the tensorflow |dataset|.
This method will be called REPEATEDLY during the whole training/predicting
process. So your `train` method should be able to handle repeated calls and
hopefully improve your model performance after each call.
****************************************************************************
****************************************************************************
IMPORTANT: the loop of calling `train` and `test` will only run if
self.done_training = False
(the corresponding code can be found in ingestion.py, search
'M.done_training')
Otherwise, the loop will go on until the time budget is used up. Please
pay attention to set self.done_training = True when you think the model is
converged or when there is not enough time for next round of training.
****************************************************************************
****************************************************************************
Args:
dataset: a `tf.data.Dataset` object. Each of its examples is of the form
(example, labels)
where `example` is a dense 4-D Tensor of shape
(sequence_size, row_count, col_count, num_channels)
and `labels` is a 1-D Tensor of shape
(output_dim,).
Here `output_dim` represents number of classes of this
multilabel classification task.
IMPORTANT: some of the dimensions of `example` might be `None`,
which means the shape on this dimension might be variable. In this
case, some preprocessing technique should be applied in order to
feed the training of a neural network. For example, if an image
dataset has `example` of shape
(1, None, None, 3)
          then the images in this dataset may have different sizes. One could
apply resizing, cropping or padding in order to have a fixed size
input tensor.
remaining_time_budget: time remaining to execute train(). The method
should keep track of its execution time to avoid exceeding its time
budget. If remaining_time_budget is None, no time budget is imposed.
"""
steps_to_train = self.get_steps_to_train(remaining_time_budget)
if steps_to_train <= 0:
logger.info("Not enough time remaining for training. " +
"Estimated time for training per step: {:.2f}, "\
.format(self.estimated_time_per_step) +
"but remaining time budget is: {:.2f}. "\
.format(remaining_time_budget) +
"Skipping...")
self.done_training = True
else:
msg_est = ""
if self.estimated_time_per_step:
msg_est = "estimated time for this: " +\
"{:.2f} sec.".format(steps_to_train * self.estimated_time_per_step)
logger.info("Begin training for another {} steps...{}".format(steps_to_train, msg_est))
      # If the PyTorch dataloader for the training set doesn't already exist, get the train dataloader
if not hasattr(self, 'trainloader'):
self.trainloader = self.get_dataloader(dataset, self.num_train, batch_size=self.train_batch_size)
train_start = time.time()
# Training loop
self.trainloop(self.criterion, self.optimizer, steps=steps_to_train)
train_end = time.time()
# Update for time budget managing
train_duration = train_end - train_start
self.total_train_time += train_duration
self.cumulated_num_steps += steps_to_train
self.estimated_time_per_step = self.total_train_time / self.cumulated_num_steps
logger.info("{} steps trained. {:.2f} sec used. ".format(steps_to_train, train_duration) +\
"Now total steps trained: {}. ".format(self.cumulated_num_steps) +\
"Total time used for training: {:.2f} sec. ".format(self.total_train_time) +\
"Current estimated time per step: {:.2e} sec.".format(self.estimated_time_per_step))
def test(self, dataset, remaining_time_budget=None):
"""Test this algorithm on the tensorflow |dataset|.
Args:
Same as that of `train` method, except that the `labels` will be empty.
Returns:
predictions: A `numpy.ndarray` matrix of shape (sample_count, output_dim).
here `sample_count` is the number of examples in this dataset as test
set and `output_dim` is the number of labels to be predicted. The
values should be binary or in the interval [0,1].
"""
if self.done_training:
return None
if self.choose_to_stop_early():
logger.info("Oops! Choose to stop early for next call!")
self.done_training = True
test_begin = time.time()
if remaining_time_budget and self.estimated_time_test and\
self.estimated_time_test > remaining_time_budget:
logger.info("Not enough time for test. " +\
"Estimated time for test: {:.2e}, ".format(self.estimated_time_test) +\
"But remaining time budget is: {:.2f}. ".format(remaining_time_budget) +\
"Stop train/predict process by returning None.")
return None
msg_est = ""
if self.estimated_time_test:
msg_est = "estimated time: {:.2e} sec.".format(self.estimated_time_test)
logger.info("Begin testing..." + msg_est)
    # If the PyTorch dataloader for the test set doesn't already exist, get the test dataloader
if not hasattr(self, 'testloader'):
self.testloader = self.get_dataloader_test(dataset, self.num_test,
self.test_batch_size)
# get predictions from the test loop
predictions = self.testloop(self.testloader)
test_end = time.time()
# Update some variables for time management
test_duration = test_end - test_begin
self.total_test_time += test_duration
self.cumulated_num_tests += 1
self.estimated_time_test = self.total_test_time / self.cumulated_num_tests
logger.info("[+] Successfully made one prediction. {:.2f} sec used. ".format(test_duration) +\
"Total time used for testing: {:.2f} sec. ".format(self.total_test_time) +\
"Current estimated time for test: {:.2e} sec.".format(self.estimated_time_test))
return predictions
##############################################################################
#### Above 3 methods (__init__, train, test) should always be implemented ####
##############################################################################
def preprocess_tensor_4d(self, tensor_4d):
"""Preprocess a 4-D tensor (only when some dimensions are `None`, i.e.
    non-fixed). The output tensor will have a fixed, known shape.
Args:
tensor_4d: A Tensor of shape
[sequence_size, row_count, col_count, num_channels]
where some dimensions might be `None`.
Returns:
A 4-D Tensor with fixed, known shape.
"""
tensor_4d_shape = tensor_4d.shape
logger.info("Tensor shape before preprocessing: {}".format(tensor_4d_shape))
if tensor_4d_shape[0] > 0 and tensor_4d_shape[0] < 10:
num_frames = tensor_4d_shape[0]
else:
num_frames = self.default_num_frames
if tensor_4d_shape[1] > 0:
new_row_count = tensor_4d_shape[1]
else:
new_row_count=self.default_image_size[0]
if tensor_4d_shape[2] > 0:
new_col_count = tensor_4d_shape[2]
else:
new_col_count=self.default_image_size[1]
if not tensor_4d_shape[0] > 0:
logger.info("Detected that examples have variable sequence_size, will " +
"randomly crop a sequence with num_frames = " +
"{}".format(num_frames))
tensor_4d = crop_time_axis(tensor_4d, num_frames=num_frames)
if not tensor_4d_shape[1] > 0 or not tensor_4d_shape[2] > 0:
logger.info("Detected that examples have variable space size, will " +
"resize space axes to (new_row_count, new_col_count) = " +
"{}".format((new_row_count, new_col_count)))
tensor_4d = resize_space_axes(tensor_4d,
new_row_count=new_row_count,
new_col_count=new_col_count)
logger.info("Tensor shape after preprocessing: {}".format(tensor_4d.shape))
return tensor_4d
def get_dataloader(self, tf_dataset, num_images, batch_size):
''' Get the training PyTorch dataloader
Args:
tf_dataset: Tensorflow Dataset which is given in train function
num_images : number of examples in train data
batch_size : batch_size for training set
Return:
dataloader: PyTorch Training Dataloader
'''
tf_dataset = tf_dataset.map(lambda *x: (self.preprocess_tensor_4d(x[0]), x[1]))
train_dataset = TFDataset(tf_dataset, self.train_session, num_images)
dataloader = torch.utils.data.DataLoader(
train_dataset,
        batch_size=batch_size,
shuffle=True,
drop_last=False
)
return dataloader
def get_dataloader_test(self, tf_dataset, num_images, batch_size):
''' Get the test PyTorch dataloader
Args:
tf_dataset: Tensorflow Dataset which is given in test function
num_images : number of examples in test data
batch_size : batch_size for test set
Return:
dataloader: PyTorch Test Dataloader
'''
tf_dataset = tf_dataset.map(lambda *x: (self.preprocess_tensor_4d(x[0]), x[1]))
dataset = TFDataset(tf_dataset, self.test_session, num_images)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
return dataloader
def trainloop(self, criterion, optimizer, steps):
''' Training loop with no of given steps
Args:
criterion: PyTorch Loss function
Optimizer: PyTorch optimizer for training
steps: No of steps to train the model
Return:
None, updates the model parameters
'''
self.pytorchmodel.train()
data_iterator = iter(self.trainloader)
for i in range(steps):
try:
images, labels = next(data_iterator)
except StopIteration:
data_iterator = iter(self.trainloader)
images, labels = next(data_iterator)
images = images.float().to(self.device)
labels = labels.float().to(self.device)
optimizer.zero_grad()
log_ps = self.pytorchmodel(images)
loss = criterion(log_ps, labels)
if hasattr(self, 'scheduler'):
self.scheduler.step(loss)
loss.backward()
optimizer.step()
def get_steps_to_train(self, remaining_time_budget):
"""Get number of steps for training according to `remaining_time_budget`.
The strategy is:
1. If no training is done before, train for 10 steps (ten batches);
2. Otherwise, estimate training time per step and time needed for test,
then compare to remaining time budget to compute a potential maximum
number of steps (max_steps) that can be trained within time budget;
3. Choose a number (steps_to_train) between 0 and max_steps and train for
this many steps. Double it each time.
"""
if not remaining_time_budget: # This is never true in the competition anyway
remaining_time_budget = 1200 # if no time limit is given, set to 20min
if not self.estimated_time_per_step:
steps_to_train = 10
else:
if self.estimated_time_test:
tentative_estimated_time_test = self.estimated_time_test
else:
tentative_estimated_time_test = 50 # conservative estimation for test
max_steps = int((remaining_time_budget - tentative_estimated_time_test) / self.estimated_time_per_step)
max_steps = max(max_steps, 1)
if self.cumulated_num_tests < np.log(max_steps) / np.log(2):
steps_to_train = int(2 ** self.cumulated_num_tests) # Double steps_to_train after each test
else:
steps_to_train = 0
return steps_to_train
def testloop(self, dataloader):
'''
Args:
dataloader: PyTorch test dataloader
Return:
preds: Predictions of the model as Numpy Array.
'''
preds = []
with torch.no_grad():
self.pytorchmodel.eval()
for images, _ in dataloader:
if torch.cuda.is_available():
images = images.float().cuda()
else:
images = images.float()
log_ps = self.pytorchmodel(images)
pred = torch.sigmoid(log_ps).data > 0.5
preds.append(pred.cpu().numpy())
preds = np.vstack(preds)
return preds
def choose_to_stop_early(self):
"""The criterion to stop further training (thus finish train/predict
process).
"""
# return self.cumulated_num_tests > 10 # Limit to make 10 predictions
# return np.random.rand() < self.early_stop_proba
batch_size = self.train_batch_size
num_examples = self.metadata_.size()
num_epochs = self.cumulated_num_steps * batch_size / num_examples
logger.info("Model already trained for {} epochs.".format(num_epochs))
return num_epochs > self.num_epochs_we_want_to_train # Train for at least certain number of epochs then stop
#### Other helper functions
def crop_time_axis(tensor_4d, num_frames, begin_index=None):
"""Given a 4-D tensor, take a slice of length `num_frames` on its time axis.
Args:
tensor_4d: A Tensor of shape
[sequence_size, row_count, col_count, num_channels]
    num_frames: An integer representing the resulting chunk (sequence) length
begin_index: The index of the beginning of the chunk. If `None`, chosen
randomly.
Returns:
A Tensor of sequence length `num_frames`, which is a chunk of `tensor_4d`.
"""
# pad sequence if not long enough
pad_size = tf.maximum(num_frames - tf.shape(tensor_4d)[0], 0)
padded_tensor = tf.pad(tensor_4d, ((0, pad_size), (0, 0), (0, 0), (0, 0)))
# If not given, randomly choose the beginning index of frames
if not begin_index:
maxval = tf.shape(padded_tensor)[0] - num_frames + 1
begin_index = tf.random.uniform([1],
minval=0,
maxval=maxval,
dtype=tf.int32)
begin_index = tf.stack([begin_index[0], 0, 0, 0], name='begin_index')
sliced_tensor = tf.slice(padded_tensor,
begin=begin_index,
size=[num_frames, -1, -1, -1])
return sliced_tensor
def resize_space_axes(tensor_4d, new_row_count, new_col_count):
"""Given a 4-D tensor, resize space axes to have target size.
Args:
tensor_4d: A Tensor of shape
[sequence_size, row_count, col_count, num_channels].
new_row_count: An integer indicating the target row count.
new_col_count: An integer indicating the target column count.
Returns:
    A Tensor of shape [sequence_size, new_row_count, new_col_count, num_channels].
"""
resized_images = tf.image.resize_images(tensor_4d,
size=(new_row_count, new_col_count))
return resized_images
def get_logger(verbosity_level):
"""Set logging format to something like:
2019-04-25 12:52:51,924 INFO model.py: <message>
"""
logger = logging.getLogger(__file__)
logging_level = getattr(logging, verbosity_level)
logger.setLevel(logging_level)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)s %(filename)s: %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging_level)
stdout_handler.setFormatter(formatter)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
stderr_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
logger.addHandler(stderr_handler)
logger.propagate = False
return logger
logger = get_logger('INFO')
|
StarcoderdataPython
|
3354256
|
from valtypes.condition import is_fixed_length_tuple
def test_not_generic_alias() -> None:
"""
It returns False if the value isn't a generic alias
"""
assert not is_fixed_length_tuple(...)
def test_origin_is_not_tuple() -> None:
"""
It returns False if the origin isn't a tuple
"""
assert not is_fixed_length_tuple(list[int])
def test_variable_length_tuple() -> None:
"""
It returns False if the value is a generic alias of a variable-length tuple
"""
assert not is_fixed_length_tuple(tuple[int, ...])
def test_fixed_length_tuple() -> None:
"""
It returns True if the value is a generic alias of a fixed-length tuple
"""
assert is_fixed_length_tuple(tuple[int])
assert is_fixed_length_tuple(tuple[int, int]) # type: ignore
|
StarcoderdataPython
|
8065503
|
<filename>bert_ner/predict_QG.py
"""
For hotpot Pipeline in codalab
Rewrite eval.py
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from model import Net
from data_load_QG import NerDataset, pad, VOCAB, tokenizer, tag2idx, idx2tag, EvalDataset, QueryDataset, AnswerDataset
import os
import numpy as np
import argparse
from tqdm import tqdm
import json
import re
def tag_numbers(words, preds):
MONTH = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
rule = re.compile('^[0-9]+$')
def date_pattern_1(ptr):
# e.g. 21 March 1873
if ptr + 2 < len(words):
return (words[ptr + 1] in MONTH) and re.match(rule, words[ptr]) and re.match(rule, words[ptr + 2])
else:
return False
def date_pattern_2(ptr):
# e.g. December 12, 2019
if ptr + 3 < len(words):
return (words[ptr] in MONTH) and re.match(rule, words[ptr + 1]) and words[ptr + 2] == ',' and re.match(rule, words[ptr + 3])
else:
return False
ptr = 0
while ptr < len(words):
if preds[ptr] != 'O':
ptr += 1
elif date_pattern_1(ptr):
preds[ptr:ptr+3] = ['J-DATE'] * 3
ptr += 3
elif date_pattern_2(ptr):
preds[ptr:ptr+4] = ['J-DATE'] * 4
ptr += 4
elif re.match(rule, words[ptr]):
preds[ptr] = 'J-NUM'
ptr += 1
else:
ptr += 1
return preds
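# Worked example (sketch): with words = ['born', '21', 'March', '1873', '.'] and
# preds = ['O'] * 5, tag_numbers relabels positions 1-3 as 'J-DATE' (the
# "<day> <Month> <year>" pattern) and leaves the other tags unchanged.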
def get_entities(words, preds):
entities = []
ptr = 0
while ptr < len(words):
FLAG = False
for prefix in ['I-', 'J-']:
sub_words = []
while ptr < len(words) and preds[ptr].startswith(prefix):
sub_words.append(words[ptr])
ptr += 1
if len(sub_words) > 0:
entity = " ".join(sub_words).replace(' .', '.').replace(' ,', ',') # Rearrange blank
entities.append([entity, preds[ptr - 1]])
FLAG = True
if not FLAG:
ptr += 1
return entities
def eval_para(model, iterator, sent_ids, output_path):
model.eval()
Words, Is_heads, Tags, Y, Y_hat = [], [], [], [], []
with torch.no_grad():
for i, batch in enumerate(tqdm(iterator)):
words, x, is_heads, tags, y, seqlens = batch
_, _, y_hat = model(x, y) # y_hat: (N, T)
Words.extend(words)
Is_heads.extend(is_heads)
Tags.extend(tags)
Y.extend(y.numpy().tolist())
Y_hat.extend(y_hat.cpu().numpy().tolist())
entities = {k: dict() for k, sid in sent_ids}
# gets results and save
for i, (words, is_heads, tags, y_hat) in enumerate(zip(Words, Is_heads, Tags, Y_hat)):
y_hat = [hat for head, hat in zip(is_heads, y_hat) if head == 1]
preds = [idx2tag[hat] for hat in y_hat]
assert len(preds) == len(words), f'len(preds)={len(preds)}, len(words)={len(words)}'
words, preds = words[1:-1], preds[1:-1]
preds = tag_numbers(words, preds)
entity = get_entities(words, preds)
key, sid = sent_ids[i][0], sent_ids[i][1]
entities[key][sid] = entity
json.dump(entities, open(output_path, 'w'))
return
def eval_query(model, iterator, sent_ids, output_path):
model.eval()
Words, Is_heads, Tags, Y, Y_hat = [], [], [], [], []
with torch.no_grad():
for i, batch in enumerate(tqdm(iterator)):
words, x, is_heads, tags, y, seqlens = batch
_, _, y_hat = model(x, y) # y_hat: (N, T)
Words.extend(words)
Is_heads.extend(is_heads)
Tags.extend(tags)
Y.extend(y.numpy().tolist())
Y_hat.extend(y_hat.cpu().numpy().tolist())
entities = dict()
# gets results and save
for i, (words, is_heads, tags, y_hat) in enumerate(zip(Words, Is_heads, Tags, Y_hat)):
y_hat = [hat for head, hat in zip(is_heads, y_hat) if head == 1]
preds = [idx2tag[hat] for hat in y_hat]
assert len(preds) == len(words), f'len(preds)={len(preds)}, len(words)={len(words)}'
words, preds = words[1:-2], preds[1:-2] # remove the last punctuation "?"
preds = tag_numbers(words, preds)
entity = get_entities(words, preds)
key = sent_ids[i][0]
entities[key] = entity
json.dump(entities, open(output_path, 'w'))
return
def eval_ans(model, iterator, sent_ids, output_path):
model.eval()
Words, Is_heads, Tags, Y, Y_hat = [], [], [], [], []
with torch.no_grad():
for i, batch in enumerate(tqdm(iterator)):
words, x, is_heads, tags, y, seqlens = batch
_, _, y_hat = model(x, y) # y_hat: (N, T)
Words.extend(words)
Is_heads.extend(is_heads)
Tags.extend(tags)
Y.extend(y.numpy().tolist())
Y_hat.extend(y_hat.cpu().numpy().tolist())
entities = dict()
# gets results and save
for i, (words, is_heads, tags, y_hat) in enumerate(zip(Words, Is_heads, Tags, Y_hat)):
y_hat = [hat for head, hat in zip(is_heads, y_hat) if head == 1]
preds = [idx2tag[hat] for hat in y_hat]
assert len(preds) == len(words), f'len(preds)={len(preds)}, len(words)={len(words)}'
words, preds = words[1:-1], preds[1:-1] # we change this line
preds = tag_numbers(words, preds)
entity = get_entities(words, preds)
key = sent_ids[i][0]
entities[key] = entity
json.dump(entities, open(output_path, 'w'))
return
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--ckpt_path', type=str, default='work_dir/bert_ner.pt')
parser.add_argument('--input_path', type=str, default='work_dir/selected_paras.json')
parser.add_argument('--output_path', type=str, default='work_dir/entities.json')
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--use_answer', action='store_true')
args = parser.parse_args()
if args.use_answer:
eval_dataset = AnswerDataset(args.input_path, debug=False)
else:
eval_dataset = EvalDataset(args.input_path, debug=False)
eval_iter = data.DataLoader(dataset=eval_dataset,
batch_size=args.batch_size,
shuffle=False,
collate_fn=pad)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Net(top_rnns=False, vocab_size=len(VOCAB), device=device, finetuning=True).cuda()
# model = Net(top_rnns=False, vocab_size=len(VOCAB), device=device, finetuning=True).cuda()
model = nn.DataParallel(model)
params = model.state_dict()
for k,v in params.items():
print(k)
checkpoint = torch.load(args.ckpt_path)
for k,v in checkpoint.items():
print(k)
model.load_state_dict(torch.load(args.ckpt_path))
model.eval()
if args.use_answer:
eval_ans(model, eval_iter, eval_dataset.sent_id, args.output_path)
else:
eval_para(model, eval_iter, eval_dataset.sent_id, args.output_path)
|
StarcoderdataPython
|
9726146
|
import xbmc,xbmcgui,xbmcplugin
import sys
import urlparse
import datetime
import resources.lib.utils as utils
from resources.lib.database import Database, WatchHistory, DBSettings
class HistoryGUI:
params = None
historyDB = None
settings = None
def __init__(self,params):
self.params = params
database = Database()
database.connect()
database.checkDBStructure()
self.historyDB = WatchHistory(database)
self.settings = DBSettings(database)
def run(self):
action = int(params['action'])
utils.log("action " + str(action))
if(action == 0):
self._showHistory()
elif(action == 1001):
self._delete(params['id'])
def _showHistory(self):
#we are listing files
xbmcplugin.setContent(int(sys.argv[1]),'files')
xbmcplugin.setPluginCategory(int(sys.argv[1]),'Play History')
xbmcplugin.addSortMethod(int(sys.argv[1]),xbmcplugin.SORT_METHOD_NONE)
context_url = "%s?%s"
#load the history
history = self.historyDB.getAllOrderedLimit('date',0,30)
if(len(history) > 0):
for entry in history:
entryName = str(entry[2])
#if the name is blank use the file path
if(entryName == ''):
entryName = str(entry[3])
item = xbmcgui.ListItem(entryName,str(entry[4]),path=str(entry[3]))
item.setProperty('IsPlayable','true')
item.addContextMenuItems([('Delete from History','Xbmc.RunPlugin(%s?%s)' % (str(sys.argv[0]),'action=1001&id=' + str(entry[0])))])
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url= "%s" % (entry[3],),listitem=item,isFolder=False)
else:
item = xbmcgui.ListItem("No Items In History")
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url="%s?%s" % (sys.argv[0],"action=0"),listitem=item,isFolder=False)
xbmcplugin.endOfDirectory(int(sys.argv[1]),cacheToDisc=False)
def _delete(self,id):
#check if we need PIN confirmation to do this
if(utils.getSetting('require_pin_on_delete') == 'true'):
user_try = xbmcgui.Dialog().numeric(0,'PIN Required')
if(self.settings.checkPIN(user_try)):
self.historyDB.delete(id)
xbmc.executebuiltin('Container.Refresh')
else:
xbmcgui.Dialog().ok('Error','Incorrect PIN')
else:
self.historyDB.delete(id)
xbmc.executebuiltin('Container.Refresh')
def get_params():
param = {}
try:
for i in sys.argv:
args = i
if(args.startswith('?')):
args = args[1:]
param.update(dict(urlparse.parse_qsl(args)))
except:
pass
return param
params = get_params()
#set an action if one does not exist
try:
action = int(params['action'])
except:
params['action'] = 0
pass
gui = HistoryGUI(params)
gui.run()
|
StarcoderdataPython
|
3511633
|
# Generated by Django 2.2.13 on 2021-05-10 21:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project', '0037_auto_20210201_1146'),
]
operations = [
migrations.AlterField(
model_name='project',
name='pi_email',
field=models.CharField(blank=True, max_length=256, verbose_name="Principal Investigator's email"),
),
]
|
StarcoderdataPython
|
1828874
|
from os import listdir, walk
from os.path import isfile, join
import shelve
from clean_stories import clean_directory
class StoryCorpus(object):
def __init__(self, path='.', dp_path='stories'):
self.path = path
self.db_path = dp_path
self.topics = 'names'
# generates db file
# requires clean_stories to be already run
def generate_db(self):
db = shelve.open(self.db_path)
folderNames = []
for x in walk(self.path):
folderPath = x[0]
if len(folderPath) > 2:
folderName = folderPath[2:]
# generate clean txt files
# clean_directory(folderPath)
# Parse all the stories into an array
stories = []
storyfiles = [f for f in listdir(folderPath) if isfile(join(folderPath, f)) and '.txt' in f]
for sf in storyfiles:
path = join(folderPath, sf)
print("Reading clean story: ", path)
with open(path, 'r') as file:
stories.append(file.read())
db[folderName] = stories
folderNames.append(folderName)
db[self.topics] = folderNames
# returns a list of all the story folders
def print_main_stories(self):
db = shelve.open(self.db_path)
print(db[self.topics])
# returns a map of storyfolder: list of stories
def get_stories(self):
db = shelve.open(self.db_path)
data = {}
for topic in db[self.topics]:
data[topic] = db[topic]
return data
# combines all the stories into a giant story array
def get_flat_data(self):
story_map = self.get_stories()
data = []
for tuple in story_map.items():
data.append(tuple[1])
return data
if __name__ == '__main__':
sc = StoryCorpus()
# sc.generate_db()
sc.print_main_stories()
# print(sc.get_flat_data())
|
StarcoderdataPython
|
3407633
|
<reponame>jg10545/cleanup<filename>cleanup/tests/test_plot.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 26 08:52:10 2021
@author: joe
"""
import numpy as np
import matplotlib
import os
from cleanup.plot import build_embedding_figure
def test_build_embedding_figure_returns_fig():
N = 100
embeddings = np.random.normal(0, 1, (N,2))
residuals = np.random.normal(0, 2, (N,))
fig = build_embedding_figure(embeddings, residuals)
assert isinstance(fig, matplotlib.figure.Figure)
def test_build_embedding_figure_saves_fig(tmpdir):
N = 100
embeddings = np.random.normal(0, 1, (N,2))
residuals = np.random.normal(0, 2, (N,))
saveloc = os.path.join(tmpdir, "fig.jpg")
output = build_embedding_figure(embeddings, residuals, save=saveloc)
assert output is None
|
StarcoderdataPython
|
8129068
|
<filename>src/scenic/simulators/newtonian/__init__.py<gh_stars>100-1000
"""Simple Newtonian physics simulator for traffic scenarios.
Allows scenarios written using the :obj:`scenic.domains.driving` abstract
domain to be simulated without installing an external simulator.
.. raw:: html
<h2>Submodules</h2>
.. autosummary::
:toctree:
model
simulator
"""
from .simulator import NewtonianSimulator
|
StarcoderdataPython
|
4841359
|
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
from skimage.color import rgb2hsv, hsv2rgb, rgb2gray
from skimage.filters import gaussian
class patcher(patcher):
def __init__(self, body='./body/body_yuko.png', **options):
super().__init__('幽狐', body=body, pantie_position=[-2, 1130], **options)
self.mask = io.imread('./mask/mask_yuko.png')
self.ribbon_position = [1712, 1601]
self.bra_position = [298, 1301]
try:
self.use_ribbon_mesh = self.options['use_ribbon_mesh']
except:
self.use_ribbon_mesh = self.ask(question='Use Yuko ribbon mesh?', default=False)
if self.use_ribbon_mesh:
self.ribbon_base = io.imread('./mask/ribbon_yuko.png')[:, :, :3] / 255
self.ribbon_shade = io.imread('./material/ribbon_yuko.png')[:, :, 3] / 255
self.bra_base = io.imread('./mask/bra_yuko.png')[1300:, 300:-400] / 255
self.bra_mask = self.bra_base[:, :, 0] > 0
self.bra_center = io.imread('./mask/bra_yuko_center.png')[1300:, 300:-400, 0] > 0
self.bra_shade = io.imread('./material/bra_yuko_shade.png')[1300:, 300:-400, 3] / 255
self.frill = io.imread('./material/bra_yuko_frill.png')[1300:, 300:-400] / 255
self.lace = io.imread('./material/bra_yuko_lace.png')[1300:, 300:-400] / 255
self.ribbon_mask = io.imread('./mask/ribbon.png')
def gen_ribbon(self, image):
image = np.array(image)
ribbon = image[19:58, 5:35, :3]
base_color = np.mean(np.mean(ribbon[5:12, 16:20], axis=0), axis=0) / 255
shade_color = np.mean(np.mean(ribbon[8:14, 7:15], axis=0), axis=0) / 255
ribbon_base = io.imread('./mask/ribbon_yuko.png')[:, :, :3] / 255
ribbon_shade = io.imread('./material/ribbon_yuko.png')[:, :, 3] / 255
ribbon_base = (self.ribbon_base > 0) * base_color
ribbon_shade = self.ribbon_shade[:, :, None] * (1 - shade_color)
ribbon = ribbon_base - ribbon_shade
ribbon = np.dstack((ribbon, ribbon[:, :, 0] > 0))
ribbon = np.clip(ribbon, 0, 1)
return Image.fromarray(np.uint8(ribbon * 255))
def gen_bra(self, image):
# image = Image.open('./dream/0101.png')
pantie = np.array(image)
if self.use_ribbon_mesh:
pantie = ribbon_inpaint(pantie)
else:
ribbon = pantie.copy()
ribbon[:, :, 3] = self.ribbon_mask[:, :, 1]
ribbon = ribbon[19:58, 8:30] / 255.0
front = pantie[20:100, 30:80, :3] / 255
front_shade = pantie[100:150, 0:40, :3] / 255
center = pantie[20:170, -200:-15, :3] / 255
base_color = np.mean(np.mean(center, axis=0), axis=0)
front_color = np.mean(np.mean(front, axis=0), axis=0)
shade_color = np.mean(np.mean(front_shade, axis=0), axis=0)
# make seamless design
design = rgb2gray(center[:, :, :3])[::-1, ::-1]
design = (design - np.min(design)) / (np.max(design) - np.min(design))
edge = 3
design_seamless = gaussian(design, sigma=3)
design_seamless[edge:-edge, edge:-edge] = design[edge:-edge, edge:-edge]
[hr, hc, hd] = center.shape
y = np.arange(-hr / 2, hr / 2, dtype=np.int16)
x = np.arange(-hc / 2, hc / 2, dtype=np.int16)
design_seamless = (design_seamless[y, :])[:, x] # rearrange pixels
design_seamless = resize(design_seamless, [1.65, 1.8])
design_seamless = np.tile(design_seamless, (3, 4))
posy = int((self.bra_center.shape[0] - design_seamless.shape[0]) / 2)
posx = int((self.bra_center.shape[1] - design_seamless.shape[1]) / 2)
sx = 0
sy = 0
design_seamless = (np.pad(design_seamless, [(posy + sy + 1, posy - sy), (posx + sx, posx - sx)], mode='constant'))
# Base shading
bra_base = self.bra_base[:, :, :3] * front_color
bra_base = bra_base - design_seamless[:, :, None] / 10
shade = rgb2hsv(np.tile((self.bra_shade)[:, :, None], [1, 1, 3]) * base_color)
shade[:, :, 0] -= 1
shade[:, :, 1] *= 0.5 + np.mean(base_color) / 3
shade[:, :, 2] /= 1 + 1 * np.mean(base_color)
bra_shade = hsv2rgb(shade)
# bra_shade = bra_shade[:, :, None] * shade_color
# Center painting
sx = -270
sy = -50
center = resize(center, [4, 4])
posy = int((self.bra_center.shape[0] - center.shape[0]) / 2)
posx = int((self.bra_center.shape[1] - center.shape[1]) / 2)
center = (np.pad(center, [(posy + sy, posy - sy), (posx + sx, posx - sx), (0, 0)], mode='constant'))
center = center * self.bra_center[:, :, None]
# Decoration painting
deco_shade = np.median(pantie[5, :, :3], axis=0) / 255
frill = np.dstack((self.frill[:, :, :3] * deco_shade, self.frill[:, :, 3]))
lace = np.dstack((self.lace[:, :, :3] * shade_color, self.lace[:, :, 3]))
# Finalize
textured = bra_base * (1 - self.bra_center[:, :, None]) + center * self.bra_center[:, :, None]
textured = textured - bra_shade
textured = textured * (1 - lace[:, :, 3])[:, :, None] + lace[:, :, :3] * lace[:, :, 3][:, :, None]
textured = textured * (1 - frill[:, :, 3])[:, :, None] + frill[:, :, :3] * frill[:, :, 3][:, :, None]
textured = np.dstack((textured, self.bra_mask))
if self.use_ribbon_mesh is False:
ribbon = skt.rotate(ribbon, 8, resize=True)
ribbon = resize(ribbon, [1.5, 1.5])
[r, c, d] = ribbon.shape
textured[460:460 + r, 35:35 + c] = textured[460:460 + r, 35:35 + c] * (1 - ribbon[:, :, 3][:, :, None]) + ribbon * ribbon[:, :, 3][:, :, None]
return Image.fromarray(np.uint8(np.clip(textured, 0, 1) * 255))
def convert(self, image):
pantie = np.array(image)
[r, c, d] = pantie.shape
# move from hip to front
patch = np.copy(pantie[-140:-5, 546:, :])
patch = skt.resize(patch[::-1, ::-1, :], (270, 63), anti_aliasing=True, mode='reflect')
[pr, pc, d] = patch.shape
pantie[123:123 + pr, :pc, :] = np.uint8(patch * 255)
# Inpainting ribbon
if self.use_ribbon_mesh:
pantie = ribbon_inpaint(pantie)
# Front transform
front = pantie[:390, :250, :]
front = np.pad(front, [(0, 0), (50, 0), (0, 0)], mode='constant')
front = front.transpose(1, 0, 2)
arrx = np.zeros((100))
arry = np.zeros((100))
arrx[40:] -= (np.linspace(0, 1 * np.pi, 60)**2) * 4
arrx[28:70] += (np.sin(np.linspace(0, 1 * np.pi, 100)) * 10)[28:70]
front = affine_transform_by_arr(front, arrx, arry)
front = np.uint8(front.transpose(1, 0, 2) * 255)[:, 38:]
# Back transform
back = pantie[:350, 250:, :]
back = np.pad(back, [(0, 0), (0, 100), (0, 0)], mode='constant')
back = back.transpose(1, 0, 2)
arrx = np.zeros((100))
arry = np.zeros((100))
arrx[10:] -= (np.linspace(0, 1 * np.pi, 90)**3) * 14
back = affine_transform_by_arr(back, arrx, arry, smoothx=True)
back = np.uint8(back.transpose(1, 0, 2) * 255.0)[:, 1:]
# Merge front and back
pantie = np.zeros((np.max((front.shape[0], back.shape[0])), front.shape[1] + back.shape[1], d), dtype=np.uint8)
pantie[:front.shape[0], :front.shape[1]] = front
pantie[:back.shape[0], front.shape[1]:] = back
# main transform
arrx = np.zeros((100))
arry = np.zeros((100))
arrx[35:] += (np.cos(np.linspace(0, 1 * np.pi, 100) - np.pi) * -75)[35:] - 30
arrx[:30] += (np.sin(np.linspace(0, 3 * np.pi, 100) - np.pi / 0.9) * 10)[:30]
arrx[50:80] += (np.sin(np.linspace(0, 3 * np.pi, 100) - np.pi) * 11)[:30]
arry += np.linspace(0, 1, 100) * -50
arry[:30] += (np.sin(np.linspace(0, 3 * np.pi, 100) - np.pi) * 35)[:30]
pantie = affine_transform_by_arr(pantie, arrx, arry, smoothx=True)
pantie = skt.rotate(pantie, 8.1, resize=True)
# Finalize
pantie = resize(pantie, [2.31, 2.35])
pantie = pantie[140:-80, 72:]
pantie = np.uint8(pantie * 255)
pantie = np.bitwise_and(pantie, self.mask)
return Image.fromarray(pantie)
def patch(self, image, transparent=False):
pantie = self.convert(image)
if transparent:
patched = Image.new("RGBA", self.body_size)
else:
patched = self.body.copy()
if self.use_ribbon_mesh:
ribbon = self.gen_ribbon(image)
self.paste(patched, ribbon, self.ribbon_position)
bra = self.gen_bra(image)
patched = self.paste(patched, bra, self.bra_position)
patched = self.paste(patched, pantie, self.pantie_position)
return patched
|
StarcoderdataPython
|
1970031
|
# encoding: utf-8
"""
openbikebox websocket-client
Copyright (c) 2021, binary butterfly GmbH
Use of this source code is governed by an MIT-style license that can be found in the LICENSE file.
"""
class Config:
CLIENT_UID = 'client-1'
CLIENT_PASSWORD = 'password'
OBB_CONNECT_URL = 'ws://your-server:port/connect/%s' % CLIENT_UID
CARDREADER_SERVER = 'localhost'
CARDREADER_PORT = 9000
RESOURCES = {
'resource-1': 17,
'resource-2': 27,
'resource-3': 22,
'resource-4': 18
}
|
StarcoderdataPython
|
5160185
|
<filename>setup.py
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
requires = ["requests == 1.2.3"]
setup(
name='pystex',
version='0.0.20',
description='Python StackExchange API Client',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/stevenc81/pystex',
packages=['pystex'],
package_data={'': ['LICENSE']},
install_requires=requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
StarcoderdataPython
|
9629686
|
<gh_stars>0
from django.shortcuts import render
from django.conf import settings
import jwt
from users.models import User
def decode_token(header):
access_token = header.get('HTTP_AUTHORIZATION')[4:]
decoded_access_token = jwt.decode(access_token, settings.SECRET_KEY)
user = User.objects.filter(pk=decoded_access_token.get('user_id')).first()
return user
|
StarcoderdataPython
|
3547705
|
from statsmodels.stats.multicomp import pairwise_tukeyhsd
import pandas as pd
# store the data
veryants = pd.read_csv('veryants.csv')
print(veryants)
# run tukey's test
tukey_results = pairwise_tukeyhsd(veryants.Sale, veryants.Store, 0.05)
print(tukey_results)
# determine significance
a_b_significant = True
a_c_significant = False
b_c_significant = False
|
StarcoderdataPython
|
6487913
|
<filename>utils/file_handling.py
import os
import re
import pandas as pd
from expworkup.devconfig import valid_input_files, workup_targets, lab_vars
from utils.globals import get_debug_header, get_debug_simple
def get_interface_filename(interface_type, working_directory, runID):
""" Searches for filename match and returns instance
Specified in devconfig['valid_input_files']
new file names (suffixes) can be added in devconfig as needed
Parameters
----------
working_directory : (aka save_directory) where local files are
report default = {target_directory}/gdrive_files
runID : name of gdrive folder containing the experiment
aka. experiment_name, e.g. 2019-09-18T20_27_33.741387+00_00_LBL
Returns
-------
filename : identified filename for a particular type of file
e.g. type = 'experiment_specification' could be
'ExperimentSpecification.xls' or 'RobotInput.xls'
"""
for suffix in valid_input_files[interface_type]:
filename = os.path.join(working_directory, f'{runID}_{suffix}')
if os.path.exists(filename):
return filename
raise FileNotFoundError(f'Could not find any of {valid_input_files[interface_type]} file for {runID}')
def get_experimental_run_lab(run_filename):
""" parses experiment foldername and returns lab
Parameters
----------
run_filename: either the remote run directory name or the local json that is generated from it
Returns
-------
labname
"""
for lab in lab_vars.keys():
lab_pat = re.compile(f'_({lab})($|.json$)')
        labname = lab_pat.search(run_filename.strip())  # returns a match object if the lab name is found
if labname:
return labname.group(1)
raise RuntimeError(f'{run_filename} does not specify a supported lab')
def write_debug_file(df, filename, write_index=True):
if os.path.isfile(filename):
os.remove(filename)
f = open(filename, 'a')
if not get_debug_simple():
f.write(get_debug_header())
df.to_csv(f, index=write_index)
f.write(get_debug_header())
else:
df.to_csv(f, index=write_index)
f.close()
def get_command_dict(one_type, application):
"""Converts expworkup.type_command.csv to dict for chemdescriptor
Parameters
----------
one_type : defines which chemical type to target
should match an entry in command_types_df 'types' column
application : defines the application being targeted by caller
will only return rows where actor_systemtool_name matches
specified application
Returns
-------
default_command_dict : structure shown below
default_command_dict = {
"descriptors": {
"acceptorcount": {
"command": [
"acceptorcount"
],
"column_names": [
"_feat_acceptorcount"
]
},...
""ph_descriptors": {
"molsurfaceareaASAp": {
"command": [
"molecularsurfacearea",
"-t",
"ASA+"
],
"column_names": [
"_feat_molsurfaceareaASAp"
]
},...
Notes
-----
* https://github.com/darkreactions/chemdescriptor
* 'descriptors' must be specified fully (including flags where needed)
* 'ph_descriptors' are those which have -H option, can use to simplify return
"""
command_type_df = pd.read_csv('./type_command.csv')
if one_type == 'any':
commands_df = command_type_df[(command_type_df['actor_systemtool_name'] == application)]
else:
commands_df = command_type_df[(command_type_df['input'] == one_type) & \
(command_type_df['actor_systemtool_name'] == application)]
my_descriptor_dict = {}
for command in commands_df.itertuples():
column_name = f'_feat_{command.short_name}'
my_descriptor_dict[command.short_name] = {}
# 'space' (i.e, ' ') removal
templist = command.calc_definition.split(' ')
str_list = list(filter(None, templist))
my_descriptor_dict[command.short_name]["command"] = str_list
my_descriptor_dict[command.short_name]["column_names"] = [column_name]
my_descriptor_dict[command.short_name]["alternative_input"] = command.alternative_input
command_dict = {}
command_dict['descriptors'] = my_descriptor_dict
command_dict['ph_descriptors'] = {} # possibly useful, see chemdescriptor for more details
if len(command_dict['descriptors'].keys()) == 0:
return None
else:
return(command_dict)
|
StarcoderdataPython
|
9764735
|
from opytimizer.optimizers.swarm import AF
# One should declare a hyperparameters object based
# on the desired algorithm that will be used
params = {
'c1': 0.75,
'c2': 1.25,
'm': 10,
'Q': 0.75
}
# Creates an AF optimizer
o = AF(params=params)
|
StarcoderdataPython
|
6609974
|
<filename>cargaimagenes.py
import cv2
import numpy as np
# load a color image
IRGB=cv2.imread("009.jpg")
print(IRGB)
print(IRGB.shape)
print("lineas agregadas en mai")
IGS=cv2.cvtColor(IRGB,cv2.COLOR_BGR2GRAY)
print(IGS)
print(IGS.shape)
cv2.imwrite('009GS.jpg',IGS)
|
StarcoderdataPython
|
4864442
|
import shutil,os,glob
def setup_default(out_dir='./mysite',add_as_service=True):
dir=os.path.dirname(__file__)+'/pjfiles'
if not os.path.exists(out_dir):os.makedirs(out_dir)
for i,f in enumerate(glob.glob(dir+'/*')):
if os.path.isdir(f):continue
f2=out_dir+'/'+os.path.basename(f)
shutil.copy(f,f2)
if not add_as_service:return True
from wk.extra.linux.tools import add_py_to_service
add_py_to_service(service_name="mysite",project_dir=out_dir,script_file_path=out_dir+'/main.py')
|
StarcoderdataPython
|
11367532
|
"""Test suite for ashley receivers"""
import json
from django.test import TestCase
from django.urls import reverse
from machina.apps.forum_permission.shortcuts import assign_perm
from ashley import SESSION_LTI_CONTEXT_ID
from ashley.factories import LTIContextFactory, PostFactory, TopicFactory, UserFactory
class TestTrackTopicView(TestCase):
"""Test the track_topic_view receiver"""
def test_xapi_logger(self):
"""
When a topic is viewed, the test_track_topic receiver should emit an
XAPI event on the logger configured for the corresponding LTIConsumer.
"""
# Create a topic in a new forum
topic = TopicFactory()
for _ in range(42):
PostFactory(topic=topic)
# Create a user with access to this forum
user = UserFactory()
lti_context = LTIContextFactory(
lti_consumer=user.lti_consumer,
lti_id="course-v1:myschool+mathematics101+session01",
)
forum = topic.forum
forum.lti_contexts.add(lti_context)
assign_perm("can_read_forum", user, forum, True)
topic_url = reverse(
"forum_conversation:topic",
kwargs={
"forum_slug": topic.forum.slug,
"forum_pk": topic.forum.pk,
"slug": topic.slug,
"pk": topic.pk,
},
)
logger_name = f"xapi.{user.lti_consumer.slug}"
with self.assertLogs(logger=logger_name, level="INFO") as cm:
self.client.force_login(user, "ashley.auth.backend.LTIBackend")
session = self.client.session
session[SESSION_LTI_CONTEXT_ID] = lti_context.id
session.save()
response = self.client.get(topic_url, data={"page": 2})
self.assertEqual(response.status_code, 200)
# One line of debug should have been written
self.assertEqual(len(cm.output), 1)
# Extract XAPI statement from log output
log_prefix_len = len(f"{logger_name}:INFO:")
raw_xapi_event = cm.output[0][log_prefix_len:]
xapi_event = json.loads(raw_xapi_event)
# The XAPI event should have an ID
self.assertIn("id", xapi_event)
# Validate the actor part of the XAPI event
self.assertEqual("Agent", xapi_event["actor"]["objectType"])
self.assertEqual(
user.lti_consumer.url, xapi_event["actor"]["account"]["homePage"]
)
self.assertEqual(
user.public_username, xapi_event["actor"]["account"]["name"]
)
# Validate the verb
self.assertEqual(
"http://id.tincanapi.com/verb/viewed", xapi_event["verb"]["id"]
)
# Validate the activity
self.assertEqual(
f"id://ashley/topic/{topic.pk}", xapi_event["object"]["id"]
)
self.assertEqual("Activity", xapi_event["object"]["objectType"])
self.assertEqual(
"http://id.tincanapi.com/activitytype/discussion",
xapi_event["object"]["definition"]["type"],
)
# validate the activity definition extensions
expected_extensions = {
"https://w3id.org/xapi/acrossx/extensions/total-items": 42,
"https://w3id.org/xapi/acrossx/extensions/total-pages": 3,
}
self.assertEqual(
xapi_event["object"]["definition"]["extensions"], expected_extensions
)
# Validate the context
self.assertEqual(
f"uuid://{topic.forum.lti_id}",
xapi_event["context"]["contextActivities"]["parent"][0]["id"],
)
self.assertEqual(
"Activity",
xapi_event["context"]["contextActivities"]["parent"][0]["objectType"],
)
self.assertEqual(
xapi_event["context"]["extensions"],
{"http://www.risc-inc.com/annotator/extensions/page": 2},
)
self.assertEqual(
"http://id.tincanapi.com/activitytype/community-site",
xapi_event["context"]["contextActivities"]["parent"][0]["definition"][
"type"
],
)
self.assertEqual(
"course-v1:myschool+mathematics101+session01",
xapi_event["context"]["contextActivities"]["parent"][1]["id"],
)
self.assertEqual(
"Activity",
xapi_event["context"]["contextActivities"]["parent"][1]["objectType"],
)
self.assertEqual(
"http://adlnet.gov/expapi/activities/course",
xapi_event["context"]["contextActivities"]["parent"][1]["definition"][
"type"
],
)
|
StarcoderdataPython
|
11258547
|
import asyncio
import threading
import bleak
class BleakAdapter:
@staticmethod
def scan_toys(timeout: float = 5.0):
return asyncio.run(bleak.discover(timeout))
def __init__(self, address):
self.__event_loop = asyncio.new_event_loop()
self.__device = bleak.BleakClient(address, timeout=5.0)
self.__lock = threading.Lock()
self.__thread = threading.Thread(target=self.__event_loop.run_forever)
self.__thread.start()
try:
self.__execute(self.__device.connect())
except:
self.close(False)
raise
def __execute(self, coroutine):
with self.__lock:
return asyncio.run_coroutine_threadsafe(coroutine, self.__event_loop).result()
def close(self, disconnect=True):
if disconnect:
self.__execute(self.__device.disconnect())
with self.__lock:
self.__event_loop.call_soon_threadsafe(self.__event_loop.stop)
self.__thread.join()
self.__event_loop.close()
def set_callback(self, uuid, cb):
self.__execute(self.__device.start_notify(uuid, cb))
def write(self, uuid, data):
self.__execute(self.__device.write_gatt_char(uuid, data, True))
|
StarcoderdataPython
|
23158
|
<filename>awsshell/autocomplete.py
from __future__ import print_function
from awsshell.fuzzy import fuzzy_search
from awsshell.substring import substring_search
class AWSCLIModelCompleter(object):
"""Autocompletion based on the JSON models for AWS services.
This class consumes indexed data based on the JSON models from
AWS service (which we pull through botocore's data loaders).
"""
def __init__(self, index_data, match_fuzzy=True):
self._index = index_data
self._root_name = 'aws'
self._global_options = index_data[self._root_name]['arguments']
# These values mutate as autocompletions occur.
# They track state to improve the autocompletion speed.
self._current_name = 'aws'
self._current = index_data[self._root_name]
self._last_position = 0
self._current_line = ''
self.last_option = ''
# This will get populated as a command is completed.
self.cmd_path = [self._current_name]
self.match_fuzzy = match_fuzzy
@property
def arg_metadata(self):
# Returns the required arguments for the current level.
return self._current.get('argument_metadata', {})
def reset(self):
# Resets all the state. Called after a user runs
# a command.
self._current_name = self._root_name
self._current = self._index[self._root_name]
self._last_position = 0
self.last_option = ''
self.cmd_path = [self._current_name]
def autocomplete(self, line):
"""Given a line, return a list of suggestions."""
current_length = len(line)
self._current_line = line
if current_length == 1 and self._last_position > 1:
# Reset state. This is likely from a user completing
# a previous command.
self.reset()
elif current_length < self._last_position:
# The user has hit backspace. We'll need to check
# the current words.
return self._handle_backspace()
elif not line:
return []
elif current_length != self._last_position + 1:
return self._complete_from_full_parse()
# This position is important. We only update the _last_position
# after we've checked the special cases above where that value
# matters.
self._last_position = len(line)
if line and not line.strip():
# Special case, the user hits a space on a new line so
# we autocomplete all the top level commands.
return self._current['commands']
last_word = line.split()[-1]
if last_word in self.arg_metadata or last_word in self._global_options:
# The last thing we completed was an argument, record
# this as self.last_arg
self.last_option = last_word
if line[-1] == ' ':
# At this point the user has autocompleted a command
# or an argument and has hit space. If they've
# just completed a command, we need to change the
# current context and traverse into the subcommand.
# "ec2 "
# ^--here, need to traverse into "ec2"
#
# Otherwise:
# "ec2 --no-validate-ssl "
# ^-- here, stay on "ec2" context.
if not last_word.startswith('-'):
next_command = self._current['children'].get(last_word)
if next_command is not None:
self._current = next_command
self._current_name = last_word
self.cmd_path.append(self._current_name)
elif last_word in self.arg_metadata and \
self.arg_metadata[last_word]['example']:
# Then this is an arg with a shorthand example so we'll
# suggest that example.
return [self.arg_metadata[last_word]['example']]
# Even if we don't change context, we still want to
# autocomplete all the commands for the current context
# in either of the above two cases.
return self._current['commands'][:]
elif last_word.startswith('-'):
# TODO: cache this for the duration of the current context.
# We don't need to recompute this until the args are
# different.
all_args = self._get_all_args()
if self.match_fuzzy:
return fuzzy_search(last_word, all_args)
else:
return substring_search(last_word, all_args)
if self.match_fuzzy:
return fuzzy_search(last_word, self._current['commands'])
else:
return substring_search(last_word, self._current['commands'])
def _get_all_args(self):
if self._current['arguments'] != self._global_options:
all_args = self._current['arguments'] + self._global_options
else:
all_args = self._current['arguments']
return all_args
def _handle_backspace(self):
return self._complete_from_full_parse()
def _complete_from_full_parse(self):
# We try to avoid calling this, but this is necessary
# sometimes. In this scenario, we're resetting everything
# and starting from the very beginning and reparsing
# everything.
# This is a naive implementation for now. This
# can be optimized.
self.reset()
line = self._current_line
for i in range(1, len(self._current_line)):
self.autocomplete(line[:i])
return self.autocomplete(line)
def _autocomplete_options(self, last_word):
global_args = []
# Autocomplete argument names.
current_arg_completions = [
cmd for cmd in self._current['arguments']
if cmd.startswith(last_word)]
if self._current_name != self._root_name:
# Also autocomplete global arguments.
global_args = [
cmd for cmd in self._global_options if
cmd.startswith(last_word)]
return current_arg_completions + global_args
|
StarcoderdataPython
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.