id | text | dataset_id
---|---|---|
98478 | <filename>tools/disasm.py<gh_stars>0
import argparse
import subprocess
import re
FLINE_RE = re.compile(r'\s*/\*\w{4}\*/\s*([^;]*;)\s*/\* 0x(\w{16}) \*/\s*')
SLINE_RE = re.compile(r'\s*/\* 0x(\w{16}) \*/\s*')
FNAME_RE = re.compile(r'\s*Function : ([\w|\(|\)]+)\s*')
BRA_RE = re.compile(r'(.*BRA(?:\.U)? )(0x\w+);')
def parseCtrl(sline):
enc = int(SLINE_RE.match(sline).group(1), 16)
stall = (enc >> 41) & 0xf
yld = (enc >> 45) & 0x1
wrtdb = (enc >> 46) & 0x7
readb = (enc >> 49) & 0x7
watdb = (enc >> 52) & 0x3f
yld_str = 'Y' if yld == 0 else '-'
wrtdb_str = '-' if wrtdb == 7 else str(wrtdb)
readb_str = '-' if readb == 7 else str(readb)
watdb_str = '--' if watdb == 0 else f'{watdb:02d}'
return f'{watdb_str}:{readb_str}:{wrtdb_str}:{yld_str}:{stall:x}'
def processSassLines(fline, sline, labels):
asm = FLINE_RE.match(fline).group(1)
# Remove trailing space before the semicolon
if asm.endswith(" ;"):
asm = asm[:-2] + ";"
ctrl = parseCtrl(sline)
# BRA target address
if BRA_RE.match(asm) is not None:
target = int(BRA_RE.match(asm).group(2), 16)
if target not in labels:
labels[target] = len(labels)
return (f'{ctrl}', f'{asm}')
def extract(file_path, fun):
if fun is None:
sass_str = subprocess.check_output(["cuobjdump", "-sass", file_path])
else:
sass_str = subprocess.check_output(["cuobjdump", "-fun", fun, "-sass", file_path])
sass_lines = sass_str.splitlines()
line_idx = 0
while line_idx < len(sass_lines):
line = sass_lines[line_idx].decode()
# format:
# function : <function_name>
# .headerflags: ...
# /*0000*/ asmstr /*0x...*/
# /*0x...*/
fname_match = FNAME_RE.match(line)
# Looking for new function header (function: <name>)
while FNAME_RE.match(line) is None:
line_idx += 1
if line_idx < len(sass_lines):
line = sass_lines[line_idx].decode()
else:
return
fname = FNAME_RE.match(line).group(1)
print(f'Function:{fname}')
line_idx += 2 # bypass .headerflags
line = sass_lines[line_idx].decode()
# Remapping address to label
labels = {} # address -> label_idx
# store sass asm in a buffer and then print it (needed for labels)
# (ctrl, asm)
asm_buffer = []
while FLINE_RE.match(line) is not None:
# First line (Offset ASM Encoding)
fline = sass_lines[line_idx].decode()
line_idx += 1
# Second line (Encoding)
sline = sass_lines[line_idx].decode()
line_idx += 1
asm_buffer.append(processSassLines(fline, sline, labels))
# peek the next line
line = sass_lines[line_idx].decode()
# Print sass
# label naming convention: LBB#i
for idx, (ctrl, asm) in enumerate(asm_buffer):
# Print label if this is BRA target
offset = idx * 16
if offset in labels:
label_name = f'LBB{labels[offset]}'
print(f'{label_name}:')
print(ctrl, end='\t')
# if this is BRA, remap offset to label
if BRA_RE.match(asm):
target = int(BRA_RE.match(asm).group(2), 16)
target_name = f'LBB{labels[target]}'
asm = BRA_RE.sub(rf'\1{target_name};', asm)
print(asm)
print('\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="nv disasm")
parser.add_argument('file_path')
parser.add_argument('-fun', required=False,
help='Specify names of device functions whose fat binary structures must be dumped.')
args = parser.parse_args()
extract(args.file_path, args.fun)
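# A minimal usage sketch (the binary name below is hypothetical; cuobjdump from the
# CUDA toolkit must be on PATH for the subprocess calls above to work):
#   python disasm.py my_kernels.cubin
#   python disasm.py my_kernels.cubin -fun my_kernel_name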
| StarcoderdataPython |
127964 | <reponame>krakowiakpawel9/python_kurs<filename>02_struktury_danych/02_tuple.py
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
@site: e-smartdata.org
"""
empty_tuple = tuple()
print(empty_tuple)
# %%
amazon = ('Amazon', 'USA', 'Technology', 1)
google = ('Google', 'USA', 'Technology', 2)
# %%
name_google = google[0]
# %%
data = (amazon, google)
print(data)
# %%
a = ('Pawel', 'Krakowiak')
print(a)
# %%
imie = 'Pawel'
nazwisko = 'Krakowiak'
# %%
imie, nazwisko, id_user = ('Pawel', 'Krakowiak', '001')
# %%
amazon_name, country, sector, rank = amazon
# %%
stocks = 'Amazon', 'Apple', 'IBM'
print(type(stocks))
# %%
nested = 'Europa', 'Polska', ('Warszawa', 'Krakow', 'Wroclaw')
print(nested)
# %%
a = 12
b = 14
c = b
b = a
a = c
print(a, b)
# %%
x, y = 10, 15
x, y = y, x
print(x, y)
| StarcoderdataPython |
3316121 | <reponame>cducrest/eth-tester-rpc<filename>tests/integration/web3/threads.py
"""
A minimal implementation of the various gevent APIs used within this codebase.
"""
import threading
class ThreadWithReturn(threading.Thread):
def __init__(self, target=None, args=None, kwargs=None):
super().__init__(
target=target,
args=args or tuple(),
kwargs=kwargs or {},
)
self.target = target
self.args = args
self.kwargs = kwargs
def run(self):
self._return = self.target(*self.args, **self.kwargs)
def get(self, timeout=None):
self.join(timeout)
try:
return self._return
except AttributeError:
raise RuntimeError("Something went wrong. No `_return` property was set")
def spawn(target, *args, thread_class=ThreadWithReturn, **kwargs):
thread = thread_class(
target=target,
args=args,
kwargs=kwargs,
)
thread.daemon = True
thread.start()
return thread
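# A minimal usage sketch of spawn()/get() with a hypothetical worker function;
# it only runs when this module is executed directly.
if __name__ == "__main__":
    def _square(x):
        return x * x

    worker = spawn(_square, 7)
    print(worker.get(timeout=5))  # expected output: 49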
| StarcoderdataPython |
38286 | import logging
import os
from figcli.config.style.color import Color
from figcli.io.input import Input
from figcli.svcs.config_manager import ConfigManager
from figcli.config.aws import *
from figcli.config.constants import *
log = logging.getLogger(__name__)
class AWSConfig:
"""
Utility methods for interacting with AWSCLI resources, such as the ~/.aws/credentials and ~/.aws/config files
"""
def __init__(self, color: Color = Color(False)):
self.init_files()
self.c = color
self._config = ConfigManager(AWS_CONFIG_FILE_PATH)
self._creds = ConfigManager(AWS_CREDENTIALS_FILE_PATH)
@staticmethod
def init_files():
os.makedirs(os.path.dirname(AWS_CREDENTIALS_FILE_PATH), exist_ok=True)
if not os.path.exists(AWS_CREDENTIALS_FILE_PATH):
with open(AWS_CREDENTIALS_FILE_PATH, "w+") as file:
file.write("")
if not os.path.exists(AWS_CONFIG_FILE_PATH):
with open(AWS_CONFIG_FILE_PATH, "w+") as file:
file.write("")
def _is_temporary_session(self, profile_name: str):
if self._creds.has_section(profile_name):
return self._creds.has_option(profile_name, AWS_CFG_TOKEN)
return False
def _backup_section(self, section: str):
backup_name, backup_profile = f'{section}-figgy-backup', f'profile {section}-figgy-backup'
profile_name = f'profile {section}'
if self._creds.has_section(section):
for opt in self._creds.options(section):
self._creds.set_config(backup_name, opt, self._creds.get_option(section, opt))
if self._config.has_section(profile_name):
for opt in self._config.options(profile_name):
self._config.set_config(backup_profile, opt, self._config.get_option(profile_name, opt))
def restore(self, profile_name: str):
"""
Restore credentials previously backed up by Figgy.
"""
config_profile = f'profile {profile_name}'
backup_name, backup_profile = f'{profile_name}-figgy-backup', f'profile {profile_name}-figgy-backup'
creds_restored, config_restored = False, False
if self._creds.has_section(backup_name):
for opt in self._creds.options(backup_name):
self._creds.set_config(profile_name, opt, self._creds.get_option(backup_name, opt))
creds_restored = True
if self._config.has_section(backup_profile):
for opt in self._config.options(backup_profile):
self._config.set_config(config_profile, opt, self._config.get_option(backup_profile, opt))
config_restored = True
self._creds.delete(profile_name, AWS_CFG_TOKEN)
self._creds.save()
self._config.save()
if creds_restored and config_restored:
print(f"\n{self.c.fg_gr}Restoration successful!{self.c.rs}")
else:
print(f"\n{self.c.fg_yl}Unable to restore credentials. Profile: "
f"{self.c.fg_bl}[{backup_name}]{self.c.rs}{self.c.fg_yl} was not found in either the "
f"~/.aws/credentials or ~/.aws/config files.{self.c.rs}")
def write_credentials(self, access_key: str, secret_key: str, token: str, region: str,
profile_name: str = 'default') -> None:
"""
Overwrite the credentials stored in the given profile (default: [default]) in both the ~/.aws/config and ~/.aws/credentials files
with the provided temporary credentials. This method also CREATES these files if they do not already exist.
"""
if not self._is_temporary_session(profile_name):
print(f"\n{self.c.fg_yl}Existing AWS Profile {self.c.fg_bl}[{profile_name}]{self.c.rs}{self.c.fg_yl} "
f"was found with long-lived access keys "
f"in file: {self.c.fg_bl}~/.aws/credentials{self.c.rs}{self.c.fg_yl}.\n"
f"To avoid overwriting these keys, they will be moved under profile: "
f"{self.c.rs}{self.c.fg_bl}[{profile_name}-figgy-backup]{self.c.rs}{self.c.fg_yl}.{self.c.rs}\n\n"
f"These old keys may be restored with: {self.c.fg_bl}`"
f"{CLI_NAME} iam restore`{self.c.rs}.")
self._backup_section(profile_name)
self._creds.set_config(profile_name, AWS_CFG_ACCESS_KEY_ID, access_key)
self._creds.set_config(profile_name, AWS_CFG_SECRET_KEY, secret_key)
self._creds.set_config(profile_name, AWS_CFG_TOKEN, token)
config_section = f'profile {profile_name}'
self._config.set_config(config_section, AWS_CFG_REGION, region)
self._config.set_config(config_section, AWS_CFG_OUTPUT, 'json')
print(f"\n\n{self.c.fg_gr}Successfully updated: {AWS_CREDENTIALS_FILE_PATH}{self.c.rs}")
print(f"{self.c.fg_gr}Successfully updated: {AWS_CONFIG_FILE_PATH}{self.c.rs}")
| StarcoderdataPython |
3326436 | <reponame>mahmoudimus/cosmosquest-ng
from glob import glob
from os.path import basename
from os.path import splitext
import setuptools
setuptools.setup(
name='kosmosquest-ng',
version='0.1-beta',
url='https://github.com/mahmoudimus/kosmosquest-ng',
license='Apache License 2.0',
author='mahmoudimus',
author_email='mahmoud - @ - linux.com',
description='kosmosquest-ng -- an open source implementation of cosmos quest',
packages=setuptools.find_packages('.'),
package_dir={'': '.'},
py_modules=[splitext(basename(path))[0] for path in glob('./*.py')],
include_package_data=True,
zip_safe=False,
platforms='any',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache License 2.0',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
) | StarcoderdataPython |
4825149 | <filename>solutions/513_find_bottom_left_tree_value.py
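# The LeetCode judge predefines TreeNode; this minimal stand-in (assumed to match the
# judge's constructor signature) lets the snippet run locally.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right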
class Solution:
def findBottomLeftValue(self, root: TreeNode) -> int:
"""BFS.
Running time: O(n) where n is the total number of nodes in the tree.
"""
lvl = [root]
while True:
nlvl = []
for node in lvl:
if node.left:
nlvl.append(node.left)
if node.right:
nlvl.append(node.right)
if not nlvl:
return lvl[0].val
lvl = nlvl
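if __name__ == "__main__":
    # Illustrative check on a small hand-built tree: 1 -> (2, 3), 2 -> (4, None).
    # The deepest, leftmost node holds 4.
    tree = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
    print(Solution().findBottomLeftValue(tree))  # expected: 4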
| StarcoderdataPython |
1706455 | from bitrix24_bridge.handlers.base import BaseModelHandler
from bitrix24_bridge.models import ProductBX
class ProductHandler(BaseModelHandler):
model = ProductBX
| StarcoderdataPython |
1694768 | import torch
import torch.nn as nn
from torch import Tensor as Tensor
import torch._C as _C
class BoundedTensor(Tensor):
@staticmethod
# We need to override the __new__ method since Tensor is a C class
def __new__(cls, x, ptb, *args, **kwargs):
if isinstance(x, Tensor):
tensor = super().__new__(cls, [], *args, **kwargs)
tensor.data = x.data
tensor.requires_grad = x.requires_grad
return tensor
else:
return super().__new__(cls, x, *args, **kwargs)
def __init__(self, x, ptb):
self.ptb = ptb
def __repr__(self):
if hasattr(self, 'ptb') and self.ptb is not None:
return '<BoundedTensor: {}, {}>'.format(super().__repr__(), self.ptb.__repr__())
else:
return '<BoundedTensor: {}, no ptb>'.format(super().__repr__())
def clone(self, *args, **kwargs):
tensor = BoundedTensor(super().clone(*args, **kwargs), self.ptb)
return tensor
def _func(self, func, *args, **kwargs):
temp = func(*args, **kwargs)
new_obj = BoundedTensor([], self.ptb)
new_obj.data = temp.data
new_obj.requires_grad = temp.requires_grad
return new_obj
# Copy to other devices with perturbation
def to(self, *args, **kwargs):
return self._func(super().to, *args, **kwargs)
@classmethod
def _convert(cls, ret):
if cls is Tensor:
return ret
if isinstance(ret, Tensor):
if True:
# The current implementation does not seem to need non-leaf BoundedTensor
return ret
else:
# Enable this branch if non-leaf BoundedTensor should be kept
ret = ret.as_subclass(cls)
if isinstance(ret, tuple):
ret = tuple(cls._convert(r) for r in ret)
return ret
if torch.__version__ >= '1.7':
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if not all(issubclass(cls, t) for t in types):
return NotImplemented
with _C.DisableTorchFunction():
ret = func(*args, **kwargs)
return cls._convert(ret)
class BoundedParameter(nn.Parameter):
def __new__(cls, data, ptb, requires_grad=True):
return BoundedTensor._make_subclass(cls, data, requires_grad)
def __init__(self, data, ptb, requires_grad=True):
self.ptb = ptb
self.requires_grad = requires_grad
def __deepcopy__(self, memo):
if id(self) in memo:
return memo[id(self)]
else:
result = type(self)(self.data.clone(), self.ptb, self.requires_grad)
memo[id(self)] = result
return result
def __repr__(self):
return 'BoundedParameter containing:\n{}\n{}'.format(
self.data.__repr__(), self.ptb.__repr__())
def __reduce_ex__(self, proto):
raise NotImplementedError
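# Illustrative only (assumes torch is installed): wrapping a plain tensor. `ptb` would
# normally be a perturbation-specification object, but any value (including None) is stored as-is.
# x = BoundedTensor(torch.randn(2, 3), ptb=None)
# print(x)  # the repr reports "no ptb" when ptb is None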
| StarcoderdataPython |
1758448 | <gh_stars>0
# !/usr/bin/env python3
# Author: C.K
# Email: <EMAIL>
# DateTime:2021-09-19 13:32:45
# Description:
from collections import Counter
from itertools import chain
from typing import List
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
bucket = [[] for _ in range(len(nums) + 1)]
Count = Counter(nums).items()
for num, freq in Count:
bucket[freq].append(num)
flat_list = list(chain(*bucket))
return flat_list[::-1][:k]
if __name__ == "__main__":
pass
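# Illustrative check with a made-up input:
# Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2) returns [1, 2].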
| StarcoderdataPython |
3393307 | from vk_api.execute import VkFunction
def test_execute(vk):
func_add = VkFunction('return %(x)s + %(y)s;', args=('x', 'y'))
func_get = VkFunction(
'return API.users.get(%(values)s)[0]["id"];',
args=('values',)
)
assert func_add(vk, 2, 6) == 8
assert func_get(vk, {'user_ids': 'durov'}) == 1
| StarcoderdataPython |
1722429 | <reponame>yasiupl/PSI
#!/bin/python3
import socket
import time
import struct
TCP_IP = '10.200.200.1'
TCP_PORT = 1338
BUFFER_SIZE = 1024
mean = 0
max = 0
min = 100
n = 0
N = 10
print("Measuring TCP roundtrip time to/from {0}".format(str(TCP_IP)))
while n < N:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
start = time.time()
s.send(struct.pack('!d', start))
data = s.recv(BUFFER_SIZE)
end = time.time()
s.close()
roundtrip_ms = (end - struct.unpack('!d', data)[0]) * 1000
print("[TCP {0}/{1}] Roundtrip time: {2:.2f}ms".format(n+1, N, roundtrip_ms))
mean += roundtrip_ms/N
if roundtrip_ms > max:
max = roundtrip_ms
if roundtrip_ms < min:
min = roundtrip_ms
n = n + 1
time.sleep(1)
print("Min: {0:.2f}, Mean: {1:.2f}, Max: {2:.2f}".format(min,mean,max)) | StarcoderdataPython |
1663380 | """
Project: RadarBook
File: right_circular_cone.py
Created by: <NAME>
On: 11/24/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
from numpy import sin, cos, exp, sqrt
from scipy.constants import c, pi
def radar_cross_section(frequency, cone_half_angle, base_radius, incident_angle):
"""
Calculate the radar cross section of a right circular cone.
:param frequency: The operating frequency (Hz).
:param cone_half_angle: The cone half angle (rad).
:param base_radius: The base radius (m).
:param incident_angle: The incident angle (rad).
:return: The radar cross section of a right circular cone (m^2).
"""
# Wavelength
wavelength = c / frequency
# Wavenumber
k = 2.0 * pi / wavelength
# Parameter "n"
n = 1.5 + cone_half_angle / pi
# Common factor
if incident_angle != 0.0:
value = (wavelength ** 2 * k * base_radius) / (4.0 * pi ** 2) * (sin(pi / n) / n) ** 2 / sin(incident_angle)
# Special case values
term1 = 1.0 / (cos(pi / n) - cos(3.0 * pi / n))
term2 = sin(pi / n) * exp(1j * (2.0 * k * base_radius - pi / 4.0)) / \
(n * sqrt(pi * k * base_radius) * (cos(pi / n) - cos(3.0 * pi / (2.0 * n))) ** 2)
nose_max = (wavelength ** 2 / pi) * (k * base_radius * sin(pi / n) / n) ** 2 * abs(term1 + term2) ** 2
spec_max = wavelength ** 2 * 8.0 * pi / 9.0 * (base_radius / wavelength) ** 3 / \
(sin(cone_half_angle) ** 2 * cos(cone_half_angle))
base_max = wavelength ** 2 * (k * base_radius) ** 4 / (4.0 * pi)
# Calculate the radar cross section
if incident_angle < 1e-6:
# Nose on, double diffraction on base
term1 = 1.0 / (cos(pi / n) - cos(3.0 * pi / n))
term2 = sin(pi / n) * exp(1j * (2.0 * k * base_radius - pi / 4.0)) / \
(n * sqrt(pi * k * base_radius) * (cos(pi/n) - cos(3.0 * pi / (2.0 * n))) ** 2)
rcs_vv = (wavelength ** 2 / pi) * (k * base_radius * sin(pi / n) / n) ** 2 * abs(term1 + term2) ** 2
rcs_hh = rcs_vv
elif abs(incident_angle - pi) < 1e-6:
# Base specular
rcs_vv = wavelength ** 2 * (k * base_radius) ** 4 / (4.0 * pi)
rcs_hh = rcs_vv
elif abs(incident_angle - (0.5 * pi - cone_half_angle)) < 1e-6:
# Normal to the generator of the cone
rcs_vv = wavelength ** 2 * 8.0 * pi / 9.0 * (base_radius / wavelength) ** 3 / \
(sin(cone_half_angle) ** 2 * cos(cone_half_angle))
rcs_hh = rcs_vv
elif 0.0 < incident_angle < cone_half_angle:
term1 = exp(1j * (2.0 * k * base_radius * sin(incident_angle) - pi / 4.0))
term2 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
term3 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
term4 = exp(-1j*(2.0 * k * base_radius * sin(incident_angle) - pi / 4.0))
term5 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((3.0 * pi + 2.0 * incident_angle) / n))
term6 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((3.0 * pi + 2.0 * incident_angle) / n))
rcs_vv = value * abs(term1 * term2 + term4 * term5) ** 2
rcs_hh = value * abs(term1 * term3 + term4 * term6) ** 2
if rcs_vv > nose_max:
rcs_vv = nose_max
if rcs_hh > nose_max:
rcs_hh = nose_max
elif cone_half_angle <= incident_angle < 0.5 * pi:
term1 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
term2 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
rcs_vv = value * term1 ** 2
rcs_hh = value * term2 ** 2
if rcs_vv > 0.8 * spec_max:
rcs_vv = spec_max * cos(25 * (incident_angle - (0.5 * pi - cone_half_angle)))
if rcs_hh > 0.8 * spec_max:
rcs_hh = spec_max * cos(25 * (incident_angle - (0.5 * pi - cone_half_angle)))
elif 0.5 * pi <= incident_angle < pi:
term1 = exp(1j * (2.0 * k * base_radius * sin(incident_angle) - pi / 4.0))
term2 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
term3 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
term4 = exp(-1j * (2.0 * k * base_radius * sin(incident_angle) - pi / 4.0))
term5 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((2.0 * incident_angle - pi) / n))
term6 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((2.0 * incident_angle - pi) / n))
rcs_vv = value * abs(term1 * term2 + term4 * term5) ** 2
rcs_hh = value * abs(term1 * term3 + term4 * term6) ** 2
if rcs_vv > base_max:
rcs_vv = base_max
if rcs_hh > base_max:
rcs_hh = base_max
return rcs_vv, rcs_hh
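if __name__ == "__main__":
    # Illustrative call with made-up parameters: 1 GHz, 15 degree half angle,
    # 0.2 m base radius, 30 degree incident angle.
    sigma_vv, sigma_hh = radar_cross_section(1e9, 15.0 * pi / 180.0, 0.2, 30.0 * pi / 180.0)
    print(sigma_vv, sigma_hh)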
| StarcoderdataPython |
92429 | from .xinput import XInputJoystick as Joystick
__all__ = ['Joystick']
| StarcoderdataPython |
120128 | import praw
# Reddit developer credentials
reddit = praw.Reddit(client_id="", client_secret="", username="", password="", user_agent="")
# Instagram password and username
IGusername = ""
IGpassword = ""
| StarcoderdataPython |
1715389 | <filename>FlowNetAPI.py<gh_stars>0
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from matplotlib import pyplot as plt
from scipy import misc
import argparse, os, sys, subprocess
import setproctitle, colorama
import numpy as np
from tqdm import tqdm
from glob import glob
from os.path import *
import json
from bunch import Bunch
import source.models, source.losses, source.datasets
from utils import flow_utils, tools
import utils.frame_utils as frame_utils
import torch.utils.data as data
from source.datasets import StaticRandomCrop as StaticRandomCrop
from source.datasets import StaticCenterCrop as StaticCenterCrop
from networks.resample2d_package.modules.resample2d import Resample2d
from networks.channelnorm_package.modules.channelnorm import ChannelNorm
from networks.submodules import *
class FlowNet2API(object):
def __init__(self, name_model="FlowNet2"):
self.name_model = name_model
self.get_config()
self.load_model()
def flow_estimate(self, img0, img1, save_flow=True, flow_visualize=False, is_cropped =False, inference_batch_size = 1):
self.mode = 'flow_estimation'
self.args.save_flow = save_flow
self.flow_visualize = flow_visualize
self.args.inference_batch_size = inference_batch_size
self.args.is_cropped = is_cropped
data = self.flow_dataloader(img0, img1)
self.inference(data, self.model_and_loss, offset=1)
def flow_warping(self,img0, img1, flow, is_cropped =False, warping_batch_size = 1):
self.mode = 'warping'
self.args.inference_batch_size = warping_batch_size
self.args.is_cropped = is_cropped
data = self.flow_dataloader( img0, img1,flow)
self.inference(data, self.warping_model, offset=1)
def flow_dataloader(self, img0, img1=0, flow=0):
dataset = FlowDataset(self.args, img0, img1,flow, mode=self.mode)
inference_dataloader = DataLoader(dataset, batch_size=self.args.inference_batch_size, shuffle=False, **self.gpuargs)
return inference_dataloader
def inference(self, data_loader, model, offset=0,):
model.eval()
if self.mode == 'flow_estimation':
if (self.args.save_flow):
flow_folder = self.args.inference_dir
if not os.path.exists(flow_folder):
os.makedirs(flow_folder)
self.args.inference_n_batches = np.inf if self.args.inference_n_batches < 0 else self.args.inference_n_batches
progress = tqdm(data_loader, ncols=100, total=np.minimum(len(data_loader), self.args.inference_n_batches),
desc='Inferencing ',
leave=True, position=offset)
for batch_idx, (data, target) in enumerate(progress):
if self.args.cuda:
data, target = [d.cuda(async=True) for d in data], [t.cuda(async=True) for t in target]
data, target = [Variable(d, volatile=True) for d in data], [Variable(t, volatile=True) for t in target]
if self.mode == 'flow_estimation':
losses, output = model(data[0], target[0], inference=True)
if self.args.save_flow:
for i in range(self.args.inference_batch_size):
_pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
flow_utils.writeFlow(join(flow_folder, '%06d.flo' % (batch_idx * self.args.inference_batch_size + i)),
_pflow)
if self.flow_visualize:
flowX = _pflow[:, :, 0]
plt.imshow(flowX)
plt.savefig(fname= join(flow_folder, '%06d_x.png' % (batch_idx * self.args.inference_batch_size + i)))
flowY = _pflow[:, :, 1]
plt.imshow(flowY)
plt.savefig(
fname=join(flow_folder, '%06d_y.png' % (batch_idx * self.args.inference_batch_size + i)))
elif self.mode == 'warping':
warped_data, losses = model(data[0], target[0])
for i in range(self.args.inference_batch_size):
warped_data = warped_data[i].data.cpu().numpy().transpose(1, 2, 0)
misc.imsave('warped_image' + str(batch_idx) + '.png', warped_data)
progress.update(1)
progress.close()
return
def get_config(self):
json_file = "config.json"
with open(json_file, 'r') as config_file:
config_dict = json.load(config_file)
# convert the dictionary to a namespace using bunch lib
self.args = Bunch(config_dict)
self.args.name_model = self.name_model
self.args.model = self.name_model.replace('-', '')
self.args.resume = os.path.join(self.args.dir_checkpoints,(self.args.name_model + "_checkpoint.pth.tar"))
self.args.model_class = tools.module_to_dict(source.models)[self.args.model]
self.args.loss_class = tools.module_to_dict(source.losses)[self.args.loss]
self.args.cuda = not self.args.no_cuda and torch.cuda.is_available()
self.args.inference_dir = os.path.join("{}/inference".format(self.args.save), self.args.name_model)
def load_model(self):
self.gpuargs = {'num_workers': self.args.number_workers, 'pin_memory': True} if self.args.cuda else {}
with tools.TimerBlock("Building {} model".format(self.args.model)) as block:
class ModelAndLoss(nn.Module):
def __init__(self, args):
super(ModelAndLoss, self).__init__()
kwargs = tools.kwargs_from_args(args, 'model')
self.model = args.model_class(args, **kwargs)
kwargs = tools.kwargs_from_args(args, 'loss')
self.loss = args.loss_class(args, **kwargs)
def forward(self, data, target, inference=False):
output = self.model(data)
loss_values = self.loss(output, target)
if not inference:
return loss_values
else:
return loss_values, output
self.model_and_loss = ModelAndLoss(self.args)
# block.log('Effective Batch Size: {}'.format(self.args.effective_batch_size))
block.log('Number of parameters: {}'.format(
sum([p.data.nelement() if p.requires_grad else 0 for p in self.model_and_loss.parameters()])))
# assign model and loss to cuda or wrap with DataParallel
if self.args.cuda and (self.args.number_gpus > 0) and self.args.fp16:
block.log('Parallelizing')
model_and_loss = nn.parallel.DataParallel(self.model_and_loss, device_ids=list(range(self.args.number_gpus)))
block.log('Initializing CUDA')
model_and_loss = model_and_loss.cuda().half()
torch.cuda.manual_seed(self.args.seed)
param_copy = [param.clone().type(torch.cuda.FloatTensor).detach() for param in
model_and_loss.parameters()]
elif self.args.cuda and self.args.number_gpus > 0:
block.log('Initializing CUDA')
model_and_loss = self.model_and_loss.cuda()
block.log('Parallelizing')
model_and_loss = nn.parallel.DataParallel(model_and_loss, device_ids=list(range(self.args.number_gpus)))
torch.cuda.manual_seed(self.args.seed)
else:
block.log('CUDA not being used')
torch.manual_seed(self.args.seed)
cwd = os.getcwd()
print(cwd)
# Load weights if needed, otherwise randomly initialize
if self.args.resume and os.path.isfile(self.args.resume):
block.log("Loading checkpoint '{}'".format(self.args.resume))
checkpoint = torch.load(self.args.resume)
# if (not args.inference) and (not args.test):
# args.start_epoch = checkpoint['epoch']
# best_err = checkpoint['best_EPE']
model_and_loss.module.model.load_state_dict(checkpoint['state_dict'])
block.log("Loaded checkpoint '{}' (at epoch {})".format(self.args.resume, checkpoint['epoch']))
elif self.args.resume:
block.log("No checkpoint found at '{}'".format(self.args.resume))
quit()
else:
block.log("Random initialization")
block.log("Initializing save directory: {}".format(self.args.save))
if not os.path.exists(self.args.save):
os.makedirs(self.args.save)
self.warping_model = FlowWarping()
print("Warping Model initialized")
class FlowWarping(nn.Module):
def __init__(self):
super(FlowWarping, self).__init__()
self.channelnorm = ChannelNorm()
self.resample1 = Resample2d()
def forward(self, input, target):
img1 = input[:,:,0,:,:]
img0 = input[:,:,1,:,:]
flow = target
frame_size = img0.shape
resampled_img1 = self.resample1(img1, flow)
diff_img0 = img0 - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0) / (frame_size[1] * frame_size[2])
return resampled_img1, norm_diff_img0
class FlowDataset(data.Dataset):
def __init__(self, args, img0, img1=0, flow=0, mode='flow_estimation'):
self.args = args
self.is_cropped = self.args.is_cropped
self.crop_size = self.args.crop_size
self.render_size = self.args.inference_size
self.mode = mode
self.image_list = []
self.flow_list = []
self.frame_size = img0[0].shape
for i in range(len(img0)):
if self.mode == 'flow_estimation':
self.flow_size = img0[i][:, :, 0:2].shape
flow_ = np.zeros(self.flow_size, dtype=float)
elif self.mode == 'warping':
self.img_size = img0[i].shape
img1[i] = np.zeros(self.img_size, dtype=float)
flow_ = flow[i]
self.image_list += [[img0[i], img1[i]]]
self.flow_list += [flow_]
self.size = len(self.image_list)
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0] % 64) or (
self.frame_size[1] % 64):
self.render_size[0] = ((self.frame_size[0]) / 64) * 64
self.render_size[1] = ((self.frame_size[1]) / 64) * 64
args.inference_size = self.render_size
assert (len(self.image_list) == len(self.flow_list))
def __getitem__(self, index):
index = index % self.size
img1 = self.image_list[index][0]
img2 = self.image_list[index][1]
flow = self.flow_list[index]
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = map(cropper, images)
if self.mode == 'flow_estimation':
flow = cropper(flow)
images = np.array(images).transpose(3, 0, 1, 2)
flow = flow.transpose(2, 0, 1)
images = torch.from_numpy(images.astype(np.float32))
flow = torch.from_numpy(flow.astype(np.float32))
return [images], [flow]
def __len__(self):
return self.size | StarcoderdataPython |
3233607 | """The driver for the opf kpoint selection.
This script requires both a POSCAR file and a GRIDGEN file to be present.
The POSCAR should be in the standard VASP format. The GRIDGEN file
should specify the target k-point density.
"""
import os
import numpy as np
from opf_python.universal import find_srBs
if not os.path.isfile("POSCAR"):
print("ERROR: The POSCAR file must be provided.")
exit()
if not os.path.isfile("GRIDGEN"):
print("ERROR: The GRIDGEN file must be provided.")
exit()
#Find A (the lattice vectors) from the POSCAR
A = []
with open("POSCAR","r") as f:
f.readline()
f.readline()
for _ in range(3):
A.append([float(x) for x in f.readline().strip().split()])
A = np.transpose(A)
#Read the k-point density from file.
with open("GRIDGEN","r") as f:
target = int(f.readline().strip())
Bs = find_srBs(A,target)
#now we want to find the grid vectors
grids = []
ns = []
for B in Bs:
grids.append(np.transpose(np.linalg.inv(B)))
ns.append(np.linalg.det(B)/np.linalg.det(A))
#now I want to find the packing fraction and the r_min for each of these grids.
from phenum.vector_utils import _minkowski_reduce_basis
rmin = []
pack_frac = []
for grid in grids:
min_grid = _minkowski_reduce_basis(grid)
rm = min(np.linalg.norm(min_grid,axis=0))
pf = 4/3.*np.pi*rm**2/np.dot(min_grid[0],np.cross(min_grid[1],min_grid[2]))
rmin.append(rm)
pack_frac.append(pf)
| StarcoderdataPython |
48708 | import os, queue
from tablet import Tablet
f = open(os.path.join(os.path.dirname(__file__), '../input/18/part1.txt'), 'r')
def main():
instructionStrings = []
line = f.readline()
while line:
instructionStrings.append(line.rstrip())
line = f.readline()
q0 = queue.Queue()
q1 = queue.Queue()
t0 = Tablet(instructionStrings, 0, q0, q1)
t1 = Tablet(instructionStrings, 1, q1, q0)
isDeadlock = False
while not isDeadlock:
t0.run()
t1.run()
if t0.isWaiting() and t1.isWaiting():
isDeadlock = True
print(t1.getTimesSent())
if __name__ == '__main__':
main()
| StarcoderdataPython |
3373392 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-01-24 17:03
from __future__ import unicode_literals
import colorfield.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import filer.fields.image
import parler.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
('sites', '0002_alter_domain_unique'),
]
operations = [
migrations.CreateModel(
name='Accessibility',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('accessKey', models.CharField(blank=True, help_text='Remaps the accesskey that the module is assigned to', max_length=512, null=True)),
('highlightFocus', models.NullBooleanField(help_text='Determines if the module should use more accentuated styling to highlight elements in focus')),
],
),
migrations.CreateModel(
name='Branding',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='A Unique identifier for this branding', max_length=255)),
('fontFamily', models.CharField(blank=True, help_text='CSS font-family for the entire module e.g. Arial, sans-serif', max_length=255, null=True)),
('fontSizeTitle', models.CharField(blank=True, help_text='CSS font-size for the main (h2) title e.g. 1.2em', max_length=255, null=True)),
('fontSizeHeaders', models.CharField(blank=True, help_text='CSS font-size for the sub (h3) titles e.g. 1em', max_length=255, null=True)),
('fontSize', models.CharField(blank=True, help_text='CSS font-size throughout the module e.g. 0.8em', max_length=255, null=True)),
('fontColor', colorfield.fields.ColorField(blank=True, help_text='CSS color for all text and icons within the module.', max_length=18, null=True)),
('backgroundColor', colorfield.fields.ColorField(blank=True, help_text='CSS background-color within the module.', max_length=18, null=True)),
('acceptText', colorfield.fields.ColorField(blank=True, help_text="CSS color for text of the module's primary 'accept' buttons", max_length=18, null=True)),
('acceptBackground', colorfield.fields.ColorField(blank=True, help_text="CSS background-color for the module's primary 'accept' buttons", max_length=18, null=True)),
('toggleText', colorfield.fields.ColorField(blank=True, help_text="CSS color for the toggle button's text", max_length=18, null=True)),
('toggleColor', colorfield.fields.ColorField(blank=True, help_text='CSS background-color for the movable part of the toggle slider', max_length=18, null=True)),
('toggleBackground', colorfield.fields.ColorField(blank=True, help_text='CSS background-color for the toggle background.', max_length=18, null=True)),
('alertText', colorfield.fields.ColorField(blank=True, help_text='CSS color within the alert areas', max_length=18, null=True)),
('alertBackground', colorfield.fields.ColorField(blank=True, help_text='CSS background-color to highlight the alert areas', max_length=18, null=True)),
('buttonIconWidth', models.CharField(blank=True, help_text='If buttonIcon exists, the css width', max_length=255, null=True)),
('buttonIconHeight', models.CharField(blank=True, help_text='If buttonIcon exists, the css height', max_length=255, null=True)),
('removeIcon', models.NullBooleanField(help_text="Whether or not to remove the button icon entirely. Please note, if you do this, you will need to present an alternative trigger to run the module's open() function.")),
('removeAbout', models.NullBooleanField(help_text="Whether or not to remove the 'About this tool' link")),
('buttonIcon', filer.fields.image.FilerImageField(blank=True, help_text='Icon in the bottom corner', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.FILER_IMAGE_MODEL)),
],
),
migrations.CreateModel(
name='CallbackFunction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('automated_prefix', models.CharField(blank=True, max_length=512, null=True)),
('function_text', models.TextField(blank=True, help_text='JavaScript function, e.g. "function () {...}" ', null=True)),
],
),
migrations.CreateModel(
name='Cookie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512, unique=True)),
],
),
migrations.CreateModel(
name='CookieControl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('apiKey', models.CharField(max_length=255)),
('product', models.CharField(choices=[(b'COMMUNITY', b'COMMUNITY'), (b'PRO', b'PRO'), (b'PRO_MULTISITE', b'PRO_MULTISITE')], max_length=255)),
('position', models.CharField(blank=True, choices=[(b'LEFT', b'LEFT'), (b'RIGHT', b'RIGHT')], max_length=255, null=True)),
('theme', models.CharField(blank=True, choices=[(b'LIGHT', b'LIGHT'), (b'DARK', b'DARK')], max_length=255, null=True)),
('toggleType', models.CharField(blank=True, choices=[(b'slider', b'slider'), (b'checkbox', b'checkbox')], max_length=255, null=True)),
('closeStyle', models.CharField(blank=True, choices=[(b'icon', b'icon'), (b'labelled', b'labelled'), (b'button', b'button')], max_length=255, null=True)),
('consentCookieExpiry', models.PositiveIntegerField(blank=True, default=90, null=True)),
('logConsent', models.NullBooleanField()),
('notifyOnce', models.NullBooleanField()),
('initialState', models.CharField(blank=True, choices=[(b'open', b'open'), (b'closed', b'closed'), (b'notify', b'notify')], max_length=255, null=True)),
('layout', models.CharField(blank=True, choices=[(b'slideout', b'slideout'), (b'popup', b'popup')], max_length=255, null=True)),
('excludedCountries', django_countries.fields.CountryField(blank=True, max_length=746, multiple=True)),
('accessibility', models.ForeignKey(blank=True, help_text='Determines the accessibility helpers available.', null=True, on_delete=django.db.models.deletion.CASCADE, to='django_cookie_control.Accessibility')),
('branding', models.ForeignKey(blank=True, help_text="Set all aspects of the module's styling, and remove any back links to CIVIC.", null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_cookie_control.Branding')),
('necessaryCookies', models.ManyToManyField(blank=True, help_text="List of cookie names necessary for your website's core functionality,", to='django_cookie_control.Cookie')),
('onLoad', models.ForeignKey(blank=True, help_text='Defines a function to be triggered after the module initiates.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_cookie_control.CallbackFunction')),
],
options={
'verbose_name': 'Cookie Control',
'verbose_name_plural': 'Cookie Controls',
},
),
migrations.CreateModel(
name='PurposeObject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('recommendedState', models.NullBooleanField(help_text='Should category be accepted (opted in)')),
('lawfulBasis', models.CharField(blank=True, choices=[(b'consent', b'Consent'), (b'legitimate interest', b'Legitimate Interest')], help_text='requires explicit user consent', max_length=255, null=True)),
('cookies', models.ManyToManyField(blank=True, to='django_cookie_control.Cookie')),
('onAccept', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='accept_function', to='django_cookie_control.CallbackFunction')),
('onRevoke', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='revoke_function', to='django_cookie_control.CallbackFunction')),
],
options={
'verbose_name': 'Optional Cookie Control',
'verbose_name_plural': 'Optional Cookie Controls',
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='PurposeObjectTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=255, verbose_name='A Unique identifier for the category')),
('label', models.CharField(blank=True, help_text='Descriptive Title', max_length=512, null=True)),
('description', models.TextField(blank=True, help_text='Text introducing the privacy statement', null=True)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='django_cookie_control.PurposeObject')),
],
options={
'managed': True,
'db_table': 'django_cookie_control_purposeobject_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'Optional Cookie Control Translation',
},
),
migrations.CreateModel(
name='Statement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.CharField(blank=True, help_text='URL where your terms may be accessed', max_length=512, null=True)),
('updated', models.DateField(blank=True, help_text='Terms updated date', null=True)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='StatementTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, help_text='Text introducing the privacy statement', null=True)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='django_cookie_control.Statement')),
],
options={
'managed': True,
'db_table': 'django_cookie_control_statement_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'statement Translation',
},
),
migrations.CreateModel(
name='TextValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='TextValueTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.TextField(blank=True, null=True)),
('intro', models.TextField(blank=True, null=True)),
('acceptRecommended', models.TextField(blank=True, null=True)),
('necessaryTitle', models.TextField(blank=True, null=True)),
('necessaryDescription', models.TextField(blank=True, null=True)),
('thirdPartyTitle', models.TextField(blank=True, null=True)),
('thirdPartyDescription', models.TextField(blank=True, null=True)),
('on', models.TextField(blank=True, null=True)),
('off', models.TextField(blank=True, null=True)),
('notifyTitle', models.TextField(blank=True, null=True)),
('notifyDescription', models.TextField(blank=True, null=True)),
('accept', models.TextField(blank=True, null=True)),
('settings', models.TextField(blank=True, null=True)),
('closeLabel', models.TextField(blank=True, null=True)),
('accessibilityAlert', models.TextField(blank=True, null=True)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='django_cookie_control.TextValue')),
],
options={
'managed': True,
'db_table': 'django_cookie_control_textvalue_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'text value Translation',
},
),
migrations.CreateModel(
name='ThirdPartyCookie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512)),
('optOutLink', models.URLField(help_text='Opt-out Link for third-party cookies')),
],
),
migrations.AddField(
model_name='purposeobject',
name='thirdPartyCookies',
field=models.ManyToManyField(blank=True, to='django_cookie_control.ThirdPartyCookie'),
),
migrations.AddField(
model_name='cookiecontrol',
name='optionalCookies',
field=models.ManyToManyField(blank=True, help_text='List of Optional Cookie Groups that your website may use.', to='django_cookie_control.PurposeObject'),
),
migrations.AddField(
model_name='cookiecontrol',
name='site',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='sites.Site'),
),
migrations.AddField(
model_name='cookiecontrol',
name='statement',
field=models.ForeignKey(blank=True, help_text='Cookie Control will respect user given consent until either the cookie expires, or there is a change in your privacy statement.', null=True, on_delete=django.db.models.deletion.CASCADE, to='django_cookie_control.Statement'),
),
migrations.AddField(
model_name='cookiecontrol',
name='text',
field=models.ForeignKey(blank=True, help_text='Determines the text used by Cookie Control.', null=True, on_delete=django.db.models.deletion.CASCADE, to='django_cookie_control.TextValue'),
),
migrations.AlterUniqueTogether(
name='textvaluetranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='statementtranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='purposeobjecttranslation',
unique_together=set([('language_code', 'master')]),
),
]
| StarcoderdataPython |
22780 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import os
import random
import time
import uuid
import pyrax
from pyrax.autoscale import AutoScaleClient
from pyrax.autoscale import AutoScalePolicy
from pyrax.autoscale import AutoScaleWebhook
from pyrax.autoscale import ScalingGroup
from pyrax.autoscale import ScalingGroupManager
from pyrax.client import BaseClient
from pyrax.clouddatabases import CloudDatabaseClient
from pyrax.clouddatabases import CloudDatabaseDatabaseManager
from pyrax.clouddatabases import CloudDatabaseInstance
from pyrax.clouddatabases import CloudDatabaseManager
from pyrax.clouddatabases import CloudDatabaseUser
from pyrax.clouddatabases import CloudDatabaseUserManager
from pyrax.clouddatabases import CloudDatabaseVolume
from pyrax.cloudblockstorage import CloudBlockStorageClient
from pyrax.cloudblockstorage import CloudBlockStorageManager
from pyrax.cloudblockstorage import CloudBlockStorageSnapshot
from pyrax.cloudblockstorage import CloudBlockStorageSnapshotManager
from pyrax.cloudblockstorage import CloudBlockStorageVolume
from pyrax.cloudloadbalancers import CloudLoadBalancer
from pyrax.cloudloadbalancers import CloudLoadBalancerManager
from pyrax.cloudloadbalancers import CloudLoadBalancerClient
from pyrax.cloudloadbalancers import Node
from pyrax.cloudloadbalancers import VirtualIP
from pyrax.clouddns import CloudDNSClient
from pyrax.clouddns import CloudDNSDomain
from pyrax.clouddns import CloudDNSManager
from pyrax.clouddns import CloudDNSRecord
from pyrax.clouddns import CloudDNSPTRRecord
from pyrax.cloudnetworks import CloudNetwork
from pyrax.cloudnetworks import CloudNetworkClient
from pyrax.cloudmonitoring import CloudMonitorClient
from pyrax.cloudmonitoring import CloudMonitorEntity
from pyrax.cloudmonitoring import CloudMonitorCheck
from pyrax.cloudmonitoring import CloudMonitorNotification
from pyrax.image import Image
from pyrax.image import ImageClient
from pyrax.image import ImageManager
from pyrax.image import ImageMemberManager
from pyrax.image import ImageTagManager
from pyrax.object_storage import BulkDeleter
from pyrax.object_storage import Container
from pyrax.object_storage import ContainerManager
from pyrax.object_storage import FolderUploader
from pyrax.object_storage import StorageClient
from pyrax.object_storage import StorageObject
from pyrax.object_storage import StorageObjectManager
from pyrax.queueing import Queue
from pyrax.queueing import QueueClaim
from pyrax.queueing import QueueMessage
from pyrax.queueing import QueueClient
from pyrax.queueing import QueueManager
import pyrax.exceptions as exc
from pyrax.base_identity import BaseIdentity
from pyrax.base_identity import Endpoint
from pyrax.base_identity import Service
from pyrax.identity.rax_identity import RaxIdentity
from pyrax.identity.keystone_identity import KeystoneIdentity
import pyrax.utils as utils
example_uri = "http://example.com"
class FakeResponse(object):
headers = {}
body = ""
status_code = 200
reason = "Oops"
content = "Oops"
@property
def status(self):
# TEMPORARY - until the cf_wrapper code is removed.
return self.status_code
@status.setter
def status(self, val):
# TEMPORARY - until the cf_wrapper code is removed.
self.status_code = val
def getheaders(self):
return self.headers
def read(self):
return "Line1\nLine2"
def get(self, arg):
return self.headers.get(arg)
def json(self):
return self.content
class FakeIterator(utils.ResultsIterator):
def _init_methods(self):
pass
class FakeClient(object):
user_agent = "Fake"
USER_AGENT = "Fake"
def __init__(self, *args, **kwargs):
self.identity = FakeIdentity()
class FakeStorageClient(StorageClient):
def __init__(self, identity=None, *args, **kwargs):
if identity is None:
identity = FakeIdentity()
super(FakeStorageClient, self).__init__(identity, *args, **kwargs)
def create(self, name):
return FakeContainer(self._manager, {"name": name})
class FakeContainerManager(ContainerManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeStorageClient()
super(FakeContainerManager, self).__init__(api, *args, **kwargs)
class FakeContainer(Container):
def __init__(self, *args, **kwargs):
super(FakeContainer, self).__init__(*args, **kwargs)
self.object_manager = FakeStorageObjectManager(self.manager.api,
uri_base=self.name)
self.object_manager._container = self
class FakeStorageObjectManager(StorageObjectManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeStorageClient()
if "uri_base" not in kwargs:
kwargs["uri_base"] = utils.random_ascii()
super(FakeStorageObjectManager, self).__init__(api, *args, **kwargs)
class FakeStorageObject(StorageObject):
def __init__(self, manager, name=None, total_bytes=None, content_type=None,
last_modified=None, etag=None, attdict=None):
"""
The object can either be initialized with individual params, or by
passing the dict that is returned by swiftclient.
"""
self.manager = manager
self.name = name
self.bytes = total_bytes or 0
self.content_type = content_type
self.last_modified = last_modified
self.hash = etag
if attdict:
self._read_attdict(attdict)
fake_attdict = {"name": "fake",
"content-length": 42,
"content-type": "text/html",
"etag": "ABC",
"last-modified": "Tue, 01 Jan 2013 01:02:03 GMT",
}
class FakeServer(object):
id = utils.random_unicode()
class FakeService(object):
user_agent = "FakeService"
USER_AGENT = "FakeService"
def __init__(self, *args, **kwargs):
self.client = FakeClient()
self.Node = FakeNode
self.VirtualIP = FakeVirtualIP
self.loadbalancers = FakeLoadBalancer()
self.id = utils.random_unicode()
def authenticate(self):
pass
def get_protocols(self):
return ["HTTP"]
def get_algorithms(self):
return ["RANDOM"]
def get_usage(self):
pass
class FakeCSClient(FakeService):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeCSClient, self).__init__(ident, *args, **kwargs)
def dummy(self):
pass
self.servers = FakeService()
utils.add_method(self.servers, dummy, "list")
self.images = FakeService()
utils.add_method(self.images, dummy, "list")
self.flavors = FakeService()
utils.add_method(self.flavors, dummy, "list")
class FakeFolderUploader(FolderUploader):
def __init__(self, *args, **kwargs):
super(FakeFolderUploader, self).__init__(*args, **kwargs)
# Useful for when we mock out the run() method.
self.actual_run = self.run
self.run = self.fake_run
def fake_run(self):
pass
class FakeBulkDeleter(BulkDeleter):
def __init__(self, *args, **kwargs):
super(FakeBulkDeleter, self).__init__(*args, **kwargs)
# Useful for when we mock out the run() method.
self.actual_run = self.run
self.run = self.fake_run
def fake_run(self):
time.sleep(0.0001)
self.results = {}
self.completed = True
class FakeManager(object):
def __init__(self, *args, **kwargs):
super(FakeManager, self).__init__(*args, **kwargs)
self.api = FakeClient()
def list(self):
pass
def get(self, item):
pass
def delete(self, item):
pass
def create(self, *args, **kwargs):
pass
def find(self, *args, **kwargs):
pass
def action(self, item, action_type, body={}):
pass
class FakeException(BaseException):
pass
class FakeKeyring(object):
password_set = False
def get_password(self, *args, **kwargs):
return "FAKE_TOKEN|FAKE_URL"
def set_password(self, *args, **kwargs):
self.password_set = True
class FakeEntity(object):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
def get(self, *args, **kwargs):
pass
def list(self, *args, **kwargs):
pass
class FakeDatabaseUser(CloudDatabaseUser):
pass
class FakeDatabaseVolume(CloudDatabaseVolume):
def __init__(self, instance, *args, **kwargs):
self.instance = instance
self.size = 1
self.used = 0.2
class FakeDatabaseInstance(CloudDatabaseInstance):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
self.manager = FakeDatabaseManager()
self.manager.api = FakeDatabaseClient()
self._database_manager = CloudDatabaseDatabaseManager(
FakeDatabaseClient())
self._user_manager = CloudDatabaseUserManager(FakeDatabaseClient())
self.volume = FakeDatabaseVolume(self)
class FakeDatabaseManager(CloudDatabaseManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeDatabaseClient()
super(FakeDatabaseManager, self).__init__(api, *args, **kwargs)
self.uri_base = "instances"
class FakeDatabaseClient(CloudDatabaseClient):
def __init__(self, *args, **kwargs):
self._manager = FakeDatabaseManager(self)
self._flavor_manager = FakeManager()
ident = FakeIdentity()
super(FakeDatabaseClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeNovaVolumeClient(BaseClient):
def __init__(self, *args, **kwargs):
pass
class FakeBlockStorageManager(CloudBlockStorageManager):
def __init__(self, api=None, *args, **kwargs):
ident = FakeIdentity()
if api is None:
api = FakeBlockStorageClient(ident)
super(FakeBlockStorageManager, self).__init__(api, *args, **kwargs)
class FakeBlockStorageVolume(CloudBlockStorageVolume):
def __init__(self, *args, **kwargs):
volname = utils.random_unicode(8)
self.id = utils.random_unicode()
self.manager = FakeBlockStorageManager()
self._nova_volumes = FakeNovaVolumeClient()
class FakeBlockStorageSnapshot(CloudBlockStorageSnapshot):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
self.manager = FakeManager()
self.status = "available"
class FakeBlockStorageClient(CloudBlockStorageClient):
def __init__(self, *args, **kwargs):
self._types_manager = FakeManager()
self._snapshot_manager = FakeManager()
ident = FakeIdentity()
super(FakeBlockStorageClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeSnapshotManager(CloudBlockStorageSnapshotManager):
def __init__(self, api=None, *args, **kwargs):
ident = FakeIdentity()
if api is None:
api = FakeBlockStorageClient(ident)
super(FakeSnapshotManager, self).__init__(api, *args, **kwargs)
class FakeLoadBalancerClient(CloudLoadBalancerClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeLoadBalancerClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeLoadBalancerManager(CloudLoadBalancerManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeLoadBalancerClient()
super(FakeLoadBalancerManager, self).__init__(api, *args, **kwargs)
class FakeLoadBalancer(CloudLoadBalancer):
def __init__(self, name=None, info=None, *args, **kwargs):
name = name or utils.random_ascii()
info = info or {"fake": "fake"}
super(FakeLoadBalancer, self).__init__(name, info, *args, **kwargs)
self.id = utils.random_ascii()
self.port = random.randint(1, 256)
self.manager = FakeLoadBalancerManager()
class FakeNode(Node):
def __init__(self, address=None, port=None, condition=None, weight=None,
status=None, parent=None, type=None, id=None):
if address is None:
address = "0.0.0.0"
if port is None:
port = 80
if id is None:
id = utils.random_unicode()
super(FakeNode, self).__init__(address=address, port=port,
condition=condition, weight=weight, status=status,
parent=parent, type=type, id=id)
class FakeVirtualIP(VirtualIP):
pass
class FakeStatusChanger(object):
check_count = 0
id = utils.random_unicode()
@property
def status(self):
if self.check_count < 2:
self.check_count += 1
return "changing"
return "ready"
class FakeDNSClient(CloudDNSClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeDNSClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeDNSManager(CloudDNSManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeDNSClient()
super(FakeDNSManager, self).__init__(api, *args, **kwargs)
self.resource_class = FakeDNSDomain
self.response_key = "domain"
self.plural_response_key = "domains"
self.uri_base = "domains"
class FakeDNSDomain(CloudDNSDomain):
def __init__(self, *args, **kwargs):
self.id = utils.random_ascii()
self.name = utils.random_unicode()
self.manager = FakeDNSManager()
class FakeDNSRecord(CloudDNSRecord):
def __init__(self, mgr, info, *args, **kwargs):
super(FakeDNSRecord, self).__init__(mgr, info, *args, **kwargs)
class FakeDNSPTRRecord(CloudDNSPTRRecord):
pass
class FakeDNSDevice(FakeLoadBalancer):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
class FakeCloudNetworkClient(CloudNetworkClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeCloudNetworkClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeCloudNetwork(CloudNetwork):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
label = kwargs.pop("label", kwargs.pop("name", utils.random_unicode()))
info["label"] = label
super(FakeCloudNetwork, self).__init__(manager=None, info=info, *args,
**kwargs)
self.id = uuid.uuid4().hex
class FakeAutoScaleClient(AutoScaleClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
self._manager = FakeManager()
super(FakeAutoScaleClient, self).__init__(ident, *args, **kwargs)
class FakeAutoScalePolicy(AutoScalePolicy):
def __init__(self, *args, **kwargs):
super(FakeAutoScalePolicy, self).__init__(*args, **kwargs)
self.id = utils.random_ascii()
class FakeAutoScaleWebhook(AutoScaleWebhook):
def __init__(self, *args, **kwargs):
super(FakeAutoScaleWebhook, self).__init__(*args, **kwargs)
self.id = utils.random_ascii()
class FakeScalingGroupManager(ScalingGroupManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeAutoScaleClient()
super(FakeScalingGroupManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeScalingGroup(ScalingGroup):
def __init__(self, name=None, info=None, *args, **kwargs):
name = name or utils.random_ascii()
info = info or {"fake": "fake", "scalingPolicies": []}
self.groupConfiguration = {}
super(FakeScalingGroup, self).__init__(name, info, *args, **kwargs)
self.id = utils.random_ascii()
self.name = name
self.manager = FakeScalingGroupManager()
class FakeCloudMonitorClient(CloudMonitorClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeCloudMonitorClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeCloudMonitorEntity(CloudMonitorEntity):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["id"] = utils.random_ascii()
super(FakeCloudMonitorEntity, self).__init__(FakeManager(), info=info,
*args, **kwargs)
self.manager.api = FakeCloudMonitorClient()
class FakeCloudMonitorCheck(CloudMonitorCheck):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
entity = kwargs.pop("entity", None)
info["id"] = utils.random_ascii()
super(FakeCloudMonitorCheck, self).__init__(FakeManager(), info, *args,
**kwargs)
self.set_entity(entity)
self.id = uuid.uuid4()
class FakeCloudMonitorNotification(CloudMonitorNotification):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
super(FakeCloudMonitorNotification, self).__init__(manager=None,
info=info, *args, **kwargs)
self.id = uuid.uuid4()
class FakeQueue(Queue):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["name"] = utils.random_unicode()
mgr = kwargs.pop("manager", FakeQueueManager())
super(FakeQueue, self).__init__(manager=mgr, info=info, *args, **kwargs)
class FakeQueueClaim(QueueClaim):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["name"] = utils.random_unicode()
mgr = kwargs.pop("manager", FakeQueueManager())
super(FakeQueueClaim, self).__init__(manager=mgr, info=info, *args,
**kwargs)
class FakeQueueClient(QueueClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeQueueClient, self).__init__(ident, "fakeuser",
"<PASSWORD>password", *args, **kwargs)
class FakeQueueManager(QueueManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeQueueClient()
super(FakeQueueManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeImage(Image):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["name"] = utils.random_unicode()
info["id"] = utils.random_unicode()
mgr = kwargs.pop("manager", FakeImageManager())
kwargs["member_manager_class"] = FakeImageMemberManager
kwargs["tag_manager_class"] = FakeImageTagManager
super(FakeImage, self).__init__(mgr, info, *args, **kwargs)
class FakeImageClient(ImageClient):
def __init__(self, identity=None, *args, **kwargs):
if identity is None:
identity = FakeIdentity()
super(FakeImageClient, self).__init__(identity, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeImageMemberManager(ImageMemberManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeImageClient()
super(FakeImageMemberManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeImageTagManager(ImageTagManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeImageClient()
super(FakeImageTagManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeImageManager(ImageManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeImageClient()
super(FakeImageManager, self).__init__(api, *args, **kwargs)
self.plural_response_key = "images"
self.resource_class = FakeImage
self.id = utils.random_ascii()
class FakeIdentityService(Service):
def __init__(self, identity=None, *args, **kwargs):
self.identity = identity or FakeIdentity()
self.name = "fake"
self.prefix = ""
self.service_type = "fake"
self.clients = {}
self.endpoints = utils.DotDict()
class FakeEndpoint(Endpoint):
def __init__(self, ep_dict=None, service=None, region=None, identity=None):
if ep_dict is None:
ep_dict = {}
if identity is None:
identity = FakeIdentity()
if service is None:
service = FakeIdentityService(identity)
if region is None:
region = "fake_region"
super(FakeEndpoint, self).__init__(ep_dict, service, region, identity)
class FakeRaxIdentity(RaxIdentity):
pass
class FakeIdentity(BaseIdentity):
"""Class that returns canned authentication responses."""
def __init__(self, *args, **kwargs):
super(FakeIdentity, self).__init__(*args, **kwargs)
self._good_username = "fakeuser"
        self._good_password = "fakepassword"
self._default_region = random.choice(("DFW", "ORD"))
self.services = {"fake": FakeIdentityService(self)}
def authenticate(self, connect=False):
if ((self.username == self._good_username) and
(self.password == self._good_password)):
self._parse_response(self.fake_response())
self.authenticated = True
else:
self.authenticated = False
raise exc.AuthenticationFailed("No match for '%s'/'%s' "
"username/password" % (self.username, self.password))
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
self.token = token
self.tenant_id = tenant_id
self.tenant_name = tenant_name
self.authenticated = True
def get_token(self, force=False):
return self.token
def fake_response(self):
return fake_identity_response
fake_config_file = """[settings]
identity_type = rackspace
keyring_username =
region = FAKE
custom_user_agent = FAKE
http_debug =
"""
# This will handle both singular and plural responses.
fake_identity_user_response = {
"users": [{"name": "fake", "id": "fake"},
{"name": "faker", "id": "faker"}],
"user": {"name": "fake", "id": "fake"},
"roles": [{u'description': 'User Admin Role.',
'id': '3',
'name': 'identity:user-admin'}],
}
fake_identity_tenant_response = {"name": "fake", "id": "fake",
"description": "fake", "enabled": True}
fake_identity_tenants_response = {
"tenants": [
{"name": "fake", "id": "fake", "description": "fake",
"enabled": True},
{"name": "faker", "id": "faker", "description": "faker",
"enabled": True},
]}
fake_identity_tokens_response = {"access":
{'metadata': {u'is_admin': 0,
'roles': [u'asdfgh',
'sdfghj',
'dfghjk']},
'serviceCatalog': [{u'endpoints': [
{u'adminURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'id': 'dddddddddd',
'publicURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'internalURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'region': 'some_region'}],
'endpoints_links': [],
'name': 'nova',
'type': 'compute'},
{u'endpoints': [{u'adminURL': 'http://10.0.0.0:35357/v2.0',
'id': 'qweqweqwe',
'internalURL': 'http://10.0.0.0:5000/v2.0',
'publicURL': 'http://10.0.0.0:5000/v2.0',
'region': 'some_region'}],
'endpoints_links': [],
'name': 'keystone',
'type': 'identity'}],
'token': {u'expires': '1999-05-04T16:45:05Z',
'id': 'qweqweqwe',
'tenant': {u'description': 'admin Tenant',
'enabled': True,
'id': 'qweqweqwe',
'name': 'admin'}},
'user': {u'id': 'qweqweqwe',
'name': 'admin',
'roles': [{u'id': 'qweqweqwe', 'name': 'admin'},
{u'id': 'qweqweqwe', 'name': 'KeystoneAdmin'},
{u'id': 'qweqweqwe',
'name': 'KeystoneServiceAdmin'}],
'roles_links': [],
'username': 'admin'}}}
fake_identity_endpoints_response = {"access": {
"endpoints": ["fake", "faker", "fakest"]}}
fake_identity_response = {u'access':
{u'serviceCatalog': [
{u'endpoints': [{u'publicURL':
'https://ord.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'ORD',
'tenantId': '000000'},
{u'publicURL':
'https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'DFW',
'tenantId': '000000'},
{u'publicURL':
'https://syd.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'SYD',
'tenantId': '000000'}],
'name': 'cloudLoadBalancers',
'type': 'rax:load-balancer'},
{u'endpoints': [{u'internalURL':
'https://snet-aa.fake1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.fake1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'FAKE',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.dfw1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.dfw1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'DFW',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'ORD',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.syd1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'SYD',
'tenantId': 'MossoCloudFS_abc'}],
'name': 'cloudFiles',
'type': 'object-store'},
{u'endpoints': [{u'publicURL':
'https://dfw.servers.api.rackspacecloud.com/v2/000000',
'region': 'DFW',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://dfw.servers.api.rackspacecloud.com/v2',
'versionList': 'https://dfw.servers.api.rackspacecloud.com/'},
{u'publicURL':
'https://ord.servers.api.rackspacecloud.com/v2/000000',
'region': 'ORD',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://ord.servers.api.rackspacecloud.com/v2',
'versionList': 'https://ord.servers.api.rackspacecloud.com/'},
{u'publicURL':
'https://syd.servers.api.rackspacecloud.com/v2/000000',
'region': 'SYD',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://syd.servers.api.rackspacecloud.com/v2',
'versionList': 'https://syd.servers.api.rackspacecloud.com/'}],
'name': 'cloudServersOpenStack',
'type': 'compute'},
{u'endpoints': [{u'publicURL':
'https://dns.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000'}],
'name': 'cloudDNS',
'type': 'rax:dns'},
{u'endpoints': [{u'publicURL':
'https://dfw.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'DFW',
'tenantId': '000000'},
{u'publicURL':
'https://syd.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'SYD',
'tenantId': '000000'},
{u'publicURL':
'https://ord.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'ORD',
'tenantId': '000000'}],
'name': 'cloudDatabases',
'type': 'rax:database'},
{u'endpoints': [{u'publicURL':
'https://servers.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000',
'versionId': '1.0',
'versionInfo': 'https://servers.api.rackspacecloud.com/v1.0',
'versionList': 'https://servers.api.rackspacecloud.com/'}],
'name': 'cloudServers',
'type': 'compute'},
{u'endpoints': [{u'publicURL':
'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'DFW',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'FAKE',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'SYD',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn2.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'ORD',
'tenantId': 'MossoCloudFS_abc'}],
'name': 'cloudFilesCDN',
'type': 'rax:object-cdn'},
{u'endpoints': [{u'publicURL':
'https://monitoring.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000'}],
'name': 'cloudMonitoring',
'type': 'rax:monitor'}],
u'token': {u'expires': '2222-02-22T22:22:22.000-02:00',
'id': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
'tenant': {u'id': '000000', 'name': '000000'}},
u'user': {u'id': '123456',
'name': 'fakeuser',
'RAX-AUTH:defaultRegion': 'DFW',
'roles': [{u'description': 'User Admin Role.',
'id': '3',
'name': 'identity:user-admin'}],
}}}
class FakeIdentityResponse(FakeResponse):
status_code = 200
response_type = "auth"
responses = {"auth": fake_identity_response,
"users": fake_identity_user_response,
"tenant": fake_identity_tenant_response,
"tenants": fake_identity_tenants_response,
"tokens": fake_identity_tokens_response,
"endpoints": fake_identity_endpoints_response,
}
@property
def content(self):
return self.responses.get(self.response_type)
def json(self):
return self.content
def read(self):
return json.dumps(self.content)
| StarcoderdataPython |
3358133 | <filename>twitch_irc.py
from twisted.internet import protocol, reactor
from collections import defaultdict
import bot
import time
import logging
import logging.config
logging.config.fileConfig('logging.conf')
class BotFactory(protocol.ClientFactory):
protocol = bot.TwitchBot
tags = defaultdict(dict)
activity = dict()
wait_time = 1
def clientConnectionLost(self, connector, reason):
logging.error("Lost connection, reconnecting")
self.protocol = reload(bot).TwitchBot
connector.connect()
def clientConnectionFailed(self, connector, reason):
msg = "Could not connect, retrying in {}s"
logging.warning(msg.format(self.wait_time))
time.sleep(self.wait_time)
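        # exponential backoff: double the wait after every failed attempt, capped at 512 seconds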
self.wait_time = min(512, self.wait_time * 2)
connector.connect()
if __name__ == "__main__":
reactor.connectTCP('irc.twitch.tv', 6667, BotFactory())
reactor.run()
| StarcoderdataPython |
4829220 | <filename>simprod-scripts/resources/scripts/nwandkowsky/detector_baseline/detector.py
#!/bin/sh /cvmfs/icecube.opensciencegrid.org/py2-v2/icetray-start
#METAPROJECT: /data/user/nwandkowsky/tarballs/simulation.V05-01-02/build/simulation.V05-01-02
# import required icecube-related stuff
from icecube import icetray, dataclasses, dataio, simclasses
from icecube.icetray import I3Units
from I3Tray import I3Tray
# command line options required to configure the simulation
from optparse import OptionParser
from os.path import expandvars
#./mcpe_nugen.py -s 2 -r 1 -g /cvmfs/icecube.opensciencegrid.org/data/GCD/GeoCalibDetectorStatus_IC86_Merged.i3.gz --domos 5 --domeff 0.99 --holeice flasher_p1=0.3_p2=0.0 -i /data/ana/Cscd/StartingEvents/NuGen_new/NuE/medium_energy/photon_spice3_2/1/photon_00000001.i3.zst -o nue_me_mcpe.i3.bz2 -b /data/ana/Cscd/StartingEvents/CORSIKA_bg/12531/photon_spice3_2/1/photon_00000001.i3.zst
usage = "usage: %prog [options] "
#outputfile"
parser = OptionParser(usage)
parser.add_option("-g", "--gcd",default="/home/nwandkowsky/workspace/data/GCD/GeoCalibDetectorStatus_IC86.55697_V2.i3",
dest="GCDFILE", help="Read geometry from GCDFILE (.i3{.gz} format)")
parser.add_option("-i", "--input", action="store", type="string", default="", dest="INPUT", help="Input file to process")
parser.add_option("-o", "--output", action="store", type="string", default="", dest="OUTPUT", help="Output i3 file")
parser.add_option("-s", "--seed",type="int",default=12345, dest="SEED", help="Initial seed for the random number generator")
parser.add_option("-r", "--runnumber", type="int", default=1, dest="RUNNUMBER", help="The run number for this simulation")
parser.add_option("--holeice",default="as_50", dest="HOLEICE", help="Holeice file")
parser.add_option("--domos", type="float", default=1., dest="DOMOS", help="dom oversizing parameter")
parser.add_option("--domeff", type="float", default=1., dest="DOMEFF", help="dom efficiency parameter")
parser.add_option("-b", "--bgfile", action="store", type="string", default="", dest="BGFILE", help="Output i3 file")
# parse cmd line args, bail out if anything is not understood
(options,args) = parser.parse_args()
GCD = options.GCDFILE
infile = options.INPUT
outfile = options.OUTPUT
print("Command line options parsed...")
holeiceparameterization=expandvars("$I3_SRC/ice-models/resources/models/holeice_msu/"+options.HOLEICE)
import os, sys
tray = I3Tray()
tray.AddModule("I3Reader", "reader", filenamelist=[GCD, infile] )
# import phys_services which includes rng
from icecube import phys_services
# set up a random number generator
randomService = phys_services.I3SPRNGRandomService(
seed = options.SEED,
nstreams = 100000000,
streamnum = options.RUNNUMBER)
tray.context['I3RandomService'] = randomService
prandomService = phys_services.I3SPRNGRandomService(
seed = options.SEED,
nstreams = 200000000,
streamnum = options.RUNNUMBER+100000000)
tray.Add('Delete',Keys=["I3MCTree","MMCTrackList"])
from icecube.simprod.segments.PropagateMuons import PropagateMuons
tray.AddSegment(PropagateMuons,"PropagateMuons", RandomService=prandomService)
def RemoveInvisible(frame):
return len(frame["PhotonSeriesMap"])>0;
tray.Add(RemoveInvisible)(("Streams",[icetray.I3Frame.DAQ]));
from icecube import polyplopia
tray.AddService("CoincidentI3ReaderServiceFactory","BackgroundService")(("FileName",options.BGFILE));
tray.AddModule("PoissonPEMerger")(
("BaseIsBackground",False),
("CoincidentEventService","BackgroundService"),
("MCTreeName","I3MCTree_preMuonProp"),
("RandomService","I3RandomService"),
("PhotonsToMerge","PhotonSeriesMap"),
("MMCTrackName","MMCTrackList")
);
tray.AddModule("I3GeometryDecomposer", "decomposeGeometry")
from icecube import clsim
tray.AddSegment(clsim.I3CLSimMakeHitsFromPhotons,"makePhotons",
PhotonSeriesName="PhotonSeriesMap",
MCPESeriesName="I3MCPESeriesMap",
RandomService='I3RandomService',
DOMOversizeFactor=options.DOMOS,
UnshadowedFraction=(1.17),
HoleIceParameterization=holeiceparameterization
)
# Delete all MCPEs we're not operating on
def empty_mcpe(frame):
entries = 0
for k in frame.keys():
if isinstance(frame[k], simclasses.I3MCPESeriesMap):
entries = entries + len(frame[k])
return entries>0
tray.AddModule(empty_mcpe, Streams=[icetray.I3Frame.DAQ])
map_in = 'I3MCPESeriesMap'
map_out = 'I3MCPESeriesMap_'+str(options.DOMEFF)
domeff = float(options.DOMEFF)/1.17
print map_in, map_out, domeff
# Downsampling from input pe series (map_in with highest dom eff = 1.17) to desired output pe series (map_out)
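# Worked example (illustration only, not in the original script): with --domeff 0.99,
# as in the usage line near the top of this file, SampleFrac = 0.99 / 1.17 ~= 0.846.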
tray.AddModule("I3DownsampleMCPE", InputName=map_in, OutputName=map_out, SampleFrac = domeff, RandomService = 'I3RandomService')
from icecube.simprod import segments
tray.AddSegment(segments.DetectorSim, "DetectorSim",
RandomService = 'I3RandomService',
RunID = options.RUNNUMBER,
GCDFile = GCD,
KeepMCHits = False,
KeepPropagatedMCTree = True,
KeepMCPulses = False,
SkipNoiseGenerator = False,
LowMem = True,
InputPESeriesMapName = map_out
)
# acquire some more MC truth information
def GetNeutrino(frame):
def sanitize(particle):
if particle is None:
return dataclasses.I3Particle()
else:
return particle
mcTree = frame["I3MCTree"]
primary = None
neutrino = None
for p in mcTree:
if mcTree.depth(p) != 0: continue
if p.is_neutrino:
if neutrino is None or p.energy > neutrino.energy:
neutrino = p
if primary is None or p.energy > primary.energy:
primary = p
del frame["MCPrimary"]
frame["MCPrimary"] = sanitize(primary)
tray.AddModule(GetNeutrino, "GetNeutrino", Streams=[icetray.I3Frame.DAQ])
def GetMCTrack(frame):
# Get the track from an MCTree. If there is no track, get the hadronic cascade.
mcTree = frame["I3MCTree"]
trackParticle = None
cascadeParticle = None
numCascades = 0
neutrino = None
for p in mcTree:
depth = mcTree.depth(p)
if depth == 0:
# if not p.is_neutrino:
# raise RuntimeError("primary particle is not a neutrino!")
if neutrino is None or p.energy > neutrino.energy:
neutrino = p
if depth != 1: continue # depth==0 is the root (assume it is the primary neutrino)
if p.type in [dataclasses.I3Particle.ParticleType.MuPlus,
dataclasses.I3Particle.ParticleType.MuMinus,
dataclasses.I3Particle.ParticleType.TauPlus,
dataclasses.I3Particle.ParticleType.TauMinus]:
# if trackParticle is not None:
# raise RuntimeError("got multiple leptons from a single neutrino.")
trackParticle = p
else:
if cascadeParticle is None or p.energy > cascadeParticle.energy:
cascadeParticle = p
numCascades += 1
theTrack = None
if trackParticle is not None:
theTrack = trackParticle
else:
if numCascades == 0: theTrack = None
if numCascades == 1: theTrack = cascadeParticle
if neutrino is None:
raise RuntimeError("Internal error. Cascades found, but no neutrino in MCTree.")
theTrack = neutrino
if theTrack is None:
raise RuntimeError("no MC track could be found in MCTree")
# shift the vertex to the point of closest approach to the origin (0,0,0)
a = - (theTrack.pos.x*theTrack.dir.x + theTrack.pos.y*theTrack.dir.y + theTrack.pos.z*theTrack.dir.z)
newPos = dataclasses.I3Position(theTrack.pos.x + theTrack.dir.x * a,
theTrack.pos.y + theTrack.dir.y * a,
theTrack.pos.z + theTrack.dir.z * a)
newTime = theTrack.time + a/dataclasses.I3Constants.c
# generate a "reconstructed" particle from the MCTrack
outputTrack = dataclasses.I3Particle()
outputTrack.shape = dataclasses.I3Particle.ParticleShape.InfiniteTrack
outputTrack.pos = newPos
outputTrack.dir = theTrack.dir
outputTrack.time = newTime
outputTrack.fit_status = dataclasses.I3Particle.FitStatus.OK
outputTrack.location_type = dataclasses.I3Particle.LocationType.InIce
frame["MCTrack"] = outputTrack
tray.AddModule(GetMCTrack, "GetMCTrack", Streams=[icetray.I3Frame.DAQ])
#tray.AddModule('Dump', "dumpy")
tray.Add('Delete',Keys=[map_out,'I3MCPESeriesMap','I3MCPulseSeriesMapPrimaryIDMap','IceTopRawData','PhotonSeriesMap'])
tray.AddModule('I3Writer', 'writer',
Streams=[icetray.I3Frame.Stream('S'), icetray.I3Frame.TrayInfo, icetray.I3Frame.DAQ, icetray.I3Frame.Physics],
filename=outfile)
# final clean-up and execution
tray.AddModule('TrashCan', 'YesWeCan')
print("Executing...")
tray.Execute()
print("Finish!")
tray.Finish()
del tray
| StarcoderdataPython |
3279587 | params = {
'type': 'MBPO',
'universe': 'gym',
'domain': 'HalfCheetah',
'task': 'v2',
'log_dir': '~/ray_mbpo/',
'exp_name': 'defaults',
'kwargs': {
'epoch_length': 1000,
'train_every_n_steps': 1,
'n_train_repeat': 40,
'eval_render_mode': None,
'eval_n_episodes': 1,
'eval_deterministic': True,
'discount': 0.99,
'tau': 5e-3,
'reward_scale': 1.0,
'model_train_freq': 250,
'model_retain_epochs': 1,
'rollout_batch_size': 100e3,
'deterministic': False,
'num_networks': 7,
'num_elites': 5,
'real_ratio': 0.05,
'target_entropy': -3,
'max_model_t': None,
'rollout_schedule': [20, 150, 1, 1],
}
} | StarcoderdataPython |
1640098 | import os
from os import path as p
from datetime import date as d
# module vars
_basedir = p.dirname(__file__)
_user = os.environ.get('USER', os.environ.get('USERNAME'))
# configurable vars
__APP_NAME__ = 'Proposer'
__YOUR_NAME__ = '<NAME>'
__YOUR_COMPANY__ = 'Nerevu Development'
__YOUR_POSITION__ = 'Managing Director'
__YOUR_EMAIL__ = '<EMAIL>'
__YOUR_SKYPE__ = _user
__YOUR_PHONE__ = ['+255 789 477 319', '+254 703 576 035']
__YOUR_WEBSITE__ = 'http://nerevu.com'
# calculated vars
app = __APP_NAME__.lower()
year = d.today().strftime("%Y")
date = d.today().strftime("%B %d, %Y")
site_keys = (
'author', 'author_company', 'author_position', 'author_email',
'author_skype', 'author_phone', 'author_url', 'year', 'date')
site_values = (
__YOUR_NAME__, __YOUR_COMPANY__, __YOUR_POSITION__, __YOUR_EMAIL__,
__YOUR_SKYPE__, __YOUR_PHONE__, __YOUR_WEBSITE__, year, date)
# configuration
class Config(object):
SITE = dict(zip(site_keys, site_values))
DEBUG = False
TESTING = False
HOST = '127.0.0.1'
EXPORT_DIR = p.join(_basedir, 'exports')
INFO_PATH = p.join(_basedir, 'info.yml')
STYLE = 'development'
TYPE = 'html'
TABLE = '<table class="table table-striped">'
BOOTSTRAP_USE_MINIFIED = False
BOOTSTRAP_USE_CDN = False
BOOTSTRAP_FONTAWESOME = False
BOOTSTRAP_HTML5_SHIM = False
class Production(Config):
HOST = '0.0.0.0'
BOOTSTRAP_USE_MINIFIED = True
BOOTSTRAP_USE_CDN = True
BOOTSTRAP_FONTAWESOME = True
class Development(Config):
DEBUG = True
class Test(Config):
TESTING = True
| StarcoderdataPython |
4839038 | <gh_stars>0
#!/usr/bin/env python3
import cloudgenix
import argparse
from cloudgenix import jd, jd_detailed
import cloudgenix_settings
import sys
import logging
import os
import datetime
# Global Vars
TIME_BETWEEN_API_UPDATES = 60 # seconds
REFRESH_LOGIN_TOKEN_INTERVAL = 7 # hours
SDK_VERSION = cloudgenix.version
SCRIPT_NAME = 'CloudGenix: Example script: SNMP'
SCRIPT_VERSION = "v1"
# Set NON-SYSLOG logging to use function name
logger = logging.getLogger(__name__)
####################################################################
# Read cloudgenix_settings file for auth token or username/password
####################################################################
sys.path.append(os.getcwd())
try:
from cloudgenix_settings import CLOUDGENIX_AUTH_TOKEN
except ImportError:
# Get AUTH_TOKEN/X_AUTH_TOKEN from env variable, if it exists. X_AUTH_TOKEN takes priority.
if "X_AUTH_TOKEN" in os.environ:
CLOUDGENIX_AUTH_TOKEN = os.environ.get('X_AUTH_TOKEN')
elif "AUTH_TOKEN" in os.environ:
CLOUDGENIX_AUTH_TOKEN = os.environ.get('AUTH_TOKEN')
else:
# not set
CLOUDGENIX_AUTH_TOKEN = None
try:
from cloudgenix_settings import CLOUDGENIX_USER, CLOUDGENIX_PASSWORD
except ImportError:
# will get caught below
CLOUDGENIX_USER = None
CLOUDGENIX_PASSWORD = None
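# Illustrative sketch (an assumption, not part of this script): a minimal
# cloudgenix_settings.py alongside this file would define either a token or a
# username/password pair, for example:
#
#   CLOUDGENIX_AUTH_TOKEN = "<token from the CloudGenix portal>"
#   # or
#   CLOUDGENIX_USER = "<EMAIL>"
#   CLOUDGENIX_PASSWORD = "<PASSWORD>"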
def addSNMP(cgx, data):
elem_resp = cgx.get.elements()
elem_list = elem_resp.cgx_content.get('items', None)
if not elem_resp.cgx_status or not elem_list:
logger.info("ERROR: unable to get elements for account '{0}'.".format(cgx_session.tenant_name))
return False
for element in elem_list:
elem_id = element['id']
name = element['name']
sid = element['site_id']
model_name = element['model_name']
if name == None:
name = "Unamed device"
if not sid is None:
snmp_resp = cgx.get.snmpagents(site_id=sid,element_id=elem_id)
snmp_resp = snmp_resp.cgx_content.get('items', None)
if snmp_resp:
print("Please delete SNMP Agent on element: " + name + " before trying to add or modify settings")
else:
resp = cgx.post.snmpagents(site_id=sid, element_id=elem_id, data=data)
if not resp:
print("Error creating SNMP Agent on " + name)
else:
print("Created SNMP Agent on " + name)
return True, "200"
def getSNMP(cgx):
elem_resp = cgx.get.elements()
elem_list = elem_resp.cgx_content.get('items', None)
if not elem_resp.cgx_status or not elem_list:
logger.info("ERROR: unable to get elements for account '{0}'.".format(cgx_session.tenant_name))
return False
for element in elem_list:
elem_id = element['id']
name = element['name']
sid = element['site_id']
model_name = element['model_name']
if name == None:
name = "Unamed device"
if not sid is None:
snmp_resp = cgx.get.snmpagents(site_id=sid,element_id=elem_id)
snmp_resp = snmp_resp.cgx_content.get('items', None)
if snmp_resp:
print("ION Name: " + name)
print(snmp_resp)
return True, "200"
def deleteSNMP(cgx, description):
elem_resp = cgx.get.elements()
elem_list = elem_resp.cgx_content.get('items', None)
if not elem_resp.cgx_status or not elem_list:
logger.info("ERROR: unable to get elements for account '{0}'.".format(cgx_session.tenant_name))
return False
for element in elem_list:
elem_id = element['id']
name = element['name']
sid = element['site_id']
model_name = element['model_name']
if name == None:
name = "Unamed device"
if not sid is None:
snmp_resp = cgx.get.snmpagents(site_id=sid,element_id=elem_id)
snmp_resp = snmp_resp.cgx_content.get('items', None)
if snmp_resp:
for snmp in snmp_resp:
snmp_id = snmp['id']
snmp_description = snmp['description']
if snmp_description == description:
resp = cgx.delete.snmpagents(site_id=sid, element_id=elem_id, snmpagent_id=snmp_id)
if not resp:
print("Error deleting SNMP Agent on " + name)
else:
print("Deleted SNMP Agent with description: " + description + " on " + name )
return True, "200"
def deleteSNMPall(cgx):
elem_resp = cgx.get.elements()
elem_list = elem_resp.cgx_content.get('items', None)
if not elem_resp.cgx_status or not elem_list:
logger.info("ERROR: unable to get elements for account '{0}'.".format(cgx_session.tenant_name))
return False
for element in elem_list:
elem_id = element['id']
name = element['name']
sid = element['site_id']
model_name = element['model_name']
if name == None:
name = "Unamed device"
if not sid is None:
snmp_resp = cgx.get.snmpagents(site_id=sid,element_id=elem_id)
snmp_resp = snmp_resp.cgx_content.get('items', None)
if snmp_resp:
for snmp in snmp_resp:
snmp_id = snmp['id']
resp = cgx.delete.snmpagents(site_id=sid, element_id=elem_id, snmpagent_id=snmp_id)
if not resp:
print("Error deleting SNMP Agent on " + name)
else:
print("Deleted SNMP Agent on " + name)
return True, "200"
def go():
############################################################################
# Begin Script, parse arguments.
############################################################################
# Parse arguments
parser = argparse.ArgumentParser(description="{0}.".format(SCRIPT_NAME))
# Allow Controller modification and debug level sets.
controller_group = parser.add_argument_group('API', 'These options change how this program connects to the API.')
controller_group.add_argument("--controller", "-C",
help="Controller URI, ex. "
"Alpha: https://api-alpha.elcapitan.cloudgenix.com"
"C-Prod: https://api.elcapitan.cloudgenix.com",
default=None)
controller_group.add_argument("--insecure", "-I", help="Disable SSL certificate and hostname verification",
dest='verify', action='store_false', default=True)
login_group = parser.add_argument_group('Login', 'These options allow skipping of interactive login')
login_group.add_argument("--email", "-E", help="Use this email as User Name instead of prompting",
default=None)
login_group.add_argument("--pass", <PASSWORD>", help="Use this Password instead of prompting",
default=None)
debug_group = parser.add_argument_group('Debug', 'These options enable debugging output')
debug_group.add_argument("--debug", "-D", help="Verbose Debug info, levels 0-2", type=int,
default=0)
# Allow Controller modification and debug level sets.
config_group = parser.add_argument_group('Config', 'These options change how the configuration is generated.')
config_group.add_argument("--destroy", help="DESTROY SNMP agent that matches the description",
default=False, action="store_true")
config_group.add_argument("--destroyall", help="DESTROY all SNMP agents",
default=False, action="store_true")
config_group.add_argument("--get", help="Get SNMP agents",
default=False, action="store_true")
args = vars(parser.parse_args())
destroy = args['destroy']
destroyall = args['destroyall']
get = args['get']
############################################################################
# Instantiate API
############################################################################
cgx_session = cloudgenix.API(controller=args["controller"], ssl_verify=args["verify"])
# set debug
cgx_session.set_debug(args["debug"])
##
# ##########################################################################
# Draw Interactive login banner, run interactive login including args above.
############################################################################
print("{0} v{1} ({2})\n".format(SCRIPT_NAME, SCRIPT_VERSION, cgx_session.controller))
# login logic. Use cmdline if set, use AUTH_TOKEN next, finally user/pass from config file, then prompt.
# figure out user
if args["email"]:
user_email = args["email"]
elif CLOUDGENIX_USER:
user_email = CLOUDGENIX_USER
else:
user_email = None
# figure out password
if args["pass"]:
user_password = args["pass"]
elif CLOUDGENIX_PASSWORD:
        user_password = CLOUDGENIX_PASSWORD
    else:
        user_password = None
# check for token
if CLOUDGENIX_AUTH_TOKEN and not args["email"] and not args["pass"]:
cgx_session.interactive.use_token(CLOUDGENIX_AUTH_TOKEN)
if cgx_session.tenant_id is None:
print("AUTH_TOKEN login failure, please check token.")
sys.exit()
else:
while cgx_session.tenant_id is None:
cgx_session.interactive.login(user_email, user_password)
# clear after one failed login, force relogin.
if not cgx_session.tenant_id:
user_email = None
user_password = None
############################################################################
# End Login handling, begin script..
############################################################################
# get time now.
curtime_str = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
# create file-system friendly tenant str.
tenant_str = "".join(x for x in cgx_session.tenant_name if x.isalnum()).lower()
cgx = cgx_session
data = {"description":"Example","tags":None,"v2_config":{"community":"test","enabled":"true"},"v3_config":None}
#data = {"description":description,"tags":None,"v2_config":None,"v3_config":{"enabled":true,"users_access":[{"user_name":"Example-SNMP","engine_id":None,"security_level":"auth","auth_type":"md5","auth_phrase":None,"enc_type":"aes","enc_phrase":None}]}
if destroy == True:
deleteSNMP(cgx, data["description"])
elif destroyall == True:
deleteSNMPall(cgx)
elif get == True:
getSNMP(cgx)
else:
addSNMP(cgx, data)
# end of script, run logout to clear session.
cgx_session.get.logout()
if __name__ == "__main__":
go() | StarcoderdataPython |
25160 | <filename>mmdet/models/utils/__init__.py
from .conv_ws import conv_ws_2d, ConvWS2d
from .conv_module import build_conv_layer, ConvModule
from .norm import build_norm_layer
from .scale import Scale
from .weight_init import (
xavier_init,
normal_init,
uniform_init,
kaiming_init,
bias_init_with_prob,
)
__all__ = [
"conv_ws_2d",
"ConvWS2d",
"build_conv_layer",
"ConvModule",
"build_norm_layer",
"xavier_init",
"normal_init",
"uniform_init",
"kaiming_init",
"bias_init_with_prob",
"Scale",
]
| StarcoderdataPython |
12493 | #
#
# Copyright 2009 HPGL Team
#
# This file is part of HPGL (High Perfomance Geostatistics Library).
#
# HPGL is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2 of the License.
#
# HPGL is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with HPGL. If not, see http://www.gnu.org/licenses/.
#
from geo import *
from sys import *
import os
import time
if not os.path.exists("results/"):
os.mkdir("results/")
if not os.path.exists("results/medium/"):
os.mkdir("results/medium/")
#grid = SugarboxGrid(166, 141, 225)
#prop = load_cont_property("test_data/BIG_HARD_DATA.INC", -99)
grid = SugarboxGrid(166, 141, 20)
prop = load_cont_property("test_data/BIG_SOFT_DATA_CON_160_141_20.INC",-99)
sgs_params = {
"prop": prop,
"grid": grid,
"seed": 3439275,
"kriging_type": "sk",
"radiuses": (20, 20, 20),
"max_neighbours": 12,
"covariance_type": covariance.exponential,
"ranges": (10, 10, 10),
"sill": 0.4
}
for x in xrange(1):
time1 = time.time()
psgs_result = sgs_simulation(workers_count = x+2, use_new_psgs = True, **sgs_params)
time2 = time.time()
print "Workers: %s" % (x+2)
print "Time: %s" % (time2 - time1)
write_property(psgs_result, "results/medium/PSGS_workers_1.inc", "PSIS_MEDIUM_workers_1", -99)
| StarcoderdataPython |
34464 | #!/usr/bin/env python3
# coding=utf-8
import os as os
import sys as sys
import io as io
import traceback as trb
import argparse as argp
import gzip as gz
import operator as op
import functools as fnt
def parse_command_line():
"""
:return:
"""
parser = argp.ArgumentParser()
parser.add_argument('--target', '-t', type=str, dest='target')
parser.add_argument('--query', '-q', type=str, dest='query')
parser.add_argument('--output', '-o', type=str, dest='output')
parser.add_argument('--switch', '-s', action='store_true', default=False, dest='switch',
help='Switch target and query in the output')
parser.add_argument('--filter', '-f', type=int, dest='filter', default=0,
help='Skip blocks smaller than this size. Default: 0')
args = parser.parse_args()
return args
def join_parts(switch, *args):
"""
:param switch:
:param args: tc, ts, te, tstr, bc, qc, qs, qe, qstr
:return:
"""
# had an annoying bug here - passed "(switch,)" instead of "switch"
# which always evaluated to True; but did not affect the one file
# where I used switch, so maybe introduced the error later...?
# anyway, just to be sure here, some manual type checking...
assert isinstance(switch, bool), 'Received wrong type for switch: {}'.format(switch)
if switch:
items = op.itemgetter(*(5, 6, 7, 3, # the new target / original query region
4, # block ID
0, 1, 2, 8)) # the new query / original target region
else:
items = op.itemgetter(*(0, 1, 2, 3, # the target region
4, # block ID
5, 6, 7, 8)) # the query region
joined = '\t'.join(items(args))
return joined
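def _join_parts_example():
    """
    Hypothetical helper, not called anywhere in this script: demonstrates how the
    switch flag swaps the roles of target and query in the output block line.
    Chromosome names and coordinates are made-up example values.
    """
    fields = ("chr1", "100", "200", "+", "7", "chr2", "300", "400", "-")
    straight = join_parts(False, *fields)
    # -> "chr1\t100\t200\t+\t7\tchr2\t300\t400\t-"
    flipped = join_parts(True, *fields)
    # -> "chr2\t300\t400\t+\t7\tchr1\t100\t200\t-"
    return straight, flipped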
def main():
"""
:return:
"""
args = parse_command_line()
outbuffer = io.StringIO()
bufsize = 0
block_count = 0
block_ids = set()
build_block = fnt.partial(join_parts, *(args.switch, ))
with open(args.target, 'r') as trgfile:
with open(args.query, 'r') as qryfile:
while 1:
tb = trgfile.readline().strip()
qb = qryfile.readline().strip()
try:
tc, ts, te, tid = tb.split()
qc, qs, qe, qid = qb.split()
assert tid == qid,\
'Block mismatch for files {} and {}\nLines {} and {}'.format(os.path.basename(args.target),
os.path.basename(args.query),
tb, qb)
assert tid not in block_ids,\
'Block ID duplicate in files {} and {}\nLines {} and {}'.format(os.path.basename(args.target),
os.path.basename(args.query),
tb, qb)
tl = int(te) - int(ts)
ql = int(qe) - int(qs)
assert tl == ql,\
'Coverage mismatch for files {} and {}\nLines {} and {}'.format(os.path.basename(args.target),
os.path.basename(args.query),
tb, qb)
if tl < args.filter:
continue
block_count += 1
qstrand = qid.split('_')[-1]
#blockline = '\t'.join([tc, ts, te, '+', str(block_count),
# qc, qs, qe, qstrand])
blockline = build_block(tc, ts, te, '+',
str(block_count),
qc, qs, qe, qstrand)
bufsize += len(blockline)
outbuffer.write(blockline + '\n')
if bufsize > 100000:
with gz.open(args.output, 'at') as outfile:
_ = outfile.write(outbuffer.getvalue())
outfile.flush()
outbuffer = io.StringIO()
bufsize = 0
except ValueError:
break
with gz.open(args.output, 'at') as outfile:
_ = outfile.write(outbuffer.getvalue())
        # had a corrupted gzip once - not sure about the cause... I/O interrupted???
outfile.flush()
return
if __name__ == '__main__':
try:
main()
except Exception as err:
trb.print_exc(file=sys.stderr)
sys.stderr.write('\nError: {}\n'.format(str(err)))
sys.exit(1)
else:
sys.exit(0)
| StarcoderdataPython |
17750 | <reponame>omBratteng/mottak
import pytest
from app.domain.models.Metadatafil import Metadatafil, MetadataType
from app.exceptions import InvalidContentType
from app.routers.mappers.metadafil import _get_file_content, metadatafil_mapper, _content_type2metadata_type
def test__content_type2metadata_type__success():
"""
GIVEN the string 'text/xml' as content_type
WHEN calling the method _content_type2metadata_type
THEN check that return value is MetadataType.XML_METS
"""
expected = MetadataType.XML_METS
actual = _content_type2metadata_type('text/xml')
assert actual == expected
def test__content_type2metadata_type__failure():
"""
GIVEN the string 'text' as content_type
WHEN calling the method _content_type2metadata_type
THEN check that a InvalidContentType Exception has been raised
"""
with pytest.raises(InvalidContentType):
_content_type2metadata_type('text')
def test__get_file_content(testfile, testfile_content):
"""
GIVEN a file with testdata where the content is an METS/XML file
WHEN calling the method _get_file_content
THEN check that the returned string is correct
"""
expected = testfile_content
actual = _get_file_content(testfile)
assert actual == expected
def test_metadatafil_mapper(testfile, testfile_content):
"""
GIVEN a file with testdata where the content is an METS/XML file
WHEN calling the method metadatafil_mapper
THEN check that the returned Metadatafil object is correct
"""
expected = Metadatafil(
filnavn="df53d1d8-39bf-4fea-a741-58d472664ce2.xml",
type_=MetadataType.XML_METS,
innhold=testfile_content)
actual = metadatafil_mapper(testfile)
assert vars(actual) == vars(expected)
| StarcoderdataPython |
3254726 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import requests
import sqlite3
import time
import random
sqlite3.connect('weather.db')
#ID OW_TEMP OW_HUMIDITY OW_PRESSURE OW_COVER
def CREAT_TABLE():
conn = sqlite3.connect('weather.db')
print ("Opened database successfully")
conn.execute('''CREATE TABLE OPEN_WEATHER
(ID INT PRIMARY KEY NOT NULL,
TIMESTAMP INT NOT NULL,
TEMPERATURE INT NOT NULL,
HUMIDITY INT NOT NULL,
ATMOSPHERIC INT NOT NULL,
CLOUD_COVER TEXT);''')
print ("Table created successfully")
conn.close()
try:
CREAT_TABLE()
except:
pass
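# Illustrative sketch (an assumption, not part of the original script): inserting a
# single reading into the table created above; all values are placeholders.
#
#   conn = sqlite3.connect('weather.db')
#   conn.execute("INSERT INTO OPEN_WEATHER (ID, TIMESTAMP, TEMPERATURE, HUMIDITY, ATMOSPHERIC, CLOUD_COVER) "
#                "VALUES (?, ?, ?, ?, ?, ?)", (1, 1609459200, 272, 80, 1013, 'overcast'))
#   conn.commit()
#   conn.close()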
print("5")
print('4') | StarcoderdataPython |
1626400 | import requests, json
import pandas as pd
from dataiku.connector import Connector
import importio_utils
class ImportIOConnector(Connector):
def __init__(self, config):
"""Make the only API call, which downloads the data"""
Connector.__init__(self, config)
if self.config['api_url'].startswith('https://api.import.io/'):
self.api_version = 'api'
elif self.config['api_url'].startswith('https://extraction.import.io/'):
self.api_version = 'extraction'
else:
raise Exception(
'It looks like this URL is not an API URL. URLs to call the API (and get a json response) start with "https://api.import.io" .')
print '[import.io connector] calling API...'
response = requests.get(self.config['api_url'])
print '[import.io connector] got response'
try:
self.json = response.json()
except Exception as e:
print e
print 'response was:\n', response.text
raise
def get_read_schema(self):
if self.api_version == 'api':
columns = importio_utils.convert_schema(self.json['outputProperties'])
return {"columns":columns}
else:
return None
def generate_rows(self, dataset_schema=None, dataset_partitioning=None, partition_id=None, records_limit = -1):
if self.api_version == 'api':
for row in self.json['results']:
yield row
else:
df = pd.DataFrame(self.json['extractorData']['data'][0]['group'])
for col in df.columns:
lengths = df[col].apply(lambda x: len(x) if type(x) == list else 0)
if lengths.max() == 1:
df[col] = df[col].apply(lambda x: x[0] if type(x) == list else {})
keys = df[col].apply(lambda x: x.keys())
for key in set([key for line in keys for key in line]): # drop duplicates
df[col + '_' + key] = df[col].apply(lambda x: x.get(key,''))
del df[col]
else:
df[col] = df[col].apply(json.dumps)
for row in df.to_dict(orient='records'):
yield row
| StarcoderdataPython |
1607389 | <filename>map.py
class Map:
width = 0
height = 0
max_lvl = 0
detail = None
def __init__(self):
self.width = 16
self.height = 12
self.max_lvl = 3
self.detail = [[[0 for col in range(self.width)]for row in range(self.height)] for x in range(self.max_lvl)]
self.detail[0] = [[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1],
[1,3,0,0,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1],
[1,2,0,0,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,4,0,0,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]
self.detail[1] = [[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1],
[1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1],
[1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,2,3,1,1,0,0,0,0,0,1],
[1,1,1,1,1,0,0,1,1,1,0,2,0,1,1,1],
[1,1,1,1,1,2,0,0,4,0,0,0,0,1,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1],
[1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1],
[1,1,1,1,1,1,3,0,1,1,1,1,0,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,3,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]
self.detail[2] = [[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,3,0,0,0,1,0,1,1,1],
[1,1,1,0,0,0,0,0,1,0,0,1,0,1,1,1],
[1,1,1,0,1,0,1,0,0,0,1,0,0,1,1,1],
[1,1,1,0,3,0,0,1,0,0,1,0,0,1,1,1],
[1,1,1,0,1,0,0,1,2,1,1,2,0,1,1,1],
[1,1,1,0,0,0,1,0,0,0,0,0,0,1,1,1],
[1,1,1,1,0,0,1,0,4,0,0,0,1,1,1,1],
[1,1,1,0,0,1,1,0,0,0,1,1,1,1,1,1],
[1,1,1,0,0,0,2,0,0,0,0,3,0,1,1,1],
[1,1,1,0,0,0,0,0,0,0,0,0,0,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]
def get_lvl(self, x):
        return self.detail[x]
def get_max_lvl(self):
return self.max_lvl | StarcoderdataPython |
1691511 | class Config:
DEBUG = True
| StarcoderdataPython |
1711337 | """
Test wrapper on FMCalcs with friendlier data structures and illustrative
reinsurance functionality.
"""
import argparse
import itertools
import json
import os
import shutil
import subprocess
from collections import namedtuple
from tabulate import tabulate
import pandas as pd
DEDUCTIBLE_AND_LIMIT_CALCRULE_ID = 1
FRANCHISE_DEDUCTIBLE_AND_LIMIT_CALCRULE_ID = 3
DEDUCTIBLE_ONLY_CALCRULE_ID = 12
DEDUCTIBLE_AS_A_CAP_ON_THE_RETENTION_OF_INPUT_LOSSES_CALCRULE_ID = 10
DEDUCTIBLE_AS_A_FLOOR_ON_THE_RETENTION_OF_INPUT_LOSSES_CALCRULE_ID = 11
DEDUCTIBLE_LIMIT_AND_SHARE_CALCRULE_ID = 2
DEDUCTIBLE_AND_LIMIT_AS_A_PROPORTION_OF_LOSS_CALCRUKE_ID = 5
DEDUCTIBLE_WITH_LIMIT_AS_A_PROPORTION_OF_LOSS_CALCRUKE_ID = 9
LIMIT_ONLY_CALCRULE_ID = 14
LIMIT_AS_A_PROPORTION_OF_LOSS_CALCRULE_ID = 15
DEDUCTIBLE_AS_A_PROPORTION_OF_LOSS_CALCRULE_ID = 16
NO_ALLOCATION_ALLOC_ID = 0
ALLOCATE_TO_ITEMS_BY_GUL_ALLOC_ID = 1
ALLOCATE_TO_ITEMS_BY_PREVIOUS_LEVEL_ALLOC_ID = 2
BUILDINGS_COVERAGE_TYPE_ID = 1
CONTENTS_COVERAGE_TYPE_ID = 2
TIME_COVERAGE_TYPE_ID = 3
PERIL_WIND = 1
GUL_INPUTS_FILES = [
'coverages',
'gulsummaryxref',
'items']
IL_INPUTS_FILES = [
'fm_policytc',
'fm_profile',
'fm_programme',
'fm_xref',
'fmsummaryxref']
OPTIONAL_INPUTS_FILES = [
'events']
CONVERSION_TOOLS = {
'coverages': 'coveragetobin',
'events': 'evetobin',
'fm_policytc': 'fmpolicytctobin',
'fm_profile': 'fmprofiletobin',
'fm_programme': 'fmprogrammetobin',
'fm_xref': 'fmxreftobin',
'fmsummaryxref': 'fmsummaryxreftobin',
'gulsummaryxref': 'gulsummaryxreftobin',
'items': "itemtobin"}
COVERAGE_TYPES = [
BUILDINGS_COVERAGE_TYPE_ID,
CONTENTS_COVERAGE_TYPE_ID,
TIME_COVERAGE_TYPE_ID]
PERILS = [PERIL_WIND]
"""
Ktools FM File Structure
---------------------------------------------------------------------------------------------------
items
item_id int Identifier of the exposure item
coverage_id int Identifier of the coverage
areaperil_id int Identifier of the locator and peril
vulnerability_id int Identifier of the vulnerability distribution
group_id int Identifier of the correlaton group
---------------------------------------------------------------------------------------------------
coverages
    coverage_id               int       Identifier of the coverage
tiv float The total insured value of the coverage
---------------------------------------------------------------------------------------------------
gulsummaryxref
coverage_id int Identifier of the coverage
summary_id int Identifier of the summary level grouping
summaryset_id int Identifier of the summary set
---------------------------------------------------------------------------------------------------
fm_programme
from_agg_id int Oasis Financial Module from_agg_id
level_id int Oasis Financial Module level_id
to_agg_id int Oasis Financial Module to_agg_id
---------------------------------------------------------------------------------------------------
fm_profile
policytc_id int Oasis Financial Module policytc_id
calcrule_id int Oasis Financial Module calcrule_id
allocrule_id int Oasis Financial Module allocrule_id
ccy_id int Oasis Financial Module ccy_id
deductible float Deductible
limit float Limit
share_prop_of_lim float Share/participation as a proportion of limit
deductible_prop_of_loss float Deductible as a proportion of loss
limit_prop_of_loss float Limit as a proportion of loss
deductible_prop_of_tiv float Deductible as a proportion of TIV
limit_prop_of_tiv float Limit as a proportion of TIV
deductible_prop_of_limit float Deductible as a proportion of limit
---------------------------------------------------------------------------------------------------
fm_policytc
layer_id int Oasis Financial Module layer_id
level_id int Oasis Financial Module level_id
agg_id int Oasis Financial Module agg_id
policytc_id int Oasis Financial Module policytc_id
---------------------------------------------------------------------------------------------------
fmsummaryxref
output_id int Identifier of the coverage
summary_id int Identifier of the summary level group for one or more output losses
summaryset_id int Identifier of the summary set (0 to 9 inclusive)
---------------------------------------------------------------------------------------------------
fm_xref
output_id int Identifier of the output group of losses
agg_id int Identifier of the agg_id to output
layer_id int Identifier of the layer_id to output
---------------------------------------------------------------------------------------------------
"""
Item = namedtuple(
"Item", "item_id coverage_id areaperil_id vulnerability_id group_id")
Coverage = namedtuple(
"Coverage", "coverage_id tiv")
FmProgramme = namedtuple(
"FmProgramme", "from_agg_id level_id to_agg_id")
FmProfile = namedtuple(
"FmProfile", "policytc_id calcrule_id allocrule_id ccy_id deductible limit " +
"share_prop_of_lim deductible_prop_of_loss limit_prop_of_loss deductible_prop_of_tiv " +
"limit_prop_of_tiv deductible_prop_of_limit")
FmPolicyTc = namedtuple(
"FmPolicyTc", "layer_id level_id agg_id policytc_id")
GulSummaryXref = namedtuple(
"GulSummaryXref", "coverage_id summary_id summaryset_id")
FmSummaryXref = namedtuple(
"FmSummaryXref", "output_id summary_id summaryset_id")
FmXref = namedtuple(
"FmXref", "output_id agg_id layer_id")
XrefDescription = namedtuple(
"Description", ("xref_id policy_id location_id coverage_type_id peril_id tiv"))
GulRecord = namedtuple(
"GulRecord", "event_id item_id sidx loss")
class Location(object):
def __init__(
self,
location_id, area_peril_id, vulnerability_id,
buildings_value, contents_value, time_value):
self.location_id = location_id
self.area_peril_id = area_peril_id
self.vulnerability_id = vulnerability_id
self.buildings_value = buildings_value
self.contents_value = contents_value
self.time_value = time_value
def get_tiv(self, coverage_type_id):
switcher = {
BUILDINGS_COVERAGE_TYPE_ID: self.buildings_value,
CONTENTS_COVERAGE_TYPE_ID: self.contents_value,
TIME_COVERAGE_TYPE_ID: self.time_value
}
return switcher.get(coverage_type_id, 0)
class Policy(object):
def __init__(
self,
policy_id, site_limit, blanket_deductible, blanket_limit, locations):
self.policy_id = policy_id
self.site_limit = site_limit
self.blanket_deductible = blanket_deductible
self.blanket_limit = blanket_limit
self.locations = locations
class Treaty(object):
def __init__(
self):
pass
class CatXlTreaty(Treaty):
"""
Layer that applies to entire portfolio.
"""
def __init__(
self,
treaty_id, attachment, occurrence_limit, share):
self.treaty_id = treaty_id
self.attachment = attachment
self.occurrence_limit = occurrence_limit
self.share = share
class LocationFacTreaty(Treaty):
"""
Layer that applies to a specific location.
"""
def __init__(
self,
treaty_id, location_id, attachment, limit, share):
self.treaty_id = treaty_id
self.location_id = location_id
self.attachment = attachment
self.limit = limit
self.share = share
class DirectLayer(object):
"""
Set of direct policiies.
"""
def __init__(self, policies):
self.policies = policies
self.item_ids = list()
self.item_tivs = list()
self.coverages = pd.DataFrame()
self.items = pd.DataFrame()
self.fmprogrammes = pd.DataFrame()
self.fmprofiles = pd.DataFrame()
self.fm_policytcs = pd.DataFrame()
self.fm_xrefs = pd.DataFrame()
self.xref_descriptions = pd.DataFrame()
def generate_oasis_structures(self):
coverage_id = 0
item_id = 0
group_id = 0
policy_agg_id = 0
policytc_id = 0
coverages_list = list()
items_list = list()
fmprogrammes_list = list()
fmprofiles_list = list()
fm_policytcs_list = list()
fm_xrefs_list = list()
xref_descriptions_list = list()
site_agg_id = 0
for policy in self.policies:
policy_agg_id = policy_agg_id + 1
policytc_id = policytc_id + 1
fmprofiles_list.append(FmProfile(
policytc_id=policytc_id,
calcrule_id=DEDUCTIBLE_AND_LIMIT_CALCRULE_ID,
ccy_id=-1,
allocrule_id=ALLOCATE_TO_ITEMS_BY_GUL_ALLOC_ID,
deductible=policy.blanket_deductible,
limit=policy.blanket_limit,
share_prop_of_lim=0.0, # Not used
deductible_prop_of_loss=0.0, # Not used
limit_prop_of_loss=0.0, # Not used
deductible_prop_of_tiv=0.0, # Not used
limit_prop_of_tiv=0.0, # Not used
deductible_prop_of_limit=0.0 # Not used
))
fm_policytcs_list.append(FmPolicyTc(
layer_id=1,
level_id=2,
agg_id=policy_agg_id,
policytc_id=policytc_id
))
for location in policy.locations:
group_id = group_id + 1
site_agg_id = site_agg_id + 1
policytc_id = policytc_id + 1
fmprofiles_list.append(FmProfile(
policytc_id=policytc_id,
calcrule_id=DEDUCTIBLE_AND_LIMIT_CALCRULE_ID,
ccy_id=-1,
allocrule_id=ALLOCATE_TO_ITEMS_BY_GUL_ALLOC_ID,
deductible=0,
limit=policy.site_limit,
share_prop_of_lim=0.0, # Not used
deductible_prop_of_loss=0.0, # Not used
limit_prop_of_loss=0.0, # Not used
deductible_prop_of_tiv=0.0, # Not used
limit_prop_of_tiv=0.0, # Not used
deductible_prop_of_limit=0.0 # Not used
))
fm_policytcs_list.append(FmPolicyTc(
layer_id=1,
level_id=1,
agg_id=site_agg_id,
policytc_id=policytc_id
))
fmprogrammes_list.append(
FmProgramme(
from_agg_id=site_agg_id,
level_id=2,
to_agg_id=policy_agg_id
)
)
for coverage_type_id in COVERAGE_TYPES:
tiv = location.get_tiv(coverage_type_id)
if tiv > 0:
coverage_id = coverage_id + 1
coverages_list.append(
Coverage(
coverage_id=coverage_id,
tiv=tiv
))
for peril in PERILS:
item_id = item_id + 1
self.item_ids.append(item_id)
self.item_tivs.append(tiv)
items_list.append(
Item(
item_id=item_id,
coverage_id=coverage_id,
areaperil_id=location.area_peril_id,
vulnerability_id=location.vulnerability_id,
group_id=group_id
))
fmprogrammes_list.append(
FmProgramme(
from_agg_id=item_id,
level_id=1,
to_agg_id=site_agg_id
)
)
fm_xrefs_list.append(
FmXref(
output_id=item_id,
agg_id=item_id,
layer_id=1
))
xref_descriptions_list.append(XrefDescription(
xref_id=item_id,
location_id=location.location_id,
coverage_type_id=coverage_type_id,
peril_id=peril,
policy_id=policy.policy_id,
tiv=tiv
)
)
self.coverages = pd.DataFrame(coverages_list)
self.items = pd.DataFrame(items_list)
self.fmprogrammes = pd.DataFrame(fmprogrammes_list)
self.fmprofiles = pd.DataFrame(fmprofiles_list)
self.fm_policytcs = pd.DataFrame(fm_policytcs_list)
self.fm_xrefs = pd.DataFrame(fm_xrefs_list)
self.xref_descriptions = pd.DataFrame(xref_descriptions_list)
def write_oasis_files(self):
self.coverages.to_csv("coverages.csv", index=False)
self.items.to_csv("items.csv", index=False)
self.fmprogrammes.to_csv("fm_programme.csv", index=False)
self.fmprofiles.to_csv("fm_profile.csv", index=False)
self.fm_policytcs.to_csv("fm_policytc.csv", index=False)
self.fm_xrefs.to_csv("fm_xref.csv", index=False)
directory = "direct"
if os.path.exists(directory):
shutil.rmtree(directory)
os.mkdir(directory)
input_files = GUL_INPUTS_FILES + IL_INPUTS_FILES
for input_file in input_files:
conversion_tool = CONVERSION_TOOLS[input_file]
input_file_path = input_file + ".csv"
if not os.path.exists(input_file_path):
continue
output_file_path = os.path.join(directory, input_file + ".bin")
command = "{} < {} > {}".format(
conversion_tool, input_file_path, output_file_path)
proc = subprocess.Popen(command, shell=True)
proc.wait()
if proc.returncode != 0:
raise Exception(
"Failed to convert {}: {}".format(input_file_path, command))
def apply_fm(self, loss_percentage_of_tiv=1.0, net=False):
guls_list = list()
for item_id, tiv in itertools.izip(self.item_ids, self.item_tivs):
event_loss = loss_percentage_of_tiv * tiv
guls_list.append(
GulRecord(event_id=1, item_id=item_id, sidx=-1, loss=event_loss))
guls_list.append(
GulRecord(event_id=1, item_id=item_id, sidx=1, loss=event_loss))
guls_df = pd.DataFrame(guls_list)
guls_df.to_csv("guls.csv", index=False)
net_flag = ""
if net:
net_flag = "-n"
command = "gultobin -S 1 < guls.csv | fmcalc -p direct {} | tee ils.bin | fmtocsv > ils.csv".format(
net_flag)
proc = subprocess.Popen(command, shell=True)
proc.wait()
if proc.returncode != 0:
raise Exception("Failed to run fm")
losses_df = pd.read_csv("ils.csv")
losses_df.drop(losses_df[losses_df.sidx != 1].index, inplace=True)
del losses_df['sidx']
guls_df.drop(guls_df[guls_df.sidx != 1].index, inplace=True)
del guls_df['event_id']
del guls_df['sidx']
guls_df = pd.merge(
self.xref_descriptions,
guls_df, left_on=['xref_id'], right_on=['item_id'])
losses_df = pd.merge(
guls_df,
losses_df, left_on='xref_id', right_on='output_id',
suffixes=["_gul", "_il"])
del losses_df['event_id']
del losses_df['output_id']
del losses_df['xref_id']
del losses_df['item_id']
return losses_df
class ReinsuranceLayer(object):
def __init__(self, name, treaties, items, coverages, xref_descriptions):
self.name = name
self.treaties = treaties
        self.items = items
        self.coverages = coverages
self.xref_descriptions = xref_descriptions
self.item_ids = list()
self.item_tivs = list()
self.fmprogrammes = pd.DataFrame()
self.fmprofiles = pd.DataFrame()
self.fm_policytcs = pd.DataFrame()
self.fm_xrefs = pd.DataFrame()
def generate_oasis_structures(self):
level_1_agg_id = 0
level_2_agg_id = 0
level_3_agg_id = 0
treatytc_id = 0
treaty_layer_id = 0
fmprogrammes_list = list()
fmprofiles_list = list()
fm_policytcs_list = list()
fm_xrefs_list = list()
# treatytc_id = treatytc_id + 1
# passthroughtc_id = treatytc_id
# fmprofiles_list.append(FmProfile(
# policytc_id=passthroughtc_id,
# calcrule_id=DEDUCTIBLE_ONLY_CALCRULE_ID,
# ccy_id=-1,
# allocrule_id=ALLOCATE_TO_ITEMS_BY_PREVIOUS_LEVEL_ALLOC_ID,
# deductible=0.0,
# limit=0.0, # Not used
# share_prop_of_lim=0.0, # Not used
# deductible_prop_of_loss=0.0, # Not used
# limit_prop_of_loss=0.0, # Not used
# deductible_prop_of_tiv=0.0, # Not used
# limit_prop_of_tiv=0.0, # Not used
# deductible_prop_of_limit=0.0 # Not used
# ))
treatytc_id = treatytc_id + 1
nolosstc_id = treatytc_id
fmprofiles_list.append(FmProfile(
policytc_id=nolosstc_id,
calcrule_id=LIMIT_ONLY_CALCRULE_ID,
ccy_id=-1,
allocrule_id=ALLOCATE_TO_ITEMS_BY_PREVIOUS_LEVEL_ALLOC_ID,
deductible=0.0, # Not used
limit=0.0,
share_prop_of_lim=0.0, # Not used
deductible_prop_of_loss=0.0, # Not used
limit_prop_of_loss=0.0, # Not used
deductible_prop_of_tiv=0.0, # Not used
limit_prop_of_tiv=0.0, # Not used
deductible_prop_of_limit=0.0 # Not used
))
level_1_agg_id = level_1_agg_id + 1
noloss_level_1_agg_id = level_1_agg_id
fm_policytcs_list.append(FmPolicyTc(
layer_id=1,
level_id=1,
agg_id=noloss_level_1_agg_id,
policytc_id=nolosstc_id
))
for treaty in self.treaties:
level_1_agg_id = level_1_agg_id + 1
treatytc_id = treatytc_id + 1
treaty_layer_id = treaty_layer_id + 1
if type(treaty) == CatXlTreaty:
fmprofiles_list.append(FmProfile(
policytc_id=treatytc_id,
calcrule_id=DEDUCTIBLE_LIMIT_AND_SHARE_CALCRULE_ID,
ccy_id=-1,
allocrule_id=ALLOCATE_TO_ITEMS_BY_PREVIOUS_LEVEL_ALLOC_ID,
deductible=treaty.attachment,
limit=treaty.occurrence_limit,
share_prop_of_lim=treaty.share,
deductible_prop_of_loss=0.0, # Not used
limit_prop_of_loss=0.0, # Not used
deductible_prop_of_tiv=0.0, # Not used
limit_prop_of_tiv=0.0, # Not used
deductible_prop_of_limit=0.0 # Not used
))
elif type(treaty) == LocationFacTreaty:
fmprofiles_list.append(FmProfile(
policytc_id=treatytc_id,
calcrule_id=DEDUCTIBLE_LIMIT_AND_SHARE_CALCRULE_ID,
ccy_id=-1,
allocrule_id=ALLOCATE_TO_ITEMS_BY_PREVIOUS_LEVEL_ALLOC_ID,
deductible=treaty.attachment,
limit=treaty.limit,
share_prop_of_lim=treaty.share,
deductible_prop_of_loss=0.0, # Not used
limit_prop_of_loss=0.0, # Not used
deductible_prop_of_tiv=0.0, # Not used
limit_prop_of_tiv=0.0, # Not used
deductible_prop_of_limit=0.0 # Not used
))
fm_policytcs_list.append(FmPolicyTc(
layer_id=treaty_layer_id,
level_id=1,
agg_id=level_1_agg_id,
policytc_id=treatytc_id
))
if type(treaty) == CatXlTreaty:
for __, xref_description in self.xref_descriptions.iterrows():
fmprogrammes_list.append(
FmProgramme(
from_agg_id=xref_description.xref_id,
level_id=1,
to_agg_id=level_1_agg_id
)
)
elif type(treaty) == LocationFacTreaty:
for __, xref_description in self.xref_descriptions.iterrows():
if xref_description.location_id == treaty.location_id:
fmprogrammes_list.append(
FmProgramme(
from_agg_id=xref_description.xref_id,
level_id=1,
to_agg_id=level_1_agg_id
)
)
else:
fmprogrammes_list.append(
FmProgramme(
from_agg_id=xref_description.xref_id,
level_id=1,
to_agg_id=noloss_level_1_agg_id
)
)
        print(treaty_layer_id)
for __, xref_description in self.xref_descriptions.iterrows():
for layer_id in range(1, treaty_layer_id + 1):
fm_xrefs_list.append(
FmXref(
output_id=xref_description.xref_id,
agg_id=xref_description.xref_id,
layer_id=layer_id
))
self.fmprogrammes = pd.DataFrame(fmprogrammes_list)
self.fmprofiles = pd.DataFrame(fmprofiles_list)
self.fm_policytcs = pd.DataFrame(fm_policytcs_list)
self.fm_xrefs = pd.DataFrame(fm_xrefs_list)
def write_oasis_files(self):
self.fmprogrammes.to_csv("fm_programme.csv", index=False)
self.fmprofiles.to_csv("fm_profile.csv", index=False)
self.fm_policytcs.to_csv("fm_policytc.csv", index=False)
self.fm_xrefs.to_csv("fm_xref.csv", index=False)
directory = self.name
if os.path.exists(directory):
shutil.rmtree(directory)
os.mkdir(directory)
input_files = GUL_INPUTS_FILES + IL_INPUTS_FILES
for input_file in input_files:
conversion_tool = CONVERSION_TOOLS[input_file]
input_file_path = input_file + ".csv"
if not os.path.exists(input_file_path):
continue
output_file_path = os.path.join(directory, input_file + ".bin")
command = "{} < {} > {}".format(
conversion_tool, input_file_path, output_file_path)
proc = subprocess.Popen(command, shell=True)
proc.wait()
if proc.returncode != 0:
raise Exception(
"Failed to convert {}: {}".format(input_file_path, command))
def apply_fm(self, input):
command = \
"fmcalc -p {0} -n < {1}.bin | tee {0}.bin | fmtocsv > {0}.csv".format(
self.name, input)
        print(command)
proc = subprocess.Popen(command, shell=True)
proc.wait()
if proc.returncode != 0:
raise Exception("Failed to run fm")
losses_df = pd.read_csv("{}.csv".format(self.name))
inputs_df = pd.read_csv("{}.csv".format(input))
losses_df.drop(losses_df[losses_df.sidx != 1].index, inplace=True)
inputs_df.drop(inputs_df[inputs_df.sidx != 1].index, inplace=True)
losses_df = pd.merge(
inputs_df,
losses_df, left_on='output_id', right_on='output_id',
suffixes=('_pre', '_net'))
losses_df = pd.merge(
self.xref_descriptions,
losses_df, left_on='xref_id', right_on='output_id')
del losses_df['event_id_pre']
del losses_df['sidx_pre']
del losses_df['event_id_net']
del losses_df['sidx_net']
del losses_df['output_id']
del losses_df['xref_id']
return losses_df
class CustomJsonEncoder(json.JSONEncoder):
def default(self, o):
        # Here you can serialize your object depending on its type
        # or you can define a method in your class which serializes the object
if isinstance(o, (
Policy, Location, )):
return o.__dict__ # Or another method to serialize it
else:
            return json.JSONEncoder.default(self, o)
mapping = {
frozenset((
'policy_id',
'site_limit',
'blanket_deductible',
'blanket_limit',
'locations')): Policy,
frozenset((
'location_id',
'area_peril_id',
'vulnerability_id',
'buildings_value',
'contents_value',
'time_value')): Location,
frozenset((
'treaty_id',
'location_id',
'attachment',
'limit',
'share')): LocationFacTreaty,
frozenset((
'treaty_id',
'attachment',
'occurrence_limit',
'share')): CatXlTreaty
}
def class_mapper(d):
return mapping[frozenset(d.keys())](**d)
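# Illustrative JSON shapes (a sketch only -- the field names come from the `mapping`
# dict above, but the nesting and example values are assumptions, not taken from the
# original script):
#
#   policy file : a list of Policy dicts, each expected to carry a "locations" list
#                 of Location dicts
#   ri file     : a list of treaty dicts shaped like CatXlTreaty or LocationFacTreaty
#
#   e.g. a single catastrophe XL treaty entry:
#   [{"treaty_id": 1, "attachment": 100.0, "occurrence_limit": 500.0, "share": 0.5}]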
parser = argparse.ArgumentParser(
description='Run Oasis FM examples with reinsurance.')
parser.add_argument(
'-n', '--name', metavar='N', type=str, required=True,
    help='The analysis name. All intermediate files will be '
         'saved in a labelled directory.')
parser.add_argument(
'-p', '--policy_json', metavar='N', type=str, required=True,
help='The JSON file containing the direct policies.')
parser.add_argument(
'-r1', '--ri_1_json', metavar='N', type=str, required=False,
help='The JSON file containing the first reinsurance inuring layer.')
parser.add_argument(
'-r2', '--ri_2_json', metavar='N', type=str, required=False,
help='The JSON file containing the second reinsurance inuring layer.')
args = parser.parse_args()
run_name = args.name
policy_json_file = args.policy_json
ri_1_json_file = args.ri_1_json
ri_2_json_file = args.ri_2_json
for filepath in [policy_json_file, ri_1_json_file, ri_2_json_file]:
if filepath is None:
continue
    if not os.path.exists(filepath):
        print("Path does not exist: {}".format(filepath))
exit(1)
if os.path.exists(run_name):
shutil.rmtree(run_name)
os.mkdir(run_name)
do_ri_1 = ri_1_json_file is not None
do_ri_2 = ri_2_json_file is not None
with open(policy_json_file) as json_data:
policies = json.load(json_data, object_hook=class_mapper)
if do_ri_1:
with open(ri_1_json_file) as json_data:
treaties1 = json.load(json_data, object_hook=class_mapper)
if do_ri_2:
with open(ri_2_json_file) as json_data:
treaties2 = json.load(json_data, object_hook=class_mapper)
cwd = os.getcwd()
try:
os.chdir(run_name)
direct_layer = DirectLayer(policies=policies)
direct_layer.generate_oasis_structures()
direct_layer.write_oasis_files()
losses_df = direct_layer.apply_fm(loss_percentage_of_tiv=0.1, net=False)
print "Direct layer loss"
print tabulate(losses_df, headers='keys', tablefmt='psql', floatfmt=".2f")
print ""
print ""
if do_ri_1:
reinsurance_layer = ReinsuranceLayer(
name="ri1",
treaties=treaties1,
items=direct_layer.items,
coverages=direct_layer.coverages,
xref_descriptions=direct_layer.xref_descriptions
)
reinsurance_layer.generate_oasis_structures()
reinsurance_layer.write_oasis_files()
treaty_losses_df = reinsurance_layer.apply_fm("ils")
print "Reinsurance - first inuring layer"
print tabulate(treaty_losses_df, headers='keys', tablefmt='psql', floatfmt=".2f")
print ""
print ""
if do_ri_2:
reinsurance_layer = ReinsuranceLayer(
name="ri2",
treaties=treaties2,
items=direct_layer.items,
coverages=direct_layer.coverages,
xref_descriptions=direct_layer.xref_descriptions
)
reinsurance_layer.generate_oasis_structures()
reinsurance_layer.write_oasis_files()
treaty_losses_df = reinsurance_layer.apply_fm("ri1")
print "Reinsurance - second inuring layer"
print tabulate(treaty_losses_df, headers='keys', tablefmt='psql', floatfmt=".2f")
print ""
print ""
finally:
os.chdir(cwd)
| StarcoderdataPython |
1716510 | from django.apps import AppConfig
class MetricsCollectorConfig(AppConfig):
name = 'metrics_collector'
| StarcoderdataPython |
1648664 | # Copyright 2020, <NAME>, mailto:<EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import choice, randint
from string import ascii_letters
def random_str(min_len, max_len, avoid=None, spaces=False):
""" Generate a random string of the specified length.
Args:
min_len: Minimum length of string
max_len: Maximum length of string
avoid : Optional list of strings to avoid
spaces : Whether to include spaces
Returns: Random ASCII string
"""
def rand_ascii(min_len, max_len):
return "".join([
choice(ascii_letters) for _x in range(randint(min_len, max_len))
])
while True:
r_str = ""
if spaces:
while len(r_str) < min_len:
r_str += rand_ascii(1, min(6, max_len - len(r_str)))
if len(r_str) < (min_len - 1): r_str += " "
else:
r_str = rand_ascii(min_len, max_len)
if not isinstance(avoid, list) or r_str not in avoid:
return r_str
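# Example calls (outputs shown are illustrative only):
#   random_str(5, 10)                       -> e.g. 'qYdKwz'
#   random_str(8, 16, spaces=True)          -> e.g. 'ab cde fg h'
#   random_str(3, 5, avoid=['foo', 'bar'])  -> a 3-5 char string not present in the list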
| StarcoderdataPython |
3399889 | # Copyright (c) Alibaba Inc. All rights reserved.
import cv2
import numpy as np
import os
from scipy.spatial.distance import cdist
import torch
class Tracker(object):
""" Track the sparse keypoints via their descriptors.
"""
def __init__(self, max_length, matching_method = 'nearest',
cross_check = True, dist_thresh = 0.8, dim_descriptor = 128):
self.max_length = max_length
self.matching_method = matching_method
self.cross_check = cross_check
self.dim_descriptor = dim_descriptor
self.dist_thresh = dist_thresh
self.all_points = []
self.tracks_forward = []
self.tracks_backward = []
for n in range(self.max_length):
self.all_points.append(np.zeros([3, 0]))
            self.tracks_forward.append(np.zeros([2, 0], dtype=int))
            self.tracks_backward.append(np.zeros([2, 0], dtype=int))
        self.prev_desc = np.zeros([dim_descriptor, 0])
        self.prev_desc_index = np.zeros([2, 0], dtype=int)
def track(self, points, desc):
""" Update all_points, tracks, descriptors using the newly results.
points - 3xN array of 2D point observations.
desc - dimxN array of dim dimensional descriptors.
"""
# Remove oldest points and descriptors.
self.all_points.pop(0)
self.tracks_forward.pop(0)
self.tracks_backward.pop(0)
reserve_desc_id = (self.prev_desc_index[0, :] != 0)
self.prev_desc = self.prev_desc[:, reserve_desc_id]
self.prev_desc_index = self.prev_desc_index[:, reserve_desc_id]
self.prev_desc_index[0, :] -= 1
# Update the tracks.
if points is None or desc is None:
# No matching points, update tracks.
self.all_points.append(np.zeros((3, 0)))
            self.tracks_forward.append(np.zeros([2, 0], dtype=int))
            self.tracks_backward.append(np.zeros([2, 0], dtype=int))
else:
correspondence_vector = self.find_correspondence(desc, self.prev_desc)
self.all_points.append(points)
            self.tracks_forward.append(np.zeros([2, points.shape[1]], dtype=int) - 1)
            self.tracks_backward.append(np.zeros([2, points.shape[1]], dtype=int) - 1)
            reserve_desc_id = np.ones(self.prev_desc.shape[1], dtype=bool)
for i in range(correspondence_vector.shape[0]):
if correspondence_vector[i] > -1:
reserve_desc_id[correspondence_vector[i]] = False
i_frame_prev = self.prev_desc_index[0, correspondence_vector[i]]
i_point_prev = self.prev_desc_index[1, correspondence_vector[i]]
self.tracks_backward[-1][0, i] = (
i_frame_prev - len(self.all_points) + 1)
self.tracks_backward[-1][1, i] = i_point_prev
self.tracks_forward[i_frame_prev][0,i_point_prev] = (
len(self.all_points) - i_frame_prev - 1)
self.tracks_forward[i_frame_prev][1,i_point_prev] = i
self.prev_desc = self.prev_desc[:, reserve_desc_id]
self.prev_desc_index = self.prev_desc_index[:, reserve_desc_id]
self.prev_desc = np.concatenate((self.prev_desc, desc), axis=1)
for i in range(points.shape[1]):
self.prev_desc_index = np.concatenate(
(self.prev_desc_index, np.array(
                        [[len(self.all_points)-1], [i]], dtype=int)), axis=1)
return
def find_correspondence(self, desc_a, desc_b):
        correspondence_vector = np.zeros(desc_a.shape[1], dtype=int) - 1
if desc_a.shape[1] == 0 or desc_b.shape[1] == 0:
return correspondence_vector
#distance_matrix = cdist(desc_a.T, desc_b.T)
distance_matrix = torch.cdist(
torch.tensor(desc_a.astype(np.float32).T).unsqueeze(0).cuda(),
torch.tensor(desc_b.astype(np.float32).T).unsqueeze(0).cuda()
).squeeze(0).cpu().numpy()
if self.matching_method == 'nearest':
min_index_a2b = np.argmin(distance_matrix, axis=1)
correspondence_vector[:] = min_index_a2b[:]
if self.cross_check:
min_index_b2a = np.argmin(distance_matrix, axis=0)
all_index_a = np.array([i for i in range(desc_a.shape[1])])
cross_check = (min_index_b2a[min_index_a2b[all_index_a]] == all_index_a)
correspondence_vector[:] = -1
correspondence_vector[cross_check] = min_index_a2b[cross_check]
if self.dist_thresh > 0:
min_distance_a2b = np.min(distance_matrix, axis=1)
min_distance_discard = (min_distance_a2b > self.dist_thresh)
correspondence_vector[min_distance_discard] = -1
else:
print("[Error] now only support nearest matching.")
return correspondence_vector
def draw_tracks(self, img_rgb):
assert(len(img_rgb.shape) == 3)
assert(img_rgb.shape[2] == 3)
for i_track in range(self.tracks_backward[-1].shape[1]):
if self.tracks_backward[-1][1, i_track] > -1:
                # cast to int: OpenCV drawing functions require integer pixel coordinates
                point = (int(self.all_points[-1][0, i_track]),
                         int(self.all_points[-1][1, i_track]))
cv2.circle(img_rgb, point, 1, [0., 0., 255.], -1, lineType=16)
i_frame = len(self.all_points) - 1
i_point = i_track
track_length = 1
while self.tracks_backward[i_frame][1, i_point] > -1:
track_length += 1
i_frame_prev = i_frame + self.tracks_backward[i_frame][0, i_point]
i_point_prev = self.tracks_backward[i_frame][1, i_point]
if i_frame_prev < 0:
break
                    point_prev = (int(self.all_points[i_frame_prev][0, i_point_prev]),
                                  int(self.all_points[i_frame_prev][1, i_point_prev]))
cv2.line(img_rgb, point, point_prev, [0., 64., 255.], thickness=1, lineType=16)
point = point_prev
i_frame = i_frame_prev
i_point = i_point_prev
return img_rgb
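# Minimal usage sketch (shapes follow the track() docstring; the random data below is
# purely illustrative, and find_correspondence() as written expects a CUDA device):
#
#   tracker = Tracker(max_length=5)
#   for _ in range(5):
#       pts = np.random.rand(3, 10) * 100    # 3xN keypoint observations
#       desc = np.random.rand(128, 10)       # dimxN descriptors
#       tracker.track(pts, desc)
#   frame = np.zeros((120, 120, 3), dtype=np.uint8)
#   vis = tracker.draw_tracks(frame)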
| StarcoderdataPython |
4836708 | """Developed by: <NAME> 2017
This Module contains the database class that handles all of the data gathering
and cleaning. It also contains functions that help us work with our data.
"""
import os
import pandas as pd
import numpy as np
import sklearn
from sklearn import preprocessing, model_selection
# from sklearn.model_selection import cross_validate
import random
import csv
import sys
def apply_RFECV_mask(mask, *args):
"""Applies a binary mask to a dataframe to remove columns. Binary mask is
created from recursive feature elimination and cross validation and
optimizes the generalization of the model
Args:
:param mask (string): text file containing the binary mask
:param *args (pandas dataframe): Dataframes containing columns to mask
Returns:
:new dataframes (pandas df): new dataframes with columns removed
"""
assert os.path.isfile(mask), "please pass a string specifying mask location"
dir_path = os.path.dirname(os.path.realpath(__file__))
mask = os.path.join(dir_path, mask)
# get mask data
updated_args = []
with open(mask, 'r') as f:
reader = csv.reader(f)
column_mask = list(reader)[0]
# apply mask to columns
column_indexes = []
for dataframe in args:
assert len(column_mask) == len(list(dataframe)), 'mask length {} does not match dataframe length {}'.format(len(column_mask), len(list(dataframe)))
for i, col in enumerate(column_mask):
if col.strip() == 'False':
column_indexes.append(i)
updated_args.append(dataframe.drop(dataframe.columns[column_indexes], axis=1))
return updated_args
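# Illustrative call (a sketch -- 'mask.txt' is a made-up filename; the file is expected
# to hold one CSV row of 'True'/'False' flags, one flag per dataframe column):
#   X_train_masked, X_test_masked = apply_RFECV_mask('mask.txt', X_train, X_test)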
class data_base(object):
"""Handles all data fetching and preparation. Attributes
can be assigned to csv files with the assignment operator. Typical use
case is to set raw_data to a csv file matching the format found in
Input files and then calling clean_raw_data(). This sets the clean_X_data,
y_enrichment and target values. From this point you can split the data
to train/test the model using our data. To predict your own data, make sure your excel sheet
matches the format in <Input_Files/database.csv>. Then you can
call db.predict = <your_csv_path>. The X_test and Y_test data will now
be your data. Just remove the stratified_data_split from the pipeline
because you will now not need to split any data.
Args:
None
Attributes:
:self._raw_data (Pandas Dataframe): Holds raw data in the same form as excel file. initialized after fetch_raw_data() is called
:self._clean_X_data (Pandas Dataframe): Holds cleaned and prepared X data.
        :self._Y_enrichment (numpy array): Holds continuous Y values
        :self._X_train (Pandas Dataframe): Holds the X training data
        :self._X_test (Pandas Dataframe): Holds the X test data
        :self._Y_train (Pandas Dataframe): Holds the Y training data
        :self._Y_test (Pandas Dataframe): Holds the Y testing data
:self._test_accesion_numbers (list): holds the accesion_numbers
in the test set
"""
_ENRICHMENT_SPLIT_VALUE = 1 # enrichment threshold to classify as bound or unbound
categorical_data = ['Enzyme Commission Number', 'Particle Size', 'Particle Charge', 'Solvent Cysteine Concentration', 'Solvent NaCl Concentration']
columns_to_drop = ['Protein Length', 'Sequence', 'Enrichment', 'Accesion Number']
def __init__(self):
self._raw_data = None
self._clean_X_data = None
self._Y_enrichment = None
self._target = None
self._X_train = None
self._Y_train = None
self._X_test = None
self._Y_test = None
self._test_accesion_numbers = None
# If you want to use our model set this to your csv file using the assignment operator
self._predict = None
def clean_raw_data(self):
""" Cleans the raw data, drops useless columns, one hot encodes, and extracts
class information
Args, Returns: None
"""
self.clean_X_data = self.raw_data
        # Categorize Interprot identifiers with multi-hot encoding
self.clean_X_data = multi_label_encode(self.clean_X_data, 'Interprot')
# one hot encode categorical data
for category in self.categorical_data:
self.clean_X_data = one_hot_encode(self.clean_X_data, category)
# Grab some useful data before dropping from independent variables
self.Y_enrichment = self.clean_X_data['Enrichment']
accesion_numbers = self.clean_X_data['Accesion Number']
# drop useless columns
for column in self.columns_to_drop:
self.clean_X_data = self.clean_X_data.drop(column, 1)
self.clean_X_data = fill_nan(self.clean_X_data, 'Protein Abundance')
self.clean_X_data = normalize_and_reshape(self.clean_X_data, accesion_numbers)
self._target = classify(self.Y_enrichment, self._ENRICHMENT_SPLIT_VALUE) #enrichment or nsaf
self.X_train = self.clean_X_data
self.Y_train = self.target
def clean_user_test_data(self, user_data):
"""This method makes it easy for other people to make predictions
on their data.
called by assignment operator when users set db.predict = <path_to_csv>
Args:
:param user_data: users data they wish to predict
Returns:
None
"""
        # Categorize Interprot identifiers with multi-hot encoding
user_data = multi_label_encode(user_data, 'Interprot')
# one hot encode categorical data
for category in self.categorical_data:
user_data = one_hot_encode(user_data, category)
        # Grab some useful data before dropping from independent variables
self.Y_test = user_data['Enrichment']
accesion_numbers = user_data['Accesion Number']
for column in self.columns_to_drop:
user_data = user_data.drop(column, 1)
user_data = fill_nan(user_data, 'Protein Abundance')
self.X_test = normalize_and_reshape(user_data, accesion_numbers)
self.Y_test = classify(self.Y_test, self._ENRICHMENT_SPLIT_VALUE) # enrichment or nsaf
# Get accession number
self.test_accesion_numbers = self.X_test['Accesion Number']
self.X_train = self.X_train.drop('Accesion Number', 1)
self.X_test = self.X_test.drop('Accesion Number', 1)
def stratified_data_split(self, test_size=0.0):
"""Randomized stratified shuffle split that sets training and testing data
Args:
:param test_size (float): The percentage of data to use for testing
Returns:
None
"""
assert 1.0 >= test_size >= 0.0, "test_size must be between 0 and 1"
assert self.predict is None, "Remove stratified_data_split() if using your own data"
self.X_train, self.X_test, self.Y_train, self.Y_test = model_selection.train_test_split(self.clean_X_data, self.target, test_size = test_size, stratify=self.target, random_state=int((random.random()*100)))
self.test_accesion_numbers = self.X_test['Accesion Number']
self.X_train = self.X_train.drop('Accesion Number', 1)
self.X_test = self.X_test.drop('Accesion Number', 1)
@staticmethod
def fetch_raw_data(enm_database):
"""Fetches enm-protein data from a csv file
called by assignment operator for db.raw_data
Args:
:param enm_database (str): path to csv database
Returns:
None
"""
assert os.path.isfile(enm_database), "please pass a string specifying database location"
dir_path = os.path.dirname(os.path.realpath(__file__))
enm_database = os.path.join(dir_path, enm_database)
try:
raw_data = pd.read_csv(enm_database)
except ValueError:
raise ValueError("File is not a valid csv")
return raw_data
@property
def X_train(self):
if self._X_train is None:
raise ValueError("Initialize X_train by calling stratified_data_split()")
else:
return self._X_train
@property
def X_test(self):
if self._X_test is None:
raise ValueError("Initialize X_test by calling stratified_data_split()")
else:
return self._X_test
@property
def Y_train(self):
if self._Y_train is None:
raise ValueError("Initialize Y_train by calling stratified_data_split()")
else:
return self._Y_train
@property
def Y_test(self):
return self._Y_test
@property
def raw_data(self):
if self._raw_data is None:
raise ValueError("Initialize raw data by setting raw_data=<path.csv>")
return self._raw_data
@property
def clean_X_data(self):
if self._clean_X_data is None:
raise ValueError("Initialize clean_X_data by calling clean_data()")
else:
return self._clean_X_data
@property
def Y_enrichment(self):
if self._Y_enrichment is None:
raise ValueError("Initialize Y_enrichment by calling clean_data()")
else:
return self._Y_enrichment
@property
def target(self):
if self._target is None:
raise ValueError("Initialize target by calling clean_data()")
else:
return self._target
@property
def test_accesion_numbers(self):
if self._test_accesion_numbers is None:
raise ValueError("Initialize test_accesion_numbers by calling stratified_data_split()")
else:
return self._test_accesion_numbers
@property
def predict(self):
return self._predict
@X_train.setter
def X_train(self, path):
if isinstance(path, str) and os.path.isfile(path):
# If trying to set to value from excel
            self._X_train = self.fetch_raw_data(path)
else:
# If trying to set to already imported array
self._X_train = path
@X_test.setter
def X_test(self, path):
if isinstance(path, str) and os.path.isfile(path):
# If trying to set to value from excel
            self._X_test = self.fetch_raw_data(path)
else:
# If trying to set to already imported array
self._X_test = path
@Y_train.setter
def Y_train(self, path):
if isinstance(path, str) and os.path.isfile(path):
# If trying to set to value from excel
            self._Y_train = self.fetch_raw_data(path)
else:
# If trying to set to already imported array
self._Y_train = path
@Y_test.setter
def Y_test(self, path):
if isinstance(path, str) and os.path.isfile(path):
# If trying to set to value from excel
            self._Y_test = self.fetch_raw_data(path)
else:
# If trying to set to already imported array
self._Y_test = path
@raw_data.setter
def raw_data(self, enm_database):
if isinstance(enm_database, str) and os.path.isfile(enm_database):
self._raw_data = self.fetch_raw_data(enm_database)
else:
self._raw_data = enm_database
@clean_X_data.setter
def clean_X_data(self, path):
if isinstance(path, str) and os.path.isfile(path):
# If trying to set to value from excel
            self._clean_X_data = self.fetch_raw_data(path)
else:
# If trying to set to already imported array
self._clean_X_data = path
@Y_enrichment.setter
def Y_enrichment(self, path):
if isinstance(path, str) and os.path.isfile(path):
# If trying to set to value from excel
            self._Y_enrichment = self.fetch_raw_data(path)
else:
# If trying to set to already imported array
self._Y_enrichment = path
@test_accesion_numbers.setter
def test_accesion_numbers(self, path):
if isinstance(path, str) and os.path.isfile(path):
# If trying to set to value from excel
            self._test_accesion_numbers = self.fetch_raw_data(path)
else:
# If trying to set to already imported array
self._test_accesion_numbers = path
@predict.setter
def predict(self, path):
if os.path.isfile(path):
self._predict = self.fetch_raw_data(path)
self._predict = self.clean_user_test_data(self._predict)
else:
self._predict = path
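# Typical use (a sketch based on the class docstring; the CSV paths are illustrative):
#
#   db = data_base()
#   db.raw_data = "Input_Files/database.csv"   # assignment operator triggers fetch_raw_data()
#   db.clean_raw_data()                        # builds clean_X_data, Y_enrichment and target
#   db.stratified_data_split(test_size=0.1)    # sets X_train / X_test / Y_train / Y_test
#
# To score your own data instead, skip the split and point the model at your file:
#   db.predict = "path/to/your_data.csv"       # populates X_test / Y_test from your CSV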
def normalize_and_reshape(data, labels):
"""Normalize and reshape the data by columns while preserving labels
information
Args:
:param data (pandas df): The data to normalize
:param labels (pandas series): The column labels
Returns:
:param data (pandas df): normalized dataframe with preserved column labels
"""
norm_df = preprocessing.MinMaxScaler().fit_transform(data)
data = pd.DataFrame(norm_df,columns=list(data))
data = pd.concat([labels, data], axis=1)
data.reset_index(drop=True, inplace=True)
return data
def classify(data, cutoff):
"""
    This function classifies continuous data.
    In our case we classify particles as bound or unbound
    Args:
        :param data (array): array of continuous data
:param cutoff (float): cutoff value for classification
Returns:
:classified_data(np.array): classified data
"""
if not isinstance(data, np.ndarray):
try:
data = np.array(data)
except TypeError:
print("data could not be converted to type: numpy array")
classified_data = np.empty((len(data)))
for i, val in enumerate(data):
if float(val) >= float(cutoff):
classified_data[i] = 1
else:
classified_data[i] = 0
return classified_data
def fill_nan(data, column):
""" Fills nan values with mean in specified column.
Args:
:param data (pandas Dataframe): Dataframe containing column with nan values
:param column (String): specifying column to fill_nans
Returns:
:data (pandas Dataframe): Containing the column with filled nan values
"""
assert isinstance(data, pd.DataFrame), 'data argument needs to be pandas dataframe'
assert isinstance(column, str), 'Column must be a string'
count = 0
total = 0
for val in data[column]:
if not np.isnan(val):
count += 1
total += val
data[column] = data[column].fillna(total/count)
return data
def one_hot_encode(dataframe, category):
"""This function converts categorical variables into one hot vectors
Args:
:param dataframe (pandas Dataframe): Dataframe containing column to be encoded
:param category (String): specifying the column to encode
Returns:
:dataframe (Pandas Dataframe): With the specified column now encoded into a one
hot representation
"""
assert isinstance(dataframe, pd.DataFrame), 'data argument needs to be pandas dataframe'
dummy = pd.get_dummies(dataframe[category], prefix=category)
dataframe = pd.concat([dataframe, dummy], axis=1)
dataframe.drop(category, axis=1, inplace=True)
return dataframe
def multi_label_encode(dataframe, column):
"""This function is used as a multilabel encoder for the Interprot numbers in the database.
    The Interprot numbers are separated by a semi-colon. We use a multi-label encoder because
    a protein can have several functional domains. This injects a multi-hot style encoding into the database.
Args:
:param dataframe (Pandas Dataframe): Dataframe containing protein data
:param column: (String): Name of column to be multi-label-encoded
Returns:
:new_dataframe (Pandas Dataframe): With new multi label columns
"""
dataframe.reset_index(drop=True, inplace=True)
interprot_identifiers = []
protein_ips = {}
for row, iprot in enumerate(dataframe[column].values):
ip_list = [i for i in iprot.split(';') if i != '']
protein_ips[row] = []
for ip in ip_list:
interprot_identifiers.append(ip)
protein_ips[row].append(ip)
categorical_df = pd.DataFrame(index=np.arange(dataframe.shape[0]), columns=set(interprot_identifiers))
categorical_df = categorical_df.fillna(0)
for key, val in protein_ips.items():
for v in val:
if v != 0:
# categorical_df.set_value(key, v, 1)
categorical_df.at[key, v] = 1
dataframe = dataframe.drop(column, 1)
new_dataframe = pd.concat([dataframe, categorical_df], axis=1)
return new_dataframe
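# Illustrative effect (identifiers are made up): a row whose 'Interprot' cell is
# 'IPR000001;IPR000003' gains new columns IPR000001=1 and IPR000003=1, every other
# Interprot column is 0, and the original 'Interprot' column is dropped.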
def clean_print(obj):
"""
Prints the JSON in a clean format for all my
Biochemistry friends
Args:
:param obj (object): Any object you wish to print in readable format
Returns:
None
"""
if isinstance(obj, dict):
for key, val in obj.items():
if hasattr(val, '__iter__'):
print("\n" + key)
clean_print(val)
else:
print('%s : %s' % (key, val))
elif isinstance(obj, list):
for val in obj:
if hasattr(val, '__iter__'):
clean_print(val)
else:
print(val)
else:
if isinstance(obj, pd.DataFrame):
clean_print(obj.to_dict(orient='records'))
else:
print(str(obj) + "\n")
def to_excel(classification_information):
""" Prints model output to an excel file
Args:
:classification_information (numpy array): Information about results
>classification_information = {
'all_predict_proba' : np.empty([TOTAL_TESTED_PROTEINS], dtype=float),
'all_true_results' : np.empty([TOTAL_TESTED_PROTEINS], dtype=int),
'all_accesion_numbers' : np.empty([TOTAL_TESTED_PROTEINS], dtype=str),
'all_particle_information' : np.empty([2, TOTAL_TESTED_PROTEINS], dtype=int),
'all_solvent_information' : np.empty([3, TOTAL_TESTED_PROTEINS], dtype=int)
}
Returns:
None
"""
with open('prediction_probability.csv', 'w') as file:
file.write('Protein Accesion Number, Particle Type, Solvent Conditions, True Bound Value, Predicted Bound Value, Predicted Probability of Being Bound, Properly Classified\n')
for pred, true_val, protein, particle_s, particle_c, cys, salt8, salt3, in zip(classification_information['all_predict_proba'],
classification_information['all_true_results'],
classification_information['all_accesion_numbers'],
classification_information['all_particle_information'][0],
classification_information['all_particle_information'][1],
classification_information['all_solvent_information'][0],
classification_information['all_solvent_information'][1],
classification_information['all_solvent_information'][2]
):
bound = 'no'
predicted_bound = 'no'
properly_classified = 'no'
particle_charge = 'negative'
particle_size = '10nm'
solvent = '10 mM NaPi pH 7.4'
if int(round(pred)) == true_val:
properly_classified = 'yes'
if true_val == 1:
bound = 'yes'
if int(round(pred)) == 1:
predicted_bound = 'yes'
if particle_s == 0:
particle_size = '100nm'
if particle_c == 1:
particle_charge = 'positive'
if particle_size == '10nm' and particle_charge == 'positive':
particle = '(+) 10 nm AgNP'
if particle_size == '10nm' and particle_charge == 'negative':
particle = '(-) 10 nm AgNP'
if particle_size == '100nm' and particle_charge == 'negative':
particle = '(-) 100 nm AgNP'
if cys == 1:
solvent = '10 mM NaPi pH 7.4 + 0.1 mM cys'
if salt8 == 1:
solvent = '10 mM NaPi pH 7.4 + 0.8 mM NaCl'
if salt3 == 1:
solvent = '10 mM NaPi pH 7.4 + 3.0 mM NaCl'
file.write('{}, {}, {}, {}, {}, {}, {}\n'.format(protein, particle, solvent, bound, predicted_bound,round(pred, 2), properly_classified))
def hold_in_memory(classification_information, metrics, iterations, test_size):
"""Holds classification data in memory to be exported to excel
Args:
:classification_information (dict): container for all the classification_information from all the runs
:metrics (tuple): information from the current test set to add to classification_information
:iterations (int): The current test iterations
:test_size (int): The amount of values in the current test set
Returns:
None
"""
i = iterations
TEST_SIZE = test_size # 10% of training data is used for testing ceil(10% of 3012)=302
PARTICLE_SIZE = 0
PARTICLE_CHARGE = 1
SOLVENT_CYS = 0
SOLVENT_SALT_08 = 1
SOLVENT_SALT_3 = 2
# Information is placed into numpy arrays as blocks
classification_information['all_predict_proba'][i*TEST_SIZE:(i*TEST_SIZE)+TEST_SIZE] = metrics[0]
classification_information['all_true_results'][i*TEST_SIZE:(i*TEST_SIZE)+TEST_SIZE] = metrics[1]
classification_information['all_accesion_numbers'][i*TEST_SIZE:(i*TEST_SIZE)+TEST_SIZE] = metrics[2]
classification_information['all_particle_information'][PARTICLE_CHARGE][i*TEST_SIZE:(i*TEST_SIZE)+TEST_SIZE] = metrics[3]['Particle Charge_1']
classification_information['all_particle_information'][PARTICLE_SIZE][i*TEST_SIZE:(i*TEST_SIZE)+TEST_SIZE] = metrics[3]['Particle Size_10']
classification_information['all_solvent_information'][SOLVENT_CYS][i*TEST_SIZE:(i*TEST_SIZE)+TEST_SIZE] = metrics[3]['Solvent Cysteine Concentration_0.1']
classification_information['all_solvent_information'][SOLVENT_SALT_08][i*TEST_SIZE:(i*TEST_SIZE)+TEST_SIZE] = metrics[3]['Solvent NaCl Concentration_0.8']
classification_information['all_solvent_information'][SOLVENT_SALT_3][i*TEST_SIZE:(i*TEST_SIZE)+TEST_SIZE] = metrics[3]['Solvent NaCl Concentration_3.0']
if __name__ == "__main__":
db = data_base()
    db.clean_raw_data() | StarcoderdataPython |
1742546 | <filename>tools/reindex/reindex.py
import argparse
import time
import reindex_helpers
from config import Config
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
#load the args & config
parser = argparse.ArgumentParser(description="Run the reindex script")
parser.add_argument("--sourceindex", "-s", required=True, help="Source index to copy data from.")
parser.add_argument("--configfile", "-c", default="config.json", required=False, help="Path to the config file to use.")
parser.add_argument("--reqpersec", "-r", type=int, default=-1, required=False, help="Num requests per second to throttle the reindex task.")
parser.add_argument("--slices", default="auto", required=False, help="Num of parallel slices to split the reindex task into. 'auto' lets Elasticsearch choose.")
parser.add_argument("--verify", "-v", action="store_true", required=False, help="Verify the diff between source and target indices instead of running the reindex")
args = parser.parse_args()
print()
print("Running with arguments:")
print(args)
print()
config = Config.load(args.configfile)
print("Using '{0}' as source index and '{1}' as target index.".format(args.sourceindex, config.elasticsearch_index_name))
print()
es = Elasticsearch(hosts=[config.elasticsearch_host],
verify_certs=config.elasticsearch_verify_certs,
timeout=config.elasticsearch_timeout_secs)
if args.verify:
#verifying the data
print ("Verifying data...")
print()
source_search = Search(using=es, index=args.sourceindex)
source_search_query = { "match_all": {}} if config.elasticsearch_query is None else config.elasticsearch_query
source_search = source_search.update_from_dict({
"_source": False,
"query" : source_search_query
})
scan_size = 10000
source_search = source_search.params(size=scan_size)
source_total = 0
target_total = 0
source_ids = []
for hit in source_search.scan():
source_total += 1
source_ids.append(hit.meta["id"])
if source_total % scan_size == 0:
target_total += reindex_helpers.verify_target_docs(es, config, source_ids, scan_size)
source_ids.clear()
print("Verified {0} source documents and {1} target documents...".format(source_total, target_total))
time.sleep(0.01)
if len(source_ids) > 0:
target_total += reindex_helpers.verify_target_docs(es, config, source_ids, scan_size)
print()
print("Verify complete. Source total: {0}. Target total: {1}. (should be equal after successful reindex)"
.format(source_total, target_total))
else:
#reindexing the data
print("Reindexing data...")
print()
reindex_body = {
"source": {
"index": args.sourceindex
},
"dest": {
"index": config.elasticsearch_index_name
}
}
if config.elasticsearch_query is not None:
reindex_body["source"]["query"] = config.elasticsearch_query
res = es.reindex(reindex_body, wait_for_completion=False, refresh=True, requests_per_second=args.reqpersec, slices=args.slices)
task_id = res["task"]
while True:
task = es.tasks.get(task_id)
status = task["task"]["status"]
total = status["total"]
created = status["created"]
updated = status["updated"]
deleted = status["deleted"]
processed = created + updated + deleted
batches = status["batches"]
print("{0} batches complete - processed {1}/{2} documents.".format(batches, processed, total))
if task["completed"] == True:
failures = task["response"]["failures"]
print()
print("Reindexing complete with {0} failure(s).".format(len(failures)))
print("Created: {0}; Updated: {1}; Deleted: {2}; Total: {3}".format(created, updated, deleted, processed))
if (len(failures) > 0):
print()
print("The following failures occurred:")
print()
for fail in failures:
print(fail)
break
time.sleep(5)
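# Example invocations (illustrative; the flags mirror the argparse definitions above):
#   python reindex.py -s old_index -c config.json
#   python reindex.py -s old_index -c config.json --verify
#   python reindex.py -s old_index -r 500 --slices 4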
| StarcoderdataPython |
1733279 | <gh_stars>0
def process_media_attribute(attribute, resp, val):
if val:
if val.startswith('jr://'):
pass
elif val.startswith('/file/'):
val = 'jr:/' + val
elif val.startswith('file/'):
val = 'jr://' + val
elif val.startswith('/'):
val = 'jr://file' + val
else:
val = 'jr://file/' + val
resp['corrections'][attribute] = val
else:
val = None
return val
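# Path normalisation performed above (example paths are made up):
#   'jr://file/commcare/image.png'  -> unchanged
#   '/file/commcare/image.png'      -> 'jr://file/commcare/image.png'
#   'file/commcare/image.png'       -> 'jr://file/commcare/image.png'
#   '/commcare/image.png'           -> 'jr://file/commcare/image.png'
#   'commcare/image.png'            -> 'jr://file/commcare/image.png'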
def handle_media_edits(request, item, should_edit, resp, lang):
if 'corrections' not in resp:
resp['corrections'] = {}
for attribute in ('media_image', 'media_audio'):
if should_edit(attribute):
media_path = process_media_attribute(attribute, resp, request.POST.get(attribute))
item._set_media(attribute, lang, media_path)
| StarcoderdataPython |
68426 | import pytest
from reversion.models import Version
from reversion.revisions import create_revision
from djmoney.money import Money
from .testapp.models import RevisionedModel
@pytest.mark.django_db
def test_that_can_safely_restore_deleted_object():
amount = Money(100, "GHS")
with create_revision():
instance = RevisionedModel.objects.create(amount=amount)
instance.delete()
version = Version.objects.get_deleted(RevisionedModel)[0]
version.revision.revert()
instance = RevisionedModel.objects.get(pk=1)
assert instance.amount == amount
| StarcoderdataPython |
3293062 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
# GN version: //ui/base:unittests
'target_name': 'ui_base_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../net/net.gyp:net',
'../../skia/skia.gyp:skia',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'../../third_party/icu/icu.gyp:icui18n',
'../../third_party/icu/icu.gyp:icuuc',
'../../url/url.gyp:url_lib',
'../events/events.gyp:events_base',
'../gfx/gfx.gyp:gfx_test_support',
'../resources/ui_resources.gyp:ui_resources',
'../resources/ui_resources.gyp:ui_test_pak',
'../strings/ui_strings.gyp:ui_strings',
'ime/ui_base_ime.gyp:ui_base_ime',
'ui_base.gyp:ui_base',
'ui_base.gyp:ui_base_test_support',
],
# iOS uses a small subset of ui. common_sources are the only files that
# are built on iOS.
'common_sources' : [
# Note: file list duplicated in GN build.
'ios/cru_context_menu_controller_unittest.mm',
'l10n/l10n_util_mac_unittest.mm',
'l10n/l10n_util_unittest.cc',
'l10n/l10n_util_win_unittest.cc',
'l10n/time_format_unittest.cc',
'layout_unittest.cc',
'models/tree_node_iterator_unittest.cc',
'resource/data_pack_literal.cc',
'resource/data_pack_unittest.cc',
'resource/resource_bundle_unittest.cc',
'resource/resource_bundle_mac_unittest.mm',
'template_expressions_unittest.cc',
'test/run_all_unittests.cc',
],
'all_sources': [
# Note: file list duplicated in GN build.
'<@(_common_sources)',
'accelerators/accelerator_manager_unittest.cc',
'accelerators/menu_label_accelerator_util_linux_unittest.cc',
'clipboard/custom_data_helper_unittest.cc',
'cocoa/base_view_unittest.mm',
'cocoa/cocoa_base_utils_unittest.mm',
'cocoa/constrained_window/constrained_window_animation_unittest.mm',
'cocoa/controls/blue_label_button_unittest.mm',
'cocoa/controls/hover_image_menu_button_unittest.mm',
'cocoa/controls/hyperlink_button_cell_unittest.mm',
'cocoa/controls/hyperlink_text_view_unittest.mm',
'cocoa/focus_tracker_unittest.mm',
'cocoa/fullscreen_window_manager_unittest.mm',
'cocoa/hover_image_button_unittest.mm',
'cocoa/menu_controller_unittest.mm',
'cocoa/nscolor_additions_unittest.mm',
'cocoa/nsgraphics_context_additions_unittest.mm',
'cocoa/nsview_additions_unittest.mm',
'cocoa/three_part_image_unittest.mm',
'cocoa/tracking_area_unittest.mm',
'dragdrop/os_exchange_data_provider_aurax11_unittest.cc',
'ime/candidate_window_unittest.cc',
'ime/composition_text_unittest.cc',
'ime/chromeos/character_composer_unittest.cc',
'ime/composition_text_util_pango_unittest.cc',
'ime/input_method_base_unittest.cc',
'ime/input_method_chromeos_unittest.cc',
'ime/remote_input_method_win_unittest.cc',
'ime/win/imm32_manager_unittest.cc',
'ime/win/tsf_input_scope_unittest.cc',
'models/list_model_unittest.cc',
'models/list_selection_model_unittest.cc',
'models/tree_node_model_unittest.cc',
'resource/material_design/material_design_controller_unittest.cc',
'test/data/resource.h',
'test/test_clipboard_unittest.cc',
'test/scoped_fake_nswindow_fullscreen_unittest.mm',
'text/bytes_formatting_unittest.cc',
'touch/selection_bound_unittest.cc',
'user_activity/user_activity_detector_unittest.cc',
'view_prop_unittest.cc',
'webui/web_ui_util_unittest.cc',
'x/selection_requestor_unittest.cc',
],
'include_dirs': [
'../..',
],
'conditions': [
['OS!="ios"', {
'sources' : [ '<@(_all_sources)' ],
}, { # OS=="ios"
'sources' : [ '<@(_common_sources)' ],
# The ResourceBundle unittest expects a locale.pak file to exist in
# the bundle for English-US. Copy it in from where it was generated
# by ui_resources.gyp:ui_test_pak.
'mac_bundle_resources': [
'<(PRODUCT_DIR)/ui/en.lproj/locale.pak',
],
'actions': [
{
'action_name': 'copy_test_data',
'variables': {
'test_data_files': [
'test/data',
],
'test_data_prefix' : 'ui/base',
},
'includes': [ '../../build/copy_test_data_ios.gypi' ],
},
],
}],
['OS == "linux" and use_aura==1 and chromeos==0', {
'sources': [
'ime/input_method_auralinux_unittest.cc',
]
}],
['OS == "win"', {
'sources': [
'dragdrop/os_exchange_data_win_unittest.cc',
'win/hwnd_subclass_unittest.cc',
'win/open_file_name_win_unittest.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'd2d1.dll',
'd3d10_1.dll',
],
'AdditionalDependencies': [
'd2d1.lib',
'd3d10_1.lib',
],
},
},
'link_settings': {
'libraries': [
'-limm32.lib',
'-loleacc.lib',
],
},
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
}],
['OS == "android"', {
'dependencies': [
'../../testing/android/native_test.gyp:native_test_native_code',
],
'sources!': [
'touch/selection_bound_unittest.cc',
'user_activity/user_activity_detector_unittest.cc',
],
}],
['use_pango == 1', {
'dependencies': [
'../../build/linux/system.gyp:pangocairo',
],
'conditions': [
['use_allocator!="none"', {
'dependencies': [
'../../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
['use_x11==1', {
'dependencies': [
'../../build/linux/system.gyp:x11',
'../../tools/xdisplaycheck/xdisplaycheck.gyp:xdisplaycheck',
'../events/platform/x11/x11_events_platform.gyp:x11_events_platform',
'../gfx/x/gfx_x11.gyp:gfx_x11',
],
}],
['OS!="win" or use_aura==0', {
'sources!': [
'view_prop_unittest.cc',
],
}],
['use_x11==1 and use_aura==1', {
'sources': [
'cursor/cursor_loader_x11_unittest.cc',
],
}],
['OS=="mac"', {
'dependencies': [
'../../third_party/mozilla/mozilla.gyp:mozilla',
'../events/events.gyp:events_test_support',
'ui_base_tests_bundle',
],
'conditions': [
['component=="static_library"', {
# Needed for mozilla.gyp.
'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-ObjC']},
}],
],
}],
['use_aura==1 or toolkit_views==1', {
'sources': [
'dragdrop/os_exchange_data_unittest.cc',
],
'dependencies': [
'../events/events.gyp:events',
'../events/events.gyp:events_base',
'../events/events.gyp:events_test_support',
'../events/platform/events_platform.gyp:events_platform',
],
}],
['chromeos==1', {
'dependencies': [
'../../chromeos/chromeos.gyp:chromeos',
],
'sources!': [
'dragdrop/os_exchange_data_provider_aurax11_unittest.cc',
'x/selection_requestor_unittest.cc',
],
}],
['use_x11==0', {
'sources!': [
'ime/composition_text_util_pango_unittest.cc',
'ime/input_method_chromeos_unittest.cc',
],
}],
],
'target_conditions': [
['OS == "ios"', {
'sources/': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
['include', '^l10n/l10n_util_mac_unittest\\.mm$'],
],
}],
],
},
],
'conditions': [
# Mac target to build a test Framework bundle to mock out resource loading.
['OS == "mac"', {
'targets': [
{
'target_name': 'ui_base_tests_bundle',
'type': 'shared_library',
'dependencies': [
'../resources/ui_resources.gyp:ui_test_pak',
],
'includes': [ 'ui_base_tests_bundle.gypi' ],
# ui_base_tests_bundle doesn't actually contain a shared library and
# therefore should not depend on sanitizer_options or any other
# libraries. Adding such a dependency will result in creating a
# broken shared library within the bundle.
'conditions': [
['use_sanitizer_options==1', {
'dependencies!': [
'../../build/sanitizers/sanitizers.gyp:sanitizer_options',
],
}],
],
},
],
}],
['OS == "android"', {
'targets': [
{
'target_name': 'ui_base_unittests_apk',
'type': 'none',
'dependencies': [
# TODO(tfarina): This is a layer violation and should be removed.
# crbug.com/176960
# For now this is here as a temporary band-aid to fix the clobber
# issue we are seeing when running this target on Android.
# crbug.com/374490
'../../chrome/chrome_resources.gyp:packed_resources',
'ui_base_unittests',
],
'variables': {
'test_suite_name': 'ui_base_unittests',
'isolate_file': 'ui_base_tests.isolate',
},
'includes': [ '../../build/apk_test.gypi' ],
},
],
'conditions': [
['test_isolation_mode != "noop"',
{
'targets': [
{
'target_name': 'ui_base_unittests_apk_run',
'type': 'none',
'dependencies': [
'ui_base_unittests_apk',
],
'includes': [
'../../build/isolate.gypi',
],
'sources': [
'ui_base_unittests_apk.isolate',
],
},
],
},
],
],
}],
['test_isolation_mode != "noop" and OS != "android"', {
'targets': [
{
'target_name': 'ui_base_unittests_run',
'type': 'none',
'dependencies': [
'ui_base_unittests',
],
'includes': [
'../../build/isolate.gypi',
],
'sources': [
'ui_base_unittests.isolate',
],
'conditions': [
['use_x11 == 1', {
'dependencies': [
'../../tools/xdisplaycheck/xdisplaycheck.gyp:xdisplaycheck',
],
}],
],
},
],
}],
],
}
| StarcoderdataPython |
3278680 | <filename>tests/test_validation.py<gh_stars>0
def test_get_recommendation(client, auth):
auth.login()
registration = {"username": "Niklas35",
"password": "<PASSWORD>",
"email": "<EMAIL>"}
rv1 = client.post('/register', json=registration)
assert rv1.data == b'"Successfully registered and logged in."\n'
def test_validation_recommendation_empty(client, auth):
auth.login()
payload = {}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 401
def test_validation_recommendation_first_name(client, auth):
auth.register()
payload = {
"first_name": "Niklas",
"address": "Musterweg 21",
"occupation": "Employed",
"email_address": "<EMAIL>",
"children": False
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 200
payload = {
"first_name": 1,
"address": "Musterweg 21",
"occupation": "Employed",
"email_address": "<EMAIL>",
"children": False
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 422
assert rv.data == b'{"error":"Expected a str for \'first_name\'."}\n'
payload = {
"address": "Musterweg 21",
"occupation": "Employed",
"email_address": "<EMAIL>",
"children": False
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 422
assert rv.data == b'{"error":"Field \'first_name\' is missing."}\n'
def test_validation_recommendation_address(client, auth):
auth.register()
payload = {
"first_name": "Niklas",
"address": 1,
"occupation": "Employed",
"email_address": "<EMAIL>",
"children": False
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 422
assert rv.data == b'{"error":"Expected a str for \'address\'."}\n'
payload = {
"first_name": "Niklas",
"occupation": "Employed",
"email_address": "<EMAIL>",
"children": False
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 422
assert rv.data == b'{"error":"Field \'address\' is missing."}\n'
def test_validation_recommendation_occupation(client, auth):
auth.register()
payload = {
"first_name": "Niklas",
"address": "Some street",
"occupation": "Wall Street Banker",
"email_address": "<EMAIL>",
"children": False
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 422
def test_validation_recommendation_email(client, auth):
auth.register()
payload = {
"first_name": "Niklas",
"address": "Some street",
"occupation": "Wall Street Banker",
"email_address": "yolo",
"children": False
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 422
def test_validation_recommendation_children(client, auth):
auth.register()
payload = {
"first_name": "Niklas",
"address": "Some street",
"occupation": "Student",
"email_address": "<EMAIL>",
"children": True
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 422
def test_validation_recommendation_children2(client, auth):
auth.register()
payload = {
"first_name": "Niklas",
"address": "Some street",
"occupation": "Student",
"email_address": "<EMAIL>",
"children": True,
"children": 0
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 422
def test_validation_recommendation_children3(client, auth):
auth.register()
payload = {
"first_name": "Niklas",
"address": "Some street",
"occupation": "Student",
"email_address": "<EMAIL>",
"children": False,
"children": 5
}
rv = client.post('/recommendation', json=payload)
assert rv.status_code == 422
| StarcoderdataPython |
3322637 |
# coding: utf-8
# In[29]:
get_ipython().magic('matplotlib inline')
# In[30]:
import os
home_folder = os.path.expanduser("~")
print(home_folder)
# In[31]:
# Change this to the location of your dataset
#data_folder = os.path.join(home_folder, "Data", "Ionosphere")
#data_filename = os.path.join(data_folder, "ionosphere.data")
data_filename = os.path.join('data', 'ionosphere.data')
print(data_filename)
# In[32]:
import csv
import numpy as np
# Size taken from the dataset and is known
X = np.zeros((351, 34), dtype='float')
y = np.zeros((351,), dtype='bool')
with open(data_filename, 'r') as input_file:
reader = csv.reader(input_file)
for i, row in enumerate(reader):
# Get the data, converting each item to a float
data = [float(datum) for datum in row[:-1]]
# Set the appropriate row in our dataset
X[i] = data
# 1 if the class is 'g', 0 otherwise
y[i] = row[-1] == 'g'
# In[33]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=14)
print("There are {} samples in the training dataset".format(X_train.shape[0]))
print("There are {} samples in the testing dataset".format(X_test.shape[0]))
print("Each sample has {} features".format(X_train.shape[1]))
# In[34]:
from sklearn.neighbors import KNeighborsClassifier
estimator = KNeighborsClassifier()
# In[35]:
estimator.fit(X_train, y_train)
# In[38]:
y_predicted = estimator.predict(X_test)
accuracy = np.mean(y_test == y_predicted) * 100
print("The accuracy is {0:.1f}%".format(accuracy))
# In[18]:
from sklearn.model_selection import cross_val_score
# In[39]:
'''
Use cross validation: get one final score
i) cross_val_score
ii) np.mean(cv_score)
'''
scores = cross_val_score(estimator, X, y, scoring='accuracy') #default cv=3
print(scores.shape)
average_accuracy = np.mean(scores) * 100
print("The average accuracy is {0:.1f}%".format(average_accuracy))
# In[44]:
avg_scores = []
all_scores = []
parameter_values = list(range(1, 21)) # Including 20
for n_neighbors in parameter_values:
estimator = KNeighborsClassifier(n_neighbors=n_neighbors)
scores = cross_val_score(estimator, X, y, scoring='accuracy')
avg_scores.append(np.mean(scores))
all_scores.append(scores)
print((avg_scores))
print((all_scores))
# In[21]:
get_ipython().magic('pinfo plt.plot')
# In[22]:
from matplotlib import pyplot as plt
plt.figure(figsize=(32,20))
plt.plot(parameter_values, avg_scores, '-o', linewidth=5, markersize=24)
#plt.axis([0, max(parameter_values), 0, 1.0])
# In[23]:
# loop two lists simultaneously
for parameter, scores in zip(parameter_values, all_scores):
n_scores = len(scores)
plt.plot([parameter] * n_scores, scores, '-o')
# In[24]:
plt.plot(parameter_values, all_scores, 'bx')
# In[25]:
from collections import defaultdict
all_scores = defaultdict(list)  # maps each n_neighbors value to 100 runs of 10 CV scores
parameter_values = list(range(1, 21)) # Including 20
for n_neighbors in parameter_values:
for i in range(100):
estimator = KNeighborsClassifier(n_neighbors=n_neighbors)
scores = cross_val_score(estimator, X, y, scoring='accuracy', cv=10) #return a list of scores
all_scores[n_neighbors].append(scores)
for parameter in parameter_values:
scores = all_scores[parameter]
n_scores = len(scores)
plt.plot([parameter] * n_scores, scores, '-o')
# In[26]:
plt.plot(parameter_values, avg_scores, '-o')
# In[27]:
from sklearn.preprocessing import MinMaxScaler
# In[ ]:
| StarcoderdataPython |
4825863 | <gh_stars>0
# encoding: utf-8
import mimetypes
import re
try:
    from django.urls import reverse
except ImportError:  # Django < 2.0
    from django.core.urlresolvers import reverse
def order_name(name):
"""order_name -- Limit a text to 20 chars length, if necessary strips the
middle of the text and substitute it for an ellipsis.
name -- text to be limited.
"""
name = re.sub(r'^.*/', '', name)
if len(name) <= 37:
return name
return name[:37] + "..." + name[-7:]
def serialize(instance, file_attr='file'):
"""serialize -- Serialize a File instance into a dict.
instance -- File instance
file_attr -- attribute name that contains the FileField or ImageField
"""
obj = getattr(instance, file_attr)
return {
'url': obj.url,
'name': order_name(obj.name),
#'type': mimetypes.guess_type(obj.path)[0] or 'image/png',
'type': mimetypes.guess_type(obj.path)[0],
'thumbnailUrl': obj.url,
'size': obj.size,
'deleteUrl': reverse('upload-delete', args=[instance.pk]),
'deleteType': 'DELETE',
}
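# Sketch of typical use in an upload view (names below are illustrative, not part of
# this module): after saving the model instance that owns the FileField, return the
# serialized dict in the JSON payload that jQuery-File-Upload style widgets expect:
#
#   data = {'files': [serialize(instance)]}
#   return JsonResponse(data)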
| StarcoderdataPython |
1681444 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from epimargin.estimators import analytical_MPVS
from epimargin.etl.commons import download_data
from epimargin.etl.covid19india import data_path, get_time_series, load_all_data
from epimargin.model import Model, ModelUnit
from epimargin.plots import PlotDevice, plot_RR_est, plot_T_anomalies
from epimargin.smoothing import convolution
from epimargin.utils import cwd
# model details
CI = 0.99
smoothing = 10
if __name__ == "__main__":
root = cwd()
data = root/"data"
output = root/"output"
if not data.exists():
data.mkdir()
if not output.exists():
output.mkdir()
# define data versions for api files
paths = {
"v3": [data_path(i) for i in (1, 2)],
"v4": [data_path(i) for i in (3, 4, 5, 6, 7, 8)]
}
# download data from india covid 19 api
for target in paths['v3'] + paths['v4']:
download_data(data, target)
df = load_all_data(
v3_paths = [data/filepath for filepath in paths['v3']],
v4_paths = [data/filepath for filepath in paths['v4']]
)
data_recency = str(df["date_announced"].max()).split()[0]
run_date = str(pd.Timestamp.now()).split()[0]
ts = get_time_series(df[df.detected_state == "Delhi"])
(
dates,
RR_pred, RR_CI_upper, RR_CI_lower,
T_pred, T_CI_upper, T_CI_lower,
total_cases, new_cases_ts,
anomalies, anomaly_dates
) = analytical_MPVS(ts.delta[ts.delta > 0], CI = CI, smoothing = convolution(window = smoothing))
#= analytical_MPVS(ts.Hospitalized[ts.Hospitalized > 0], CI = CI, smoothing = lambda ts: box_filter(ts, smoothing, 10))
np.random.seed(33)
delhi = Model([ModelUnit("Delhi", 18_000_000, I0 = T_pred[-1], RR0 = RR_pred[-1], mobility = 0)])
delhi.run(14, np.zeros((1,1)))
t_pred = [dates[-1] + pd.Timedelta(days = i) for i in range(len(delhi[0].delta_T))]
plot_RR_est(dates, RR_pred, RR_CI_upper, RR_CI_lower, CI)
PlotDevice().title("Delhi: Reproductive Number Estimate").xlabel("Date").ylabel("Rt", rotation=0, labelpad=20)
plt.show()
delhi[0].lower_CI[0] = T_CI_lower[-1]
delhi[0].upper_CI[0] = T_CI_upper[-1]
print(delhi[0].delta_T)
print(delhi[0].lower_CI)
print(delhi[0].upper_CI)
plot_T_anomalies(dates, T_pred, T_CI_upper, T_CI_lower, new_cases_ts, anomaly_dates, anomalies, CI)
plt.scatter(t_pred, delhi[0].delta_T, color = "tomato", s = 4, label = "Predicted Net Cases")
plt.fill_between(t_pred, delhi[0].lower_CI, delhi[0].upper_CI, color = "tomato", alpha = 0.3, label="99% CI (forecast)")
plt.legend()
PlotDevice().title("Delhi: Net Daily Cases").xlabel("Date").ylabel("Cases")
plt.show()
pd.DataFrame(data={
"date" : dates,
"Rt" : RR_pred,
"Rt_CI_upper": RR_CI_upper,
"Rt_CI_lower": RR_CI_lower
}).set_index("date").to_csv(output/"Rt.csv")
pd.DataFrame(data={
"date" : list(dates) + t_pred[1:],
"net_daily_cases" : T_pred + delhi[0].delta_T[1:],
"net_daily_cases_CI_upper": T_CI_upper + delhi[0].upper_CI[1:],
"net_daily_cases_CI_lower": T_CI_lower + delhi[0].lower_CI[1:]
}).set_index("date").to_csv(output/"dT.csv")
| StarcoderdataPython |
3321096 | import math
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter
from skimage import io, measure
from scipy.cluster.vq import kmeans
# can identify up to 255 objects
def mark_objects(image: Image.Image):
new_image = image.copy()
current = 0
free_labels = []
pixels = new_image.load()
for x in range(0, new_image.size[0]):
for y in range(0, new_image.size[1]):
kn = y - 1
if kn <= 0:
B = 0
else:
B = pixels[x, kn]
km = x - 1
if km <= 0:
C = 0
else:
C = pixels[km, y]
A = pixels[x, y]
if A != 0:
if B == 0 and C == 0:
if len(free_labels) > 0:
label = free_labels.pop()
else:
current = current + 1
label = current
pixels[x, y] = label
elif B != 0 and C == 0:
pixels[x, y] = B
elif B == 0 and C != 0:
pixels[x, y] = C
elif B != 0 and C != 0:
pixels[x, y] = B
if B != C:
change_label(new_image, C, B)
free_labels.append(C)
return new_image
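# Editor's note: mark_objects is a one-pass connected-component labelling sweep.
# For each foreground pixel A at (x, y) it inspects the pixel above (B) and the
# pixel to the left (C): it reuses their label when one exists, and when B and C
# carry different labels it merges them via change_label() and recycles the freed
# label, which is how up to 255 distinct objects fit into an 8-bit image.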
def change_label(image, old_label: int, new_label: int):
pixels = image.load()
for x in range(image.size[0]):
for y in range(image.size[1]):
if pixels[x, y] == old_label:
pixels[x, y] = new_label
return
def binarization(image, threshold):
pixels = image.load()
for i in range(image.size[0]):
for j in range(image.size[1]):
pixels[i, j] = 0 if pixels[i, j] <= threshold else 255
return
def generate_unique_values(image, transform_colors=False):
matrix = np.array(image.getdata()).reshape(image.size)
# list of all the unique labels, associated with different objects
# i.e. their colors in grayscale image
unique_labels = np.unique(matrix)
# remove zero, because it is a background
unique_labels = np.delete(unique_labels, 0)
if len(unique_labels) < 25 and transform_colors:
for i in range(0, len(unique_labels)):
change_label(image, int(unique_labels[i]), int(unique_labels[i] * 10))
unique_labels[i] *= 10
return unique_labels
def plot_results(vectors, centroids, amount_of_clusters):
ox = [vector[0] for vector in vectors]
oy = [vector[1] for vector in vectors]
fig = plt.figure()
ax = fig.subplots()
ax.scatter(ox, oy, c='r', marker='x')
for i in range(0, len(ox)):
ax.annotate(
'Object {}'.format(i + 1),
xy=(ox[i], oy[i]),
xytext=(ox[i] + 0.05 * i, oy[i] + 0.05 * i),
arrowprops=dict(
arrowstyle="->",
connectionstyle='angle3',
),
)
for i in range(amount_of_clusters):
ax.scatter(
centroids[i][0],
centroids[i][1],
c='g', marker='o'
)
ax.set_xlabel('Area')
ax.set_ylabel('Eccentricity')
plt.show()
def get_region_vectors_skimage(image_name):
sk_image = io.imread(image_name)
label_image = measure.label(sk_image)
regions = measure.regionprops(
label_image
)
fig, ax = plt.subplots()
ax.imshow(sk_image)
j = 0
for props in regions:
j += 1
y0, x0 = props.centroid
orientation = props.orientation
x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length
y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length
x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length
y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length
ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
ax.plot(x0, y0, '.g', markersize=15)
ax.annotate(
'Object {}'.format(j), xy=(x0, y0), arrowprops=dict(facecolor='black', shrink=0.05),
)
minr, minc, maxr, maxc = props.bbox
bx = (minc, maxc, maxc, minc, minc)
by = (minr, minr, maxr, maxr, minr)
ax.plot(bx, by, '-b', linewidth=2.5)
regions_vectors = []
for i in range(0, len(regions)):
regions_vectors.append([regions[i].area, regions[i].eccentricity])
print(regions[i].label)
return regions_vectors
def main():
initial_image_name = 'images/P0001465.jpg'
final_image_name = 'images/final.pgm'
# open image
initial_image = Image.open(initial_image_name)
image = initial_image.copy().convert("L")
plt.imshow(image)
plt.show()
binarization_threshold = 200
# initial binarization
binarization(image, binarization_threshold)
plt.imshow(image)
plt.show()
# filtering for noise reduction
new_image = image.filter(ImageFilter.MedianFilter(size=5))
# one more binarization to nullify median filtering "averaging" effect on borders
binarization(new_image, binarization_threshold)
plt.imshow(new_image)
plt.show()
# reset 255 to 1 in order to make unique labels sequence consistent and
# all its members have minimal values
change_label(new_image, 255, 1)
# mark objects
marked_image = mark_objects(new_image)
unique_labels = generate_unique_values(marked_image, transform_colors=True)
print('Unique labels:', unique_labels)
# save marked result
marked_image.save(final_image_name)
plt.imshow(marked_image)
plt.show()
# solution with skimage instead of manual computing
regions_vectors = get_region_vectors_skimage(final_image_name)
centroids, _ = kmeans(np.array(regions_vectors), 2)
plot_results(regions_vectors, centroids, 2)
plt.show()
if __name__ == '__main__':
main()
| StarcoderdataPython |
4815186 | import numpy as np
import tensorflow as tf
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
num_puntos = 2000
conjunto_puntos = []
for i in range(num_puntos):
if np.random.random() > 0.5:
x, y = np.random.normal(0.0, 0.9), np.random.normal(0.0, 0.9)
conjunto_puntos.append([x, y])
else:
x, y = np.random.normal(3.0, 0.5), np.random.normal(1.0, 0.5)
conjunto_puntos.append([x, y])
# df = pd.DataFrame({'x': [v[0] for v in conjunto_puntos], 'y':
# [v[1] for v in conjunto_puntos]})
# sns.lmplot('x', 'y', data=df, fit_reg=False, size=6)
# plt.show()
import time
N=2000
K=4
MAX_ITERS = 100
start = time.time()
# srcdata =tf.random_uniform([N,2])
# points = tf.Variable()
points =tf.Variable(conjunto_puntos)
cluster_assignments = tf.Variable(tf.zeros([N], dtype=tf.int64))
# Silly initialization: Use the first K points as the starting
# centroids. In the real world, do this better.
centroids = tf.Variable(tf.slice(points.initialized_value(), [0,0], [K,2]))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Replicate to N copies of each centroid and K copies of each
# point, then subtract and compute the sum of squared distances.
rep_centroids = tf.reshape(tf.tile(centroids, [N, 1]), [N, K, 2])
rep_points = tf.reshape(tf.tile(points, [1, K]), [N, K, 2])
sum_squares = tf.reduce_sum(tf.square(rep_points - rep_centroids),
reduction_indices=2)
# Use argmin to select the lowest-distance point
best_centroids = tf.argmin(sum_squares, 1)
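# Editor's aside (toy sketch, not part of the original script): the same
# assignment step in plain NumPy. Broadcasting an (N, 1, 2) point array against
# a (1, K, 2) centroid array yields the (N, K) squared-distance matrix, and
# argmin over axis 1 picks each point's nearest centroid.
_pts = np.asarray(conjunto_puntos)                                  # (N, 2)
_cents = _pts[:K]                                                   # same "silly" init
_d2 = ((_pts[:, None, :] - _cents[None, :, :]) ** 2).sum(axis=2)    # (N, K)
_assign = _d2.argmin(axis=1)                                        # (N,)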
did_assignments_change = tf.reduce_any(tf.not_equal(best_centroids,
cluster_assignments))
def bucket_mean(data, bucket_ids, num_buckets):
total = tf.unsorted_segment_sum(data, bucket_ids, num_buckets)
count = tf.unsorted_segment_sum(tf.ones_like(data), bucket_ids, num_buckets)
return total / count
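# Editor's aside: bucket_mean is the segment-mean trick — unsorted_segment_sum
# accumulates the coordinates of the points assigned to each cluster, the same
# sum over ones gives the per-cluster counts, and their ratio is the new centroid.
# A commented NumPy sketch of the same idea (reuses the toy arrays above):
#   _sums = np.zeros((K, 2)); np.add.at(_sums, _assign, _pts)
#   _counts = np.bincount(_assign, minlength=K).reshape(-1, 1)
#   _new_centroids = _sums / np.maximum(_counts, 1)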
means = bucket_mean(points, best_centroids, K)
# Do not write to the assigned clusters variable until after
# computing whether the assignments have changed - hence with_dependencies
with tf.control_dependencies([did_assignments_change]):
do_updates = tf.group(
centroids.assign(means),
cluster_assignments.assign(best_centroids))
changed = True
iters = 0
while changed and iters < MAX_ITERS:
iters += 1
[changed, _] = sess.run([did_assignments_change, do_updates])
[centers, assignments] = sess.run([centroids, cluster_assignments])
end = time.time()
print ("Found in %.2f seconds" % (end-start)), iters, "iterations"
print "Centroids:"
print centers
print "Cluster assignments:", assignments
res={"x":[],"y":[],"kmeans_res":[]}
for i in xrange(len(assignments)):
res["x"].append(conjunto_puntos[i][0])
res["y"].append(conjunto_puntos[i][1])
res["kmeans_res"].append(assignments[i])
pd_res=pd.DataFrame(res)
sns.lmplot("x","y",data=pd_res,fit_reg=False,size=5,hue="kmeans_res")
plt.show() | StarcoderdataPython |
1792556 | #!/usr/bin/env python3
from elm327 import ELM327, PROTOCOLS
from optparse import OptionParser
class OptParser( OptionParser ):
def format_epilog( self, formatter ):
return '\n{}\n'.format( '\n'.join( [formatter._format_text( x ) for x in self.epilog.split( '\n' )] ) )
# define your own ignore message byteranges in here
#SUBSETS = {
# 0x20f: (0, 7),
# 0x211: (0, 7),
#}
if __name__ == '__main__':
usage = 'Usage: %prog [options]'
parser = OptParser( epilog='Protocols supported by the ELM327:\n{}'.format( PROTOCOLS ) )
parser.add_option( '-d', '--device', dest='device', help='Path to ELM327 serial device' )
parser.add_option( '-b', '--baudrate', dest='baud_rate', help='Baud rate' )
parser.add_option( '-p', '--protocol', dest='protocol', help='ELM327 message protocol to use' )
parser.add_option( '-f', '--full', action='store_true', default=False, dest='full', help='Show all incoming CAN messages, instead of just changes to messages' )
parser.add_option( '-i', '--ignore', dest='ignore', help='Comma seperated list of message IDs to ignore' )
parser.add_option( '-c', '--can-filter', dest='can_filter', help='Set message ID filter' )
parser.add_option( '-m', '--can-mask', dest='can_mask', help='Set message ID mask' )
(options, argv) = parser.parse_args()
args = {}
if options.device:
args['device'] = options.device
elif len( argv ) >= 1:
args['device'] = argv[0]
if options.baud_rate:
args['baud_rate'] = options.baud_rate
elif len( argv ) >= 2:
args['baud_rate'] = argv[1]
if options.protocol:
args['protocol'] = options.protocol
elif len( argv ) >= 3:
args['protocol'] = argv[2]
elm = ELM327( **args )
elm.reset()
if options.can_filter:
elm.set_can_filter( int( options.can_filter, 0 ) )
if options.can_mask:
elm.set_can_mask( int( options.can_mask, 0 ) )
IGNORE = []
if options.ignore:
IGNORE = [int( x.strip(), 0 ) for x in options.ignore.split(',')]
elm.start_can()
firehose = options.full
last_msg = {}
try:
while True:
msg_id, msg_b = elm.recv_can()
if msg_b:
#if msg_id in SUBSETS:
# msg_b = msg_b[SUBSETS[msg_id][0]:SUBSETS[msg_id][1]]
if msg_id in IGNORE:
pass
if msg_id not in last_msg:
last_msg[msg_id] = msg_b
elif firehose or (last_msg[msg_id] != msg_b):
print( '{0:03x}: {1} -> {2}'.format( msg_id, last_msg[msg_id].hex(), msg_b.hex() ) )
last_msg[msg_id] = msg_b
except EOFError:
print( '-- Hit the end' )
except KeyboardInterrupt:
pass
elm.get_prompt()
| StarcoderdataPython |
1682385 | <filename>nrgpy/nsd_functions.py<gh_stars>10-100
from datetime import date
from nrgpy.utilities import check_platform
import traceback
if check_platform() == 'win32':
try:
import pyodbc
except:
print("pyodbc required for nrg functions")
import pandas as pd
class nsd(object):
"""class for handling NSD files from Symphonie Logger Data.
Parameters
----------
nsd_file : str
path to nsd file to open for reading and writing
Returns
-------
obj
Example
-------
>>> from nrgpy.nsd_functions import nsd
>>> db = nsd(nsd_file="C:/NRG/SiteFiles/0322.nsd")
>>> db.read_channel_settings(channel=1)
>>> db.channel_settings
TimeStamp Channel SensorType SensorDesc SerialNumber Height ScaleFactor Offset PrintPrecision Units SensorDetail SensorNotes
0 1899-12-30 1 1 NRG #40 Anem. m/s SN002618 50 m 0.766 0.332 1 m/s
>>> db.write_channel_settings(channel=1, description="50m CLASS 1 m/s", scale_factor=1, offset=1)
>>> db.read_channel_settings(channel=1)
>>> db.channel_settings
TimeStamp Channel SensorType SensorDesc SerialNumber Height ScaleFactor Offset PrintPrecision Units SensorDetail SensorNotes
0 1899-12-30 1 1 50m CLASS 1 m/s SN002618 50 m 1.0 1.0
"""
from nrgpy.utilities import check_platform
def __init__(self, nsd_file=''):
if check_platform() != 'win32':
print("nsd functions only compatible with Windows")
# return
# 0
self.nsd_file = nsd_file
self.driver_check = self.check_for_jet_drivers()
if self.driver_check == True:
try:
self.conn_str = r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'+r'DBQ='+self.nsd_file+';'
self.conn = pyodbc.connect(self.conn_str)
except Exception as e:
print(traceback.format_exc())
self.e = e
print("whomp, whomp.")
else:
print("Microsoft Access drivers required for these functions.")
print("Download drivers from:")
print("https://www.microsoft.com/en-US/download/details.aspx?id=13255\n\n")
print("Note: Python architecture must match any installed Microsoft Office")
print("architecture (32-bit or 64-bit)")
print("If your MS Office is installed in C:\Program Files (x86), you'll need")
print("the 32-bit version of Python 3+ to use these functions\n\n")
def read_sensor_history(self):
"""read SensorHistory table into dataframe
Returns
-------
obj
sensor_history : pandas dataframe
"""
sql = "SELECT * FROM SensorHistory"
try:
self.sensor_history = pd.read_sql(sql, self.conn)
except Exception as e_sh:
self.sensor_history_e = e_sh
def read_channel_settings(self, channel=0, dash=False):
"""read individual channel settings from sensor history table
Parameters
----------
channel : int
1 through 15 (12 if Sym Classic nsd file)
Returns
-------
obj
pandas dataframe of channel details
"""
sql = "SELECT * FROM SensorHistory WHERE Channel = {0}".format(channel)
try:
if dash == True:
self._channel_settings = pd.read_sql(sql, self.conn)
else:
self.channel_settings = pd.read_sql(sql, self.conn)
except Exception as rcs_e:
self.channel_settings = False
self.rcs_e = rcs_e
def write_channel_settings(self, channel=0, entry=1,
sensor_desc='', print_precision=-9999, units='',
serial_number='', height='',
sensor_detail='', sensor_notes='',
scale_factor=-9999, offset=-9999):
"""write new sensor history to site file
Parameters
----------
channel : int
required; 1 through 15 (or 1 through 12 for Sym Classic)
entry : int
default is 1 for channel baseline values, 2, 3, etc. for newer entries
sensor_desc : string
print_precision : int
1, 2, 3, or 4 or 0 for off
units : string
serial_number : string
height : string
sensor_detail : string
sensor_notes : string
scale_factor : float
offset : float
"""
if channel > 0:
self.read_channel_settings(channel=channel, dash=True)
entry_index = list(range(1,len(self._channel_settings)+1))
self._channel_settings.insert(loc=0, column='entry',value=entry_index)
entry_timestamp = pd.Timestamp(self._channel_settings[self._channel_settings.entry==entry].TimeStamp.item()).to_pydatetime()
channel = " WHERE Channel = {} AND TimeStamp = ?".format(str(channel))
if sensor_desc != '':
sensor_desc = " SensorDesc = '{}',".format(sensor_desc)
if print_precision != -9999:
print_precision = " PrintPrecision = {},".format(str(print_precision))
else:
print_precision = ""
if units != '':
units = " Units = '{}',".format(units)
            if serial_number != '':
                serial_number = " SerialNumber = '{}',".format(serial_number)
if height != '':
height = " Height = '{}',".format(height)
if scale_factor != -9999:
scale_factor = " ScaleFactor = {},".format(str(scale_factor))
else:
scale_factor = ""
if offset != -9999:
offset = " Offset = {},".format(str(offset))
else:
offset = ""
if sensor_detail != "":
sensor_detail = " SensorDetail = '{}',".format(sensor_detail)
if sensor_notes != "":
sensor_notes = " SensorNotes = '{}',".format(sensor_notes)
sql = "UPDATE SensorHistory SET{0}{1}{2}{3}{4}{5}{6}{7}{8}".format(
sensor_desc, print_precision, units, serial_number, height,
str(scale_factor), str(offset), sensor_detail, sensor_notes)[:-1]
self.sql = sql + str(channel) # ''.join([char for char in sql+str(channel)])
self.conn.execute(self.sql, entry_timestamp)
self.conn.commit()
else:
print('specify channel for write "eg: write_channel_settings(channel=10 .. )"')
def add_channel_history(self, timestamp='', channel=0, sensor_type='1',
sensor_desc='', print_precision=4, units='',
serial_number='', height='',
sensor_detail='', sensor_notes='',
scale_factor=-9999, offset=-9999):
"""use for adding new sensor history registries
Parameters
----------
timestamp : string
"YYYY-MM-DD HH:MM:SS"
channel : int
or string, channel number
sensor_type : int
or string, number:
1 : anemometer
2 : totalizer (rain gauge)
3 : vane
4 : analog (temp, bp, rh, etc.)
sensor_desc : string
description
print_precision : int
1 through 4, number of decimals
units : string
serial_number : string
height : float
sensor_detail : note
sensor_notes : note
scale_factor : float
offset : float
"""
try:
sql = """
INSERT INTO SensorHistory
([TimeStamp], Channel, SensorType, SensorDesc, SerialNumber, Height,
ScaleFactor, Offset, PrintPrecision, Units, SensorDetail, SensorNotes)
VALUES
('{0}', '{1}', '{2}', '{3}','{4}','{5}',
'{6}','{7}','{8}','{9}','{10}','{11}');""".format(
timestamp, channel, sensor_type, sensor_desc, serial_number, height,
scale_factor, offset, print_precision, units, sensor_detail, sensor_notes
)
self.conn.execute(sql)
self.conn.commit()
except Exception as e:
print("[ERROR] Unable to add sensor history value")
print(e)
def close(self):
"""close connection to database"""
self.conn.close()
def check_for_jet_drivers(self):
"""check for jet database drivers
Returns
-------
bool
True if drivers present, otherwise False
"""
self.drivers = [x for x in pyodbc.drivers()]
if "Microsoft Access Driver (*.mdb, *.accdb)" in self.drivers:
return True
return False | StarcoderdataPython |
132318 | <reponame>hsiang-ever/django_blog
from urllib.request import urlopen, Request
import json
def getPostList():
# url = 'http://0.0.0.0:5000/posts/'
url = 'https://shorten-url-1491815099304.appspot.com/posts/'
headers = {'Content-Type': 'application/json'}
req = Request(url=url, headers=headers)
res = urlopen(req)
posts = json.loads(res.read())['posts']
return posts
# return res.read()
def getPostDetail(pk):
# url = 'http://0.0.0.0:5000/posts/{}/'.format(pk)
url = 'https://shorten-url-1491815099304.appspot.com/posts/{}/'.format(pk)
headers = {'Content-Type': 'application/json'}
req = Request(url=url, headers=headers)
res = urlopen(req)
post = json.loads(res.read())['post']
return post
def postPostDetail(data):
# url = 'http://0.0.0.0:5000/posts/'
url = 'https://shorten-url-1491815099304.appspot.com/posts/'
headers = {'Content-Type': 'application/json'}
req = Request(url=url, headers=headers, data=data)
res = urlopen(req)
post = json.loads(res.read())['post']
return post
def putPostDetail(pk, data):
# url = 'http://0.0.0.0:5000/posts/{}/'.format(pk)
url = 'https://shorten-url-1491815099304.appspot.com/posts/{}/'.format(pk)
headers = {'Content-Type': 'application/json'}
req = Request(url=url, headers=headers, data=data)
req.get_method = lambda:'PUT'
res = urlopen(req)
post = json.loads(res.read())['post']
return post
def deletePostDetail(pk):
# url = 'http://0.0.0.0:5000/posts/{}/'.format(pk)
url = 'https://shorten-url-1491815099304.appspot.com/posts/{}/'.format(pk)
headers = {'Content-Type': 'application/json'}
req = Request(url=url, headers=headers)
req.get_method = lambda:'DELETE'
res = urlopen(req)
# post = json.loads(res.read())['post']
# return post
| StarcoderdataPython |
3394107 | <reponame>Mephisto405/WCMC-Public<gh_stars>10-100
import os
import sys
import time
import argparse
import matplotlib.pyplot as plt
from collections import OrderedDict
import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import DataLoader
import train_kpcn
import train_sbmc
import train_lbmc
from support.utils import crop_like
from support.img_utils import WriteImg
from support.datasets import FullImageDataset
from support.networks import PathNet, weights_init
from support.metrics import RelMSE, RelL1, SSIM, MSE, L1, _tonemap
def tonemap(c, ref=None, kInvGamma=1.0/2.2):
# c: (W, H, C=3)
if ref is None:
ref = c
luminance = 0.2126 * ref[:,:,0] + 0.7152 * ref[:,:,1] + 0.0722 * ref[:,:,2]
col = np.copy(c)
col[:,:,0] /= (1 + luminance / 1.5)
col[:,:,1] /= (1 + luminance / 1.5)
col[:,:,2] /= (1 + luminance / 1.5)
col = np.clip(col, 0, None)
return np.clip(col ** kInvGamma, 0.0, 1.0)
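# Editor's note: a minimal usage sketch (synthetic data, not part of the test
# pipeline). tonemap() compresses luminance Reinhard-style and applies an
# inverse-gamma curve, returning an LDR image clipped to [0, 1].
_fake_hdr = np.random.rand(4, 4, 3) * 10.0   # stand-in (W, H, 3) radiance
_fake_ldr = tonemap(_fake_hdr)               # in [0, 1], ready for plt.imsave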
def load_input(filename, spp, args):
if 'KPCN' in args.model_name:
dataset = FullImageDataset(filename, spp, 'kpcn',
args.use_g_buf, args.use_sbmc_buf,
args.use_llpm_buf, args.pnet_out_size[0])
elif 'BMC' in args.model_name:
dataset = FullImageDataset(filename, spp, 'sbmc',
args.use_g_buf, args.use_sbmc_buf,
args.use_llpm_buf, 0)
return dataset
def inference(interface, dataloader, spp, args):
interface.to_eval_mode()
H, W = dataloader.dataset.h, dataloader.dataset.w
PATCH_SIZE = dataloader.dataset.PATCH_SIZE
out_rad = torch.zeros((3, H, W)).cuda()
out_path = None
with torch.no_grad():
for batch, i_start, j_start, i_end, j_end, i, j in dataloader:
for k in batch:
if not batch[k].__class__ == torch.Tensor:
continue
batch[k] = batch[k].cuda(args.device_id)
start = time.time()
out, p_buffers = interface.validate_batch(batch)
pad_h = PATCH_SIZE - out.shape[2]
pad_w = PATCH_SIZE - out.shape[3]
if pad_h != 0 and pad_w != 0:
out = nn.functional.pad(out, (pad_w//2, pad_w-pad_w//2, pad_h//2, pad_h-pad_h//2), 'replicate') # order matters
if args.use_llpm_buf and (out_path is None):
if type(p_buffers) == dict:
out_path = {}
for key in p_buffers:
b, s, c, h, w = p_buffers[key].shape
out_path[key] = torch.zeros((s, c, H, W)).cuda()
elif type(p_buffers) == torch.Tensor:
b, s, c, h, w = p_buffers.shape
out_path = torch.zeros((s, c, H, W)).cuda()
else:
assert False, 'P buffer type not defined.'
for b in range(out.shape[0]):
out_rad[:,i_start[b]:i_end[b],j_start[b]:j_end[b]] = out[b,:,i_start[b]-i[b]:i_end[b]-i[b],j_start[b]-j[b]:j_end[b]-j[b]]
if args.use_llpm_buf:
if type(p_buffers) == dict:
for key in p_buffers:
out_path[key][:,:,i_start[b]:i_end[b],j_start[b]:j_end[b]] = p_buffers[key][b,:,:,i_start[b]-i[b]:i_end[b]-i[b],j_start[b]-j[b]:j_end[b]-j[b]]
elif type(p_buffers) == torch.Tensor:
out_path[:,:,i_start[b]:i_end[b],j_start[b]:j_end[b]] = p_buffers[b,:,:,i_start[b]-i[b]:i_end[b]-i[b],j_start[b]-j[b]:j_end[b]-j[b]]
out_rad = out_rad.detach().cpu().numpy().transpose([1, 2, 0])
if args.use_llpm_buf:
if type(out_path) == dict:
for key in out_path:
out_path[key] = out_path[key].detach().cpu().numpy().transpose([2, 3, 0, 1])
elif type(out_path) == torch.Tensor:
out_path = out_path.detach().cpu().numpy().transpose([2, 3, 0, 1])
return out_rad, out_path
def denoise(args, input_dir, output_dir="../test_suite_2", scenes=None, spps=[8], save_figures=False, rhf=False, quantize=False):
assert os.path.isdir(input_dir), input_dir
assert 'KPCN' in args.model_name or 'BMC' in args.model_name, args.model_name
if scenes is None:
scenes = []
for fn in os.listdir(input_dir.replace(os.sep + 'input', os.sep + 'gt')):
if fn.endswith(".npy"):
scenes.append(fn)
num_metrics = 5 * 4 # (RelL2, RelL1, DSSIM, L1, MSE) * (linear, tmap w/o gamma, tmap gamma=2.2, tmap gamma=adaptive)
results = [[0 for i in range(len(scenes))] for j in range(num_metrics * len(spps))]
results_input = [[0 for i in range(len(scenes))] for j in range(num_metrics * len(spps))]
if args.model_name.endswith('.pth'):
p_model = os.path.join(args.save, args.model_name)
else:
p_model = os.path.join(args.save, args.model_name + '.pth')
ck = torch.load(p_model)
print(scenes)
for scene in scenes:
if not scene.endswith(".npy"):
scene = scene + '.npy'
filename = os.path.join(input_dir, scene).replace(os.sep + 'input', os.sep + 'gt')
if not os.path.isfile(filename):
raise FileNotFoundError(filename)
for i, scene in enumerate(scenes):
if scene.endswith(".npy"):
scene = scene[:-4]
print("Scene file: ", scene)
os.makedirs(os.path.join(output_dir, scene), exist_ok=True)
for j, spp in enumerate(spps):
print("Samples per pixel:", spp)
"""
Denoising
"""
# Dateload
filename = os.path.join(input_dir, scene + ".npy")
dataset = load_input(filename, spp, args)
MSPP = 32# if args.pnet_out_size[0] < 12 else 8
if spp <= MSPP:
dataloader = DataLoader(
dataset,
batch_size=8,
num_workers=1
)
elif spp <= 64:
dataloader = DataLoader(
dataset,
batch_size=4,
num_workers=1
)
else:
raise RuntimeError("Try higher spp after investigating your RAM and \
GRAM capacity.")
if i == 0 and j == 0:
datasets = {'train': dataset} # dirty code for now
if 'SBMC' in args.model_name:
interfaces, _ = train_sbmc.init_model(datasets, args)
elif 'LBMC' in args.model_name:
interfaces, _ = train_lbmc.init_model(datasets, args)
elif 'KPCN' in args.model_name:
interfaces, _ = train_kpcn.init_model(datasets, args)
'''
if tensorrt:
engines, contexts = export_and_load_onnx_model(interfaces[0], p_model, dataloader)
return
'''
out_rad, out_path = inference(interfaces[0], dataloader, spp, args)
"""
Post processing
"""
tgt = dataset.full_tgt
ipt = dataset.full_ipt
if out_path is not None:
if rhf:
print('Saving P-buffer as numpy file for RHF-like visualization...')
if 'BMC' in args.model_name:
print('Shape: ', out_path.shape)
np.save(os.path.join(output_dir, 'p_buffer_%s_%s.npy'%(scene, args.model_name)), out_path)
elif 'KPCN' in args.model_name:
print('Shape: ', out_path['diffuse'].shape)
np.save(os.path.join(output_dir, 'p_buffer_%s_%s.npy'%(scene, args.model_name)), out_path['diffuse'])
print('Saved.')
return
if type(out_path) == dict:
for key in out_path:
out_path[key] = np.clip(np.mean(out_path[key], 2), 0.0, 1.0)
assert len(out_path[key].shape) == 3, out_path[key].shape
if out_path[key].shape[2] >= 3:
out_path[key] = out_path[key][...,:3]
else:
tmp = np.mean(out_path[key], 2, keepdims=True)
out_path[key] = np.concatenate((tmp,) * 3, axis=2)
assert out_path[key].shape[2] == 3, out_path[key].shape
elif type(out_path) == torch.Tensor:
out_path = np.clip(np.mean(out_path, 2), 0.0, 1.0)
assert len(out_path.shape) == 3, out_path.shape
if out_path.shape[2] >= 3:
out_path = out_path[...,:3]
else:
tmp = np.mean(out_path, 2, keepdims=True)
out_path = np.concatenate((tmp,) * 3, axis=2)
assert out_path.shape[2] == 3, out_path.shape
# Crop
valid_size = 72
crop = (128 - valid_size) // 2
out_rad = out_rad[crop:-crop, crop:-crop, ...]
if out_path is not None:
if type(out_path) == dict:
for key in out_path:
out_path[key] = out_path[key][crop:-crop, crop:-crop, ...]
elif type(out_path) == torch.Tensor:
out_path = out_path[crop:-crop, crop:-crop, ...]
tgt = tgt[crop:-crop, crop:-crop, ...]
ipt = ipt[crop:-crop, crop:-crop, ...]
# Process the background and emittors which do not require to be denoised
has_hit = dataset.has_hit[crop:-crop, crop:-crop, ...]
out_rad = np.where(has_hit == 0, ipt, out_rad)
"""
Statistics
"""
err = RelMSE(out_rad, tgt, reduce=False)
err = err.reshape(out_rad.shape[0], out_rad.shape[1], 3)
# (RelL2, RelL1, DSSIM, L1, MSE) * (linear, tmap w/o gamma, tmap gamma=2.2, tmap gamma=adaptive)
def linear(x):
return x
def tonemap28(x):
return tonemap(x, kInvGamma = 1/2.8)
metrics = [RelMSE, RelL1, SSIM, L1, MSE]
tmaps = [linear, _tonemap, tonemap, tonemap28]
print(RelMSE(tonemap(out_rad), tonemap(tgt)))
print(RelMSE(tonemap(ipt), tonemap(tgt)))
for t, tmap in enumerate(tmaps):
for k, metric in enumerate(metrics):
results[(len(metrics) * t + k) * len(spps) + j][i] = metric(tmap(out_rad), tmap(tgt))
results_input[(len(metrics) * t + k) * len(spps) + j][i] = metric(tmap(ipt), tmap(tgt))
"""
Save
"""
if save_figures:
t_tgt = tmaps[-1](tgt)
t_ipt = tmaps[-1](ipt)
t_out = tmaps[-1](out_rad)
t_err = np.mean(np.clip(err**0.45, 0.0, 1.0), 2)
plt.imsave(os.path.join(output_dir, scene, 'target.png'), t_tgt)
#WriteImg(os.path.join(output_dir, scene, 'target.pfm'), tgt) # HDR image
plt.imsave(os.path.join(output_dir, scene, 'input_{}.png'.format(spp)), t_ipt)
#WriteImg(os.path.join(output_dir, scene, 'input_{}.pfm'.format(spp)), ipt)
plt.imsave(os.path.join(output_dir, scene, 'output_{}_{}.png'.format(spp, args.model_name)), t_out)
#WriteImg(os.path.join(output_dir, scene, 'output_{}_{}.pfm'.format(spp, args.model_name)), out_rad)
plt.imsave(os.path.join(output_dir, scene, 'errmap_rmse_{}_{}.png'.format(spp, args.model_name)), t_err, cmap=plt.get_cmap('magma'))
#WriteImg(os.path.join(output_dir, scene, 'errmap_{}_{}.pfm'.format(spp, args.model_name)), err.mean(2))
np.savetxt(os.path.join(output_dir, 'results_{}_{}.csv'.format(args.model_name, spps[-1])), results, delimiter=',')
np.savetxt(os.path.join(output_dir, 'results_input_%d.csv'%(spps[-1])), results_input, delimiter=',')
if __name__ == "__main__":
class Args(): # just for compatibility with argparse-related functions
save = '/root/LPM/weights/'
model_name = 'SBMC_v2.0'
single_gpu = True
use_g_buf, use_sbmc_buf, use_llpm_buf = True, True, True
lr_pnet = [1e-4]
lr_ckpt = True
pnet_out_size = [3]
w_manif = [0.1]
manif_learn = False
manif_loss = 'FMSE'
train_branches = False
disentangle = 'm11r11'
kpcn_ref = False
start_epoch = 0
single_gpu = True
device_id = 0
lr_dncnn = 1e-4
visual = False
start_epoch = 10
best_err = 1e4
kpcn_ref = False
kpcn_pre = False
not_save = False
args = Args()
input_dir = '/mnt/ssd2/iycho/KPCN/test2/input/'
scenes = ['bathroom_v3', 'bathroom-3_v2', 'car', 'car_v2', 'car_v3', 'chair-room', 'chair-room_v2', 'hookah_v3', 'kitchen-2', 'kitchen-2_v2', 'library-office', 'sitting-room-2']
spps = [8]
""" Test cases
# LBMC
print('LBMC_Path_P3')
args.model_name = 'LBMC_Path_P3'
args.pnet_out_size = [3]
args.disentangle = 'm11r11'
args.use_g_buf, args.use_sbmc_buf, args.use_llpm_buf, args.manif_learn = True, False, True, False
denoise(args, input_dir, spps=[2,4,8,16,32,64], scenes=scenes, save_figures=True)
print('LBMC_Manifold_P6')
args.model_name = 'LBMC_Manifold_P6'
args.pnet_out_size = [6]
args.disentangle = 'm11r11'
args.use_g_buf, args.use_sbmc_buf, args.use_llpm_buf, args.manif_learn = True, False, True, True
denoise(args, input_dir, spps=[2,4,8,16,32,64], scenes=scenes, save_figures=True)
print('LBMC_vanilla')
args.model_name = 'LBMC_vanilla'
args.pnet_out_size = [0]
args.disentangle = 'm11r11'
args.use_g_buf, args.use_sbmc_buf, args.use_llpm_buf, args.manif_learn = True, False, False, False
denoise(args, input_dir, spps=[2,4,8,16,32,64], scenes=scenes, save_figures=True)
# KPCN
print('KPCN_vanilla')
args.model_name = 'KPCN_vanilla'
args.pnet_out_size = [0]
args.use_llpm_buf, args.manif_learn = False, False
denoise(args, input_dir, spps=spps, scenes=scenes, save_figures=True)
print('KPCN_path')
args.model_name = 'KPCN_path'
args.pnet_out_size = [3]
args.disentangle = 'm11r11'
args.use_llpm_buf, args.manif_learn = True, False
denoise(args, input_dir, spps=spps, scenes=scenes, rhf=True)
# SBMC
print('SBMC_vanilla')
args.model_name = 'SBMC_vanilla'
args.pnet_out_size = [0]
args.disentangle = 'm11r11'
args.use_sbmc_buf, args.use_llpm_buf, args.manif_learn = True, False, False
denoise(args, input_dir, spps=spps, scenes=scenes, save_figures=True)
print('SBMC_path')
args.model_name = 'SBMC_path'
args.pnet_out_size = [3]
args.use_sbmc_buf, args.use_llpm_buf, args.manif_learn = False, True, False
denoise(args, input_dir, spps=spps, scenes=scenes, rhf=True)
print('SBMC_Manifold_Naive')
args.model_name = 'SBMC_Manifold_Naive'
args.pnet_out_size = [3]
args.use_sbmc_buf, args.use_llpm_buf, args.manif_learn = False, True, False
denoise(args, input_dir, spps=spps, scenes=scenes)
"""
| StarcoderdataPython |
3384192 | from game.environment import GameEnvironment
from game.handlers import Handlers
from game.handlers.network import NetworkHandler
from game.handlers.serialize import SerializeHandler
class LeaderBoardHandler(Handlers):
def __init__(self):
        super().__init__()
self.__game_env = GameEnvironment()
self.__serialize_handler = SerializeHandler(self.__game_env.static.leaders_file)
def load(self):
leaders = []
try:
deserialized_object = self.__serialize_handler.deserialize()
if deserialized_object:
leaders = dict(deserialized_object)
except Exception:
self.log('Failed to read leaders from file {}'.format(self.__game_env.static.leaders_file))
finally:
return leaders
def save(self, leaders):
try:
if leaders is None:
return
self.__serialize_handler.serialize(leaders)
except Exception:
self.log('Failed to save leaders to file {}'.format(self.__game_env.static.leaders_file))
async def update(self, api_key):
network_handler = NetworkHandler(api_key)
self.save(await network_handler.get_leaders())
| StarcoderdataPython |
3323943 | <filename>utils/disambiguate.py
import asyncio
import functools
import random
import re
import weakref
from itertools import starmap
import discord
from discord.ext import commands
from .examples import get_example
from .colors import random_color
_ID_REGEX = re.compile(r'([0-9]{15,21})$')
async def disambiguate(ctx, matches, transform=str, *, tries=3):
"""Prompts the user to choose from a list of matches."""
if not matches:
raise commands.BadArgument('No results found.')
num_matches = len(matches)
if num_matches == 1:
return matches[0]
entries = '\n'.join(starmap('{0}: {1}'.format, enumerate(map(transform, matches), 1)))
permissions = ctx.channel.permissions_for(ctx.me)
if permissions.embed_links:
# Build the embed as we go. And make it nice and pretty.
embed = discord.Embed(color=random_color(), description=entries)
embed.set_author(name=f"There were {num_matches} matches found. Which one did you mean?")
index = random.randrange(len(matches))
instructions = f'Just type the number.\nFor example, typing `{index + 1}` will return {matches[index]}'
embed.add_field(name='Instructions:', value=instructions)
message = await ctx.send(embed=embed)
else:
await ctx.send('There are too many matches. Which one did you mean? **Only say the number**.')
message = await ctx.send(entries)
def check(m):
return (m.author.id == ctx.author.id
and m.channel.id == ctx.channel.id
and m.content.isdigit())
await ctx.release()
try:
for i in range(tries):
try:
msg = await ctx.bot.wait_for('message', check=check, timeout=30.0)
except asyncio.TimeoutError:
raise commands.BadArgument('Took too long. Goodbye.')
index = int(msg.content)
try:
return matches[index - 1]
except IndexError:
await ctx.send(f'Please give me a valid number. {tries - i - 1} tries remaining...')
raise commands.BadArgument('Too many tries. Goodbye.')
finally:
await message.delete()
await ctx.acquire()
class _DisambiguateExampleGenerator:
def __get__(self, obj, cls):
cls_name = cls.__name__.replace('Disambiguate', '')
return functools.partial(get_example, getattr(discord, cls_name))
class Converter(commands.Converter):
"""This is the base class for all disambiguating converters.
    By default, if there is more than one thing with a given name, the ext converters will only pick the first result.
These allow you to pick from multiple results.
Especially important when the args become case-insensitive.
"""
_transform = str
__converters__ = weakref.WeakValueDictionary()
random_example = _DisambiguateExampleGenerator()
def __init__(self, *, ignore_case=True):
self.ignore_case = ignore_case
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
Converter.__converters__[cls.__name__] = cls
def _get_possible_entries(self, ctx):
"""Returns an iterable of possible entries to find matches with.
Subclasses must provide this to allow disambiguating.
"""
raise NotImplementedError
def _exact_match(self, ctx, argument):
"""Returns an "exact" match given an argument.
If this returns anything but None, that result will be returned without going through disambiguating.
Subclasses may override this method to provide an "exact" functionality.
"""
return None
# The following predicates can be overridden if necessary
def _predicate(self, obj, argument):
"""Standard predicate for filtering."""
return obj.name == argument
def _predicate_ignore_case(self, obj, argument):
"""Same thing like `predicate` but with case-insensitive filtering."""
return obj.name.lower() == argument
def _get_possible_results(self, ctx, argument):
entries = self._get_possible_entries(ctx)
if self.ignore_case:
lowered = argument.lower()
predicate = self._predicate_ignore_case
else:
lowered = argument
predicate = self._predicate
return [obj for obj in entries if predicate(obj, lowered)]
async def convert(self, ctx, argument):
exact_match = self._exact_match(ctx, argument)
if exact_match:
return exact_match
matches = self._get_possible_results(ctx, argument)
return await disambiguate(ctx, matches, transform=self._transform)
class IDConverter(Converter):
MENTION_REGEX = None
def _get_from_id(self, ctx, id):
"""Returns an object via a given ID."""
raise NotImplementedError
def __get_id_from_mention(self, argument):
return re.match(self.MENTION_REGEX, argument) if self.MENTION_REGEX else None
def _exact_match(self, ctx, argument):
match = _ID_REGEX.match(argument) or self.__get_id_from_mention(argument)
if not match:
return None
return self._get_from_id(ctx, int(match[1]))
class UserConverterMixin:
MENTION_REGEX = r'<@!?([0-9]+)>$'
def _exact_match(self, ctx, argument):
result = super()._exact_match(ctx, argument)
if result is not None:
return result
if not (len(argument) > 5 and argument[-5] == '#'):
# No discriminator provided which makes an exact match impossible
return None
name, _, discriminator = argument.rpartition('#')
return discord.utils.find(
lambda u: u.name == name and u.discriminator == discriminator,
self._get_possible_entries(ctx)
)
class User(UserConverterMixin, IDConverter):
def _get_from_id(self, ctx, id):
return ctx.bot.get_user(id)
def _get_possible_entries(self, ctx):
return ctx._state._users.values()
class Member(UserConverterMixin, IDConverter):
def _get_from_id(self, ctx, id):
return ctx.guild.get_member(id)
def _get_possible_entries(self, ctx):
return ctx.guild._members.values()
# Overriding these is necessary due to members having nicknames
def _predicate(self, obj, argument):
return super()._predicate(obj, argument) or (obj.nick and obj.nick == argument)
def _predicate_ignore_case(self, obj, argument):
return (
super()._predicate_ignore_case(obj, argument)
or (obj.nick and obj.nick.lower() == argument)
)
class Role(IDConverter):
MENTION_REGEX = r'<@&([0-9]+)>$'
def _get_from_id(self, ctx, id):
return discord.utils.get(self._get_possible_entries(ctx), id=id)
def _get_possible_entries(self, ctx):
return ctx.guild.roles
class TextChannel(IDConverter):
MENTION_REGEX = r'<#([0-9]+)>$'
def _get_from_id(self, ctx, id):
return ctx.guild.get_channel(id)
def _get_possible_entries(self, ctx):
return ctx.guild.text_channels
class Guild(IDConverter):
def _get_from_id(self, ctx, id):
return ctx.bot.get_guild(id)
def _get_possible_entries(self, ctx):
return ctx._state._guilds.values()
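# Editor's sketch (not from the original cog set): these converters are meant to
# be used as parameter annotations on ext.commands callbacks, e.g.
#
#     @commands.command()
#     async def whois(ctx, *, member: Member(ignore_case=True)):
#         await ctx.send(member.mention)
#
# When several members share the given name, `disambiguate` prompts the invoker
# to pick one by number instead of silently taking the first match.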
def _is_discord_py_type(cls):
module = getattr(cls, '__module__', '')
return module.startswith('discord.') and not module.endswith('converter')
def _disambiguated(type_):
"""Return the corresponding disambiguating converter if one exists
for that type.
If no such converter exists, it returns the type.
"""
if not _is_discord_py_type(type_):
return type_
return Converter.__converters__.get(type_.__name__, type_)
def _get_current_parameter(ctx):
parameters = list(ctx.command.params.values())
# Need to account for varargs and consume-rest kwarg only
index = min(len(ctx.args) + len(ctx.kwargs), len(parameters) - 1)
return parameters[index]
class Union(commands.Converter):
_transform = '{0} ({0.__class__.__name__})'.format
def __init__(self, *types, ignore_case=True):
self.types = [
type_(ignore_case=ignore_case)
if isinstance(type_, type) and issubclass(type_, Converter)
else type_
for type_ in map(_disambiguated, types)
]
async def convert(self, ctx, argument):
param = _get_current_parameter(ctx)
results = []
for converter in self.types:
if isinstance(converter, Converter):
exact = converter._exact_match(ctx, argument)
if exact is not None:
return exact
results.extend(converter._get_possible_results(ctx, argument))
else:
# Standard type, so standard conversion
try:
result = await ctx.command.do_conversion(ctx, converter, argument, param)
except commands.BadArgument:
continue
else:
results.append(result)
return await disambiguate(ctx, results, transform=self._transform)
def random_example(self, ctx):
return get_example(random.choice(self.types), ctx)
| StarcoderdataPython |
30659 | <reponame>davidfotsa/Numerical_Methods_With_Python
# -*- coding: utf-8 -*-
def a(i,x,X,Y):
rep=1
for j in range(min(len(X),len(Y))):
if (i!=j):
rep*=(x-X[j])/(X[i]-X[j])
return (rep)
def P(x,X,Y):
rep=0
for i in range(min(len(X),len(Y))):
rep+=a(i,x,X,Y)*Y[i]
return (rep)
X=[-2,0,1,2]
Y=[49,5,7,49]
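# Quick sanity check (editor's addition): a Lagrange interpolant must reproduce
# the data exactly at the nodes, i.e. P(X[i], X, Y) == Y[i] for every i.
assert all(abs(P(X[i], X, Y) - Y[i]) < 1e-9 for i in range(len(X)))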
#x=float(input(" Vous voulez estimer f(x) pour x= "))
#print(P(x,X,Y))
from numpy.polynomial import Polynomial as poly
x=poly([0,1]) # Polynôme p(x)=0+x
p=P(x,X,Y)
print(p)
x=poly1d([1,0]) # Polynôme p(x)=0+x
p=P(x,X,Y)
print(p)
print(p.order)
print(p.coeffs)
print(p.roots)
print(roots(p))
print(p(0))
print(polyval(p,0))
print("%f"%(p.coef[0],),end=" ")
if (len(p)>1):
if (p.coef[1]>0):
print("+%f*x" %(p.coef[1],),end=" ")
elif (p.coef[1]<0):
print("%f*x" %(p.coef[1],),end=" ")
i=2
while (i<len(p)-1):
if (p.coef[i]>0):
print("+%f*x^%d" %(p.coef[i],i,),end=" ")
elif (p.coef[i]<0):
print("%f*x^%d" %(p.coef[i],i,),end=" ")
i=i+1
if (len(p)>1):
if (p.coef[len(p)-1]>0):
print("+%f*x^%d" %(p.coef[len(p)-1],len(p)-1,),end=" ")
elif (p.coef[len(p)-1]<0):
print("%f*x^%d" %(p.coef[len(p)-1],len(p)-1,),end=" ") | StarcoderdataPython |
3379016 | import dicom
from numpy import *
import SimpleITK as sitk
import os
def read_ct_scan(path, verbose=False):
# type: (object) -> object
# Read the slices from the dicom file
slices = []
if os.path.isfile(path):
try:
return sitk.ReadImage(path)
except:
if verbose:
print('Neither a DICOM nor a MHD file: %s' % os.path.basename(path))
if os.path.isdir(path):
files = os.listdir(path)
for filename in files:
try:
slices.append(dicom.read_file(os.path.join(path, filename)))
except dicom.filereader.InvalidDicomError:
if verbose:
print('Neither a DICOM nor a MHD file: %s' % filename)
slices.sort(key=lambda x: int(x.InstanceNumber))
try:
slice_thickness = abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except AttributeError:
slice_thickness = abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def extract_array(ct_scan):
heights = asarray([int(ct_slice.SliceLocation)for ct_slice in ct_scan])
ct_scan = stack([ct_slice.pixel_array for ct_slice in ct_scan])
ct_scan[ct_scan == ct_scan.min()] = 0
return ct_scan, heights
def get_pixels_hu(slices):
try:
image = stack([s.pixel_array for s in slices])
except AttributeError:
return sitk.GetArrayFromImage(slices)
    # Convert to int16 (from sometimes uint16),
# should be possible as values should always be low enough (<32k)
image = image.astype(int16)
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == image.min()] = 0
# Convert to Hounsfield units (HU)
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1:
image[slice_number] = slope * image[slice_number].astype(float64)
image[slice_number] = image[slice_number].astype(int16)
image[slice_number] += int16(intercept)
return array(image, dtype=int16)
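# Editor's note: the loop above applies the standard DICOM affine rescale
#   HU = RescaleSlope * stored_value + RescaleIntercept
# so with the usual slope 1 / intercept -1024, raw air (~0) maps to about
# -1024 HU and water to about 0 HU.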
| StarcoderdataPython |
1607274 | <reponame>nanjekyejoannah/pypy
def test_process_prompt():
from pyrepl.reader import Reader
r = Reader(None)
assert r.process_prompt("hi!") == ("hi!", 3)
assert r.process_prompt("h\x01i\x02!") == ("hi!", 2)
assert r.process_prompt("hi\033[11m!") == ("hi\033[11m!", 3)
assert r.process_prompt("h\x01i\033[11m!\x02") == ("hi\033[11m!", 1)
assert r.process_prompt("h\033[11m\x01i\x02!") == ("h\033[11mi!", 2)
| StarcoderdataPython |
3398006 | <gh_stars>1-10
import tensorflow as tf
import numpy as np
#import re
import os
model_dir = './product-recognition/inception'
image = './product-recognition/pic/靴子/238320.png'
# Convert class IDs to human-readable labels
class NodeLookup(object):
def __init__(self, label_lookup_path=None, uid_lookup_path=None):
if not label_lookup_path:
            # Load the "label_lookup_path" file.
            # It maps each of the dataset's classes (1-1000) to an identifier called
            # target_class_string, encoded as "n********" where the asterisks are digits.
label_lookup_path = os.path.join(
model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
            # Load the "uid_lookup_path" file.
            # It maps each concrete class name in the dataset to its "n********" identifier/UID.
uid_lookup_path = os.path.join(
model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
if not tf.gfile.Exists(uid_lookup_path):
            # Check beforehand that the path exists
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
            # Check beforehand that the path exists
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
        # Build the mapping (dict) uid_to_human from the "n********" UID strings to class names.
        # Read all lines from uid_lookup_path.
# readlines(): Returns all lines from the file in a list.
# Leaves the '\n' at the end.
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
        # Create an empty dict uid_to_human to hold the mapping
uid_to_human = {}
# =============================================================================
#         # Alternative: process the file with a regular expression:
#         p = re.compile(r'[n\d]*[ \S,]*')
#         for line in proto_as_ascii_lines:
#             parsed_items = p.findall(line)
#             uid = parsed_items[0]
#             human_string = parsed_items[2]
#             uid_to_human[uid] = human_string
# =============================================================================
        # Simple approach: process the file line by line.
        # Read the data one line at a time.
for line in proto_as_ascii_lines:
            # strip the trailing newline
line = line.strip('\n')
            # split the line on '\t' (tab) into two parts
parse_items = line.split('\t')
            # get the class code, i.e. the UID
uid = parse_items[0]
            # get the class name
human_string = parse_items[1]
            # record the mapping (dict) uid_to_human from the "n********" UID string to the class name
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
        # Build the mapping (dict) from the "n********" UID strings to the class numbers (node IDs).
        # Load the file that maps each "n********" UID to a class number 1-1000.
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
        # Create an empty dict node_id_to_uid to hold the node ID -> UID mapping
node_id_to_uid = {}
for line in proto_as_ascii:
            # note the leading spaces in the matched prefixes
if line.startswith(' target_class:'):
                # get the class number
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
                # get the UID (with double quotes, e.g. "n01484850")
target_class_string = line.split(': ')[1]
                # strip the surrounding quotes and record the mapping
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
        # Build the mapping from node ID to class name
node_id_to_name = {}
for key, val in node_id_to_uid.items():
            # raise an error if the UID is missing from uid_to_human
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
            # get the class name
name = uid_to_human[val]
            # map class number 1-1000 to class name: key is node_id, val is name
node_id_to_name[key] = name
return node_id_to_name
    # Given a class number 1-1000, return the concrete class name
def id_to_string(self, node_id):
        # return an empty string if the ID is unknown
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
# Read and create a graph to hold Google's pretrained Inception_v3 model (function)
def create_graph():
with tf.gfile.FastGFile(os.path.join(
model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
# read the image
image_data = tf.gfile.FastGFile(image, 'rb').read()
# create the graph
create_graph()
# Create a session; since we restore from an existing Inception_v3 model, no initialization is needed
with tf.Session() as sess:
    # Output of the final softmax layer of the Inception_v3 model.
    # A name like 'conv1' is a node name, while 'conv1:0' is a tensor name, i.e. the node's first output tensor.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    # Feed the image data (jpg) and get the softmax probabilities (a vector of shape (1, 1008))
predictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})
    # flatten the result to 1-D
predictions = np.squeeze(predictions)
    # Build the lookup: ID --> English string label.
node_lookup = NodeLookup()
    # Sort and take the 5 classes with the highest probability (top-5).
    # argsort() returns the indices that would sort the array in ascending order.
top_5 = predictions.argsort()[-5:][::-1]
for node_id in top_5:
        # get the class name
human_string = node_lookup.id_to_string(node_id)
        # get the confidence score for this class
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score)) | StarcoderdataPython |
128882 | <reponame>DouglasUrner/markdown-pp
# Copyright 2015 <NAME>
# Licensed under the MIT license
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import re
from os import path
from MarkdownPP.Module import Module
from MarkdownPP.Transform import Transform
class Include(Module):
"""
Module for recursively including the contents of other files into the
current document using a command like `!INCLUDE "path/to/filename"`.
Target paths can be absolute or relative to the file containing the command
"""
# matches !INCLUDE directives in .mdpp files
includere = re.compile(r"^!INCLUDE\s+(?:\"([^\"]+)\"|'([^']+)')"
"\s*(?:,\s*(\d+))?\s*$")
# matches title lines in Markdown files
titlere = re.compile(r"^(:?#+.*|={3,}|-{3,})$")
# includes should happen before anything else
priority = 0
def transform(self, data):
transforms = []
linenum = 0
for line in data:
match = self.includere.search(line)
if match:
includedata = self.include(match)
transform = Transform(linenum=linenum, oper="swap",
data=includedata)
transforms.append(transform)
linenum += 1
return transforms
def include(self, match, pwd=""):
# file name is caught in group 1 if it's written with double quotes,
# or group 2 if written with single quotes
filename = match.group(1) or match.group(2)
shift = int(match.group(3) or 0)
if not path.isabs(filename):
filename = path.join(pwd, filename)
try:
f = open(filename, "r")
data = f.readlines()
f.close()
# line by line, apply shift and recursively include file data
linenum = 0
for line in data:
match = self.includere.search(line)
if match:
dirname = path.dirname(filename)
data[linenum:linenum+1] = self.include(match, dirname)
if shift:
titlematch = self.titlere.search(line)
if titlematch:
to_del = []
for _ in range(shift):
if data[linenum][0] == '#':
data[linenum] = "#" + data[linenum]
elif data[linenum][0] == '=':
data[linenum] = data[linenum].replace("=", '-')
elif data[linenum][0] == '-':
data[linenum] = '### ' + data[linenum - 1]
to_del.append(linenum - 1)
for l in to_del:
del data[l]
linenum += 1
return data
except (IOError, OSError) as exc:
print(exc)
return []
| StarcoderdataPython |
1796351 | <filename>laetoli/ontologies.py
# Laetoli Areas Vocabulary
laetoli = 'Laetoli'
kakesio = 'Kakesio'
esere = 'Esere'
LAETOLI_AREAS = (
(laetoli, 'Laetoli'),
(kakesio, 'Kakesio'),
(esere, 'Esere-Noiti'),
)
# Laetoli Stratographic Units
ngaloba = 'Ngaloba Beds'
qngaloba = '?Ngaloba Beds'
olpiro = 'Olpiro Beds'
naibadad = 'Naibadad Beds'
olgol = 'Olgol Lavas'
ndolanya = 'Ndolanya Beds'
upper_ndolanya = 'Upper Ndolanya Beds'
lower_ndolanya = 'Lower Ndolanya Beds'
laetolil = 'Laetolil Beds'
upper_laetolil = 'Upper Laetolil Beds'
lower_laetolil = 'Lower Laetolil Beds'
LAETOLI_UNITS = (
(ngaloba, 'Ngaloba Beds'),
(qngaloba, '?Ngaloba Beds'),
(olpiro,'Olpiro Beds'),
(naibadad, 'Naibadad Beds'),
(olgol, 'Olgol Lavas'),
(ndolanya, 'Ndolanya Beds'),
(upper_ndolanya, 'Upper Ndolanya Beds'),
    (lower_ndolanya, 'Lower Ndolanya Beds'),
(laetolil, 'Laetolil Beds'),
(upper_laetolil, 'Upper Laetolil Beds'),
(lower_laetolil, 'Lower Laetolil Beds'),
)
LIFE_STAGE_CHOICES = (
('infant', 'infant'),
('juvenile', 'juvenile')
)
SIZE_CLASS_CHOICES = (
('indeterminate', 'indeterminate'),
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5')
)
| StarcoderdataPython |
27084 | <gh_stars>0
# Get instance
import instaloader
import json
L = instaloader.Instaloader(max_connection_attempts=0)
# Login or load session
username = ''
password = ''
L.login(username, password) # (login)
# Obtain profile metadata
instagram_target = ''
profile = instaloader.Profile.from_username(L.context, instagram_target)
following_list = []
count=1
for followee in profile.get_followees():
username = followee.username
following_list.append(username)
print(str(count) + ". " + username)
count = count + 1
following_list_json = json.dumps(following_list)
open("list_following_" + instagram_target +".json","w").write(following_list_json)
print("selesai")
print("cek file json di file : list_following_" + instagram_target +".json") | StarcoderdataPython |
35409 | from .nodes import Host, HostSchema, Session, SessionSchema, Project, SSHKey
| StarcoderdataPython |
4829034 | <filename>backend/project-director/authentication/settings.py<gh_stars>0
TOKEN_LENGTH = 10
REFRESH_TOKEN = '<PASSWORD>'
ACCESS_TOKEN = 'access-token' | StarcoderdataPython |
27611 | <gh_stars>1-10
import os
import socket
from typing import Any, Dict, Optional
import hummingbot.connector.derivative.binance_perpetual.constants as CONSTANTS
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_methods import using_exchange
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
from hummingbot.core.web_assistant.auth import AuthBase
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest
from hummingbot.core.web_assistant.rest_pre_processors import RESTPreProcessorBase
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
CENTRALIZED = True
EXAMPLE_PAIR = "BTC-USDT"
DEFAULT_FEES = [0.02, 0.04]
BROKER_ID = "x-3QreWesy"
class BinancePerpetualRESTPreProcessor(RESTPreProcessorBase):
async def pre_process(self, request: RESTRequest) -> RESTRequest:
if request.headers is None:
request.headers = {}
request.headers["Content-Type"] = (
"application/json" if request.method == RESTMethod.POST else "application/x-www-form-urlencoded"
)
return request
def get_client_order_id(order_side: str, trading_pair: object):
nonce = get_tracking_nonce()
symbols: str = trading_pair.split("-")
base: str = symbols[0].upper()
quote: str = symbols[1].upper()
base_str = f"{base[0]}{base[-1]}"
quote_str = f"{quote[0]}{quote[-1]}"
client_instance_id = hex(abs(hash(f"{socket.gethostname()}{os.getpid()}")))[2:6]
return f"{BROKER_ID}-{order_side.upper()[0]}{base_str}{quote_str}{client_instance_id}{nonce}"
def rest_url(path_url: str, domain: str = "binance_perpetual", api_version: str = CONSTANTS.API_VERSION):
base_url = CONSTANTS.PERPETUAL_BASE_URL if domain == "binance_perpetual" else CONSTANTS.TESTNET_BASE_URL
return base_url + api_version + path_url
def wss_url(endpoint: str, domain: str = "binance_perpetual"):
base_ws_url = CONSTANTS.PERPETUAL_WS_URL if domain == "binance_perpetual" else CONSTANTS.TESTNET_WS_URL
return base_ws_url + endpoint
def build_api_factory(auth: Optional[AuthBase] = None) -> WebAssistantsFactory:
api_factory = WebAssistantsFactory(auth=auth, rest_pre_processors=[BinancePerpetualRESTPreProcessor()])
return api_factory
def is_exchange_information_valid(exchange_info: Dict[str, Any]) -> bool:
"""
Verifies if a trading pair is enabled to operate with based on its exchange information
:param exchange_info: the exchange information for a trading pair
:return: True if the trading pair is enabled, False otherwise
"""
return exchange_info.get("status", None) == "TRADING"
KEYS = {
"binance_perpetual_api_key": ConfigVar(
key="binance_perpetual_api_key",
prompt="Enter your Binance Perpetual API key >>> ",
required_if=using_exchange("binance_perpetual"),
is_secure=True,
is_connect_key=True,
),
"binance_perpetual_api_secret": ConfigVar(
key="binance_perpetual_api_secret",
prompt="Enter your Binance Perpetual API secret >>> ",
required_if=using_exchange("binance_perpetual"),
is_secure=True,
is_connect_key=True,
),
}
OTHER_DOMAINS = ["binance_perpetual_testnet"]
OTHER_DOMAINS_PARAMETER = {"binance_perpetual_testnet": "binance_perpetual_testnet"}
OTHER_DOMAINS_EXAMPLE_PAIR = {"binance_perpetual_testnet": "BTC-USDT"}
OTHER_DOMAINS_DEFAULT_FEES = {"binance_perpetual_testnet": [0.02, 0.04]}
OTHER_DOMAINS_KEYS = {
"binance_perpetual_testnet": {
# add keys for testnet
"binance_perpetual_testnet_api_key": ConfigVar(
key="binance_perpetual_testnet_api_key",
prompt="Enter your Binance Perpetual testnet API key >>> ",
required_if=using_exchange("binance_perpetual_testnet"),
is_secure=True,
is_connect_key=True,
),
"binance_perpetual_testnet_api_secret": ConfigVar(
key="binance_perpetual_testnet_api_secret",
prompt="Enter your Binance Perpetual testnet API secret >>> ",
required_if=using_exchange("binance_perpetual_testnet"),
is_secure=True,
is_connect_key=True,
),
}
}
| StarcoderdataPython |
1650466 | <reponame>Khan/pyobjc-framework-Cocoa
from Cocoa import NSBezierPath, NSMakePoint
from MyBaseGradientView import *
class MyBezierGradientView (MyBaseGradientView):
def init(self):
        self = super(MyBezierGradientView, self).init()
if self is None:
return None
self.myOffsetPt = NSMakePoint(0.0, 0.0)
return self
def drawRect_(self, rect):
self.resetGradient()
bezierPath = NSBezierPath.alloc().init()
bezierPath.appendBezierPathWithOvalInRect_(rect)
if self.myIsRadial:
self.myGradient.drawInBezierPath_relativeCenterPosition_(bezierPath, self.myOffsetPt)
else:
self.myGradient.drawInBezierPath_angle_(bezierPath, self.myAngle)
| StarcoderdataPython |
1653340 | #!/usr/bin/env python3
import sys
sys.exit("[ - ] Sedang perbaikan, mohon tunggu update")
| StarcoderdataPython |
4836389 | <reponame>warsaw/pkg-gunicorn
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import t
import functools
import os
import sys
from gunicorn import config
from gunicorn.app.base import Application
from gunicorn.workers.sync import SyncWorker
dirname = os.path.dirname(__file__)
def cfg_file():
return os.path.join(dirname, "config", "test_cfg.py")
def paster_ini():
return os.path.join(dirname, "..", "examples", "frameworks", "pylonstest", "nose.ini")
class AltArgs(object):
def __init__(self, args=None):
self.args = args or []
self.orig = sys.argv
def __enter__(self):
sys.argv = self.args
def __exit__(self, exc_type, exc_inst, traceback):
sys.argv = self.orig
class NoConfigApp(Application):
def __init__(self):
super(NoConfigApp, self).__init__("no_usage", prog="gunicorn_test")
def init(self, parser, opts, args):
pass
def load(self):
pass
def test_defaults():
c = config.Config()
for s in config.KNOWN_SETTINGS:
t.eq(c.settings[s.name].validator(s.default),
c.settings[s.name].get())
def test_property_access():
c = config.Config()
for s in config.KNOWN_SETTINGS:
getattr(c, s.name)
# Class was loaded
t.eq(c.worker_class, SyncWorker)
# Workers defaults to 1
t.eq(c.workers, 1)
c.set("workers", 3)
t.eq(c.workers, 3)
# Address is parsed
t.eq(c.address, [("127.0.0.1", 8000)])
# User and group defaults
t.eq(os.geteuid(), c.uid)
t.eq(os.getegid(), c.gid)
# Proc name
t.eq("gunicorn", c.proc_name)
# Not a config property
t.raises(AttributeError, getattr, c, "foo")
# Force to be not an error
class Baz(object):
def get(self):
return 3.14
c.settings["foo"] = Baz()
t.eq(c.foo, 3.14)
# Attempt to set a cfg not via c.set
t.raises(AttributeError, setattr, c, "proc_name", "baz")
# No setting for name
t.raises(AttributeError, c.set, "baz", "bar")
def test_bool_validation():
c = config.Config()
t.eq(c.preload_app, False)
c.set("preload_app", True)
t.eq(c.preload_app, True)
c.set("preload_app", "true")
t.eq(c.preload_app, True)
c.set("preload_app", "false")
t.eq(c.preload_app, False)
t.raises(ValueError, c.set, "preload_app", "zilch")
t.raises(TypeError, c.set, "preload_app", 4)
def test_pos_int_validation():
c = config.Config()
t.eq(c.workers, 1)
c.set("workers", 4)
t.eq(c.workers, 4)
c.set("workers", "5")
t.eq(c.workers, 5)
c.set("workers", "0xFF")
t.eq(c.workers, 255)
c.set("workers", True)
t.eq(c.workers, 1) # Yes. That's right...
t.raises(ValueError, c.set, "workers", -21)
t.raises(TypeError, c.set, "workers", c)
def test_str_validation():
c = config.Config()
t.eq(c.proc_name, "gunicorn")
c.set("proc_name", " foo ")
t.eq(c.proc_name, "foo")
t.raises(TypeError, c.set, "proc_name", 2)
def test_str_to_list_validation():
c = config.Config()
t.eq(c.forwarded_allow_ips, ["127.0.0.1"])
c.set("forwarded_allow_ips", "127.0.0.1,192.168.0.1")
t.eq(c.forwarded_allow_ips, ["127.0.0.1", "192.168.0.1"])
c.set("forwarded_allow_ips", "")
t.eq(c.forwarded_allow_ips, [])
c.set("forwarded_allow_ips", None)
t.eq(c.forwarded_allow_ips, [])
t.raises(TypeError, c.set, "forwarded_allow_ips", 1)
def test_callable_validation():
c = config.Config()
def func(a, b):
pass
c.set("pre_fork", func)
t.eq(c.pre_fork, func)
t.raises(TypeError, c.set, "pre_fork", 1)
t.raises(TypeError, c.set, "pre_fork", lambda x: True)
def test_callable_validation_for_string():
from os.path import isdir as testfunc
t.eq(
config.validate_callable(-1)("os.path.isdir"),
testfunc
)
# invalid values tests
t.raises(
TypeError,
config.validate_callable(-1), ""
)
t.raises(
TypeError,
config.validate_callable(-1), "os.path.not_found_func"
)
t.raises(
TypeError,
config.validate_callable(-1), "notfoundmodule.func"
)
def test_cmd_line():
with AltArgs(["prog_name", "-b", "blargh"]):
app = NoConfigApp()
t.eq(app.cfg.bind, ["blargh"])
with AltArgs(["prog_name", "-w", "3"]):
app = NoConfigApp()
t.eq(app.cfg.workers, 3)
with AltArgs(["prog_name", "--preload"]):
app = NoConfigApp()
t.eq(app.cfg.preload_app, True)
def test_app_config():
with AltArgs():
app = NoConfigApp()
for s in config.KNOWN_SETTINGS:
t.eq(app.cfg.settings[s.name].validator(s.default),
app.cfg.settings[s.name].get())
def test_load_config():
with AltArgs(["prog_name", "-c", cfg_file()]):
app = NoConfigApp()
t.eq(app.cfg.bind, ["unix:/tmp/bar/baz"])
t.eq(app.cfg.workers, 3)
t.eq(app.cfg.proc_name, "fooey")
def test_cli_overrides_config():
with AltArgs(["prog_name", "-c", cfg_file(), "-b", "blarney"]):
app = NoConfigApp()
t.eq(app.cfg.bind, ["blarney"])
t.eq(app.cfg.proc_name, "fooey")
def test_default_config_file():
default_config = os.path.join(os.path.abspath(os.getcwd()),
'gunicorn.conf.py')
with open(default_config, 'w+') as default:
default.write("bind='0.0.0.0:9090'")
t.eq(config.get_default_config_file(), default_config)
with AltArgs(["prog_name"]):
app = NoConfigApp()
t.eq(app.cfg.bind, ["0.0.0.0:9090"])
os.unlink(default_config)
def test_post_request():
c = config.Config()
def post_request_4(worker, req, environ, resp):
return 4
def post_request_3(worker, req, environ):
return 3
def post_request_2(worker, req):
return 2
c.set("post_request", post_request_4)
t.eq(4, c.post_request(1, 2, 3, 4))
c.set("post_request", post_request_3)
t.eq(3, c.post_request(1, 2, 3, 4))
c.set("post_request", post_request_2)
t.eq(2, c.post_request(1, 2, 3, 4))
def test_nworkers_changed():
c = config.Config()
def nworkers_changed_3(server, new_value, old_value):
return 3
c.set("nworkers_changed", nworkers_changed_3)
t.eq(3, c.nworkers_changed(1, 2, 3))
| StarcoderdataPython |
1657316 | import collections
import io
import json
import logging
import string
import sys
from lxml import etree
logger = logging.getLogger("righter")
class StateController:
def __init__(self):
self.writing = {}
self.change = {}
self.inside_writing = False
self.inside_change = False
self.writing_failed = False
def _is_change_valid(self, change):
if not change.get('symbol'):
return False
if change['symbol'] in ('SP', 'C', 'NSW'):
required_keys = {'symbol', 'selection', 'start'}
if required_keys - set(change.keys()):
return False
return True
def start_writing(self, writing_id, level):
self.writing_failed = False
self.writing = {
"id": writing_id,
"level": level,
"text": "",
"changes": [],
}
self.inside_writing = True
def set_nationality(self, nationality):
self.writing['nationality'] = nationality
def set_topic(self, topic):
self.writing['topic'] = topic
def set_grade(self, grade):
self.writing["grade"] = grade
def end_writing(self):
self.inside_writing = False
def start_change(self):
self.inside_change = True
self.change = {
"start": self.offset
}
def end_change(self):
self.inside_change = False
self.update_text(self.change.get("selection"))
# ignore changes without symbols (they are impossible to analyse
# anyway)
if self._is_change_valid(self.change):
self.writing["changes"].append(self.change)
def set_selection(self, selection):
if selection:
self.change["selection"] = selection
def set_symbol(self, symbol):
if symbol == 'NSW':
symbol = 'SP'
self.change["symbol"] = symbol
def set_correct(self, correct):
if correct:
self.change["correct"] = correct
@property
def offset(self):
return len(self.writing["text"])
def _needs_space(self, a, b):
if not a or not b:
return False
if any(map(a.endswith, string.punctuation)) and not any(map(b.startswith, string.whitespace)):
return True
elif not any(map(a.endswith, string.whitespace)):
return not any(map(b.startswith, string.whitespace + string.punctuation))
return False
def update_text(self, text):
if text:
if self._needs_space(self.writing["text"], text):
separator = ' '
else:
separator = ''
self.writing["text"] += separator + text
def _parse_text(controller, blob):
"""Parses blob, extracted from text CDATA using XML Parser."""
stream = io.BytesIO(blob.encode('utf8'))
for event, element in etree.iterparse(stream, events=('start', 'end')):
if element.tag == 'text' and event == 'start':
controller.update_text(element.text)
elif element.tag == 'change':
if event == 'start':
controller.start_change()
else:
controller.end_change()
controller.update_text(element.tail)
elif element.tag == 'selection' and event == 'end':
controller.set_selection(element.text)
elif element.tag == 'symbol' and event == 'end':
controller.set_symbol(element.text)
elif element.tag == 'correct' and event == 'end':
controller.set_correct(element.text)
elif element.tag == 'br' and event == 'end':
if element.tail:
controller.update_text('\n{}'.format(element.tail))
else:
controller.update_text('\n')
elif not controller.inside_change and event == 'end':
controller.update_text(element.tail)
def parse(xml_file):
"""Given a xml file-like object, returns a list of dicts with two keys:
- text: original student writing
- changes: list of dicts with the fields:
- symbol: error type, according to the list
x>>y - change from x to y
AG - agreement
AR - article
CO - combine sentences
C - capitalization
D - delete
EX - expression of idiom
HL - highlight
I(x) - insert x
MW - missing word
NS - new sentence
NSW - no such word
PH - phraseology
PL - plural
PO - possessive
PR - preposition
PS - part of speech
PU - punctuation
SI - singular
SP - spelling
VT - verb tense
WC - word choice
WO - word order
- selection: excerpt of the original text
- start: indice where the word is in the text (in unicode characters)
- correct: if this field exists it is the teacher suggested correction
"""
controller = StateController()
for event, element in etree.iterparse(xml_file, events=('start', 'end')):
if element.tag == 'writing':
if event == 'start':
controller.start_writing(element.get('id'), element.get('level'))
else:
controller.end_writing()
if not controller.writing_failed:
yield controller.writing
# keep etree from keeping the entire tree in memory
element.clear()
elif element.tag == 'learner' and event == 'end':
if element.get('nationality'):
controller.set_nationality(element.get('nationality'))
elif element.tag == 'topic' and event == 'end':
if element.get('id'):
controller.set_topic(element.get('id'))
elif element.tag == 'grade' and event == 'end':
if element.text:
try:
controller.set_grade(int(element.text))
except ValueError:
pass
elif element.tag == 'text' and event == 'end':
try:
_parse_text(controller, '<text>{}</text>'.format(element.text))
except etree.XMLSyntaxError:
logger.warn("Text for writing <%s> is invalid XML", controller.writing.get('id'))
controller.writing_failed = True
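# Minimal usage sketch (hypothetical file name; parse() is a generator that
# yields one dict per <writing> element):
#
#     with open("writings.xml", "rb") as xml_file:
#         for writing in parse(xml_file):
#             print(writing["id"], len(writing["changes"]))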
| StarcoderdataPython |
3230285 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 02:05:23 2022
@author: Sagi
"""
'''
Sample choice node text:
;-BLOCK-------------------------------------------------------------------------
*f20 # Label
gosub *regard_update
!sd
if %sceneskip==1 && %1020==1 skip 4
gosub *s20
mov %1020,1
skip 9
`You have already viewed this scene.
`Would you like to skip?
br
selgosub `1. Skip`, *skip20,
`2. Don't skip`, *s20
skip 3
*skip20
return
;gosub *s20
select `1. There's only a few minutes until homeroom. I have to head there right away.`, *f21,
`2. || I'm curious, so I'll go take a look.`, *f22
'''
import re
from Graph import *
class TextNode():
def __init__(self, label=None, text=None, children=None):
if label is not None:
self.label = label
else:
self.label = None
if text is not None:
self.text = text
else:
self.text = ""
if children is not None:
self.children = children
else:
self.children = []
def get_text(self):
if self.text:
return self.text
else:
return None
def get_label(self):
if self.label:
return self.label
else:
return None
def add_text(self, text):
self.text += text
def change_label(self, label):
self.label = label
def add_children(self, children):
self.children += children
class ChoiceNode(TextNode):
def add_choices(self, choices):
self.choices = choices
def get_choices(self):
if self.choices:
return self.choices
else:
return None
class TsukihimeNode(TextNode):
def get_labels(self, string):
return re.findall("\*.*(?=,)|\*.*(?=\s)|\*.*", string)
def parse_text(self):
if self.text is None:
print("No text to parse")
return -1
line_ctr = 0
lines = self.text.splitlines()
no_lines = len(lines)
while (line_ctr < no_lines):
if lines[line_ctr].find("select") != -1:
children = []
while (line_ctr < no_lines
and re.search("`[0-9].*`", lines[line_ctr])):
children += self.get_labels(lines[line_ctr])
line_ctr += 1
self.add_children(children)
elif lines[line_ctr].find("goto") != -1:
self.add_children(self.get_labels(lines[line_ctr]))
line_ctr += 1
class NscriptParser(Graph):
# method to parse the script
def parse(self):
nscript = open("./nsdec/NSDEC/result.txt", encoding="cp932")
line = nscript.readline()
header = open("./parsed_texts/header.txt", "w", encoding="cp932")
remaining = open("./parsed_texts/remaining.txt", "w", encoding="cp932")
choices = open("./parsed_texts/choices.txt", "w", encoding="cp932")
choice_nodes = []
nodes = []
nodes_present = False
while (line and line.strip() != "*start"):
header.writelines(line)
line = nscript.readline()
while (line and line.strip() != "; $Id: 4.txt 1282 2006-08-04 18:12:29Z chendo $"):
if re.match("\*f.*", line):
nodes_present = True
choice_nodes.append(TsukihimeNode(text=""))
if nodes_present:
choice_nodes[-1].add_text(line)
if re.match("^\*f", line):
choice_nodes[-1].change_label(line.strip())
choices.writelines(line)
line = nscript.readline()
while (line):
if re.match("^\*", line):
nodes.append(TextNode(line))
remaining.writelines(line)
line = nscript.readline()
nscript.close()
header.close()
remaining.close()
choices.close()
choice_nodes = list(filter(lambda x: x.get_label() is not None, choice_nodes))
for node in choice_nodes:
node.parse_text()
for node in choice_nodes:
self.graph.add_node(node.label)
for child in node.children:
if child not in self.graph:
self.graph.add_node(child)
self.graph.add_edge(node.label, child)
return choice_nodes
if __name__ == "__main__":
parser = NscriptParser()
choice_nodes = parser.parse()
leveled_tree = parser.get_leveled_tree()
output = parser.output_tree_sideways()
with open("ouput.txt", "w") as outfile:
outfile.write(output)
#parser.plot()
#parser.plot_pretty()
| StarcoderdataPython |
3305538 | import unittest
from more_itertools import (
one,
)
from azul import (
config,
)
from azul.es import (
ESClientFactory,
)
from azul.indexer import (
BundleFQID,
)
from azul.indexer.document import (
AggregateCoordinates,
CataloguedEntityReference,
ContributionCoordinates,
)
from azul.logging import (
configure_test_logging,
)
from indexer.test_hca_indexer import (
IndexerTestCase,
)
# noinspection PyPep8Naming
def setUpModule():
configure_test_logging()
class TestDataExtractorTestCase(IndexerTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.es_client = ESClientFactory.get()
def setUp(self) -> None:
super().setUp()
self.index_service.create_indices(self.catalog)
def tearDown(self) -> None:
self.index_service.delete_indices(self.catalog)
super().tearDown()
def test_hca_extraction(self):
bundle_fqids = [
BundleFQID('17a3d288-01a0-464a-9599-7375fda3353d', '2018-03-28T151023.074974Z'),
BundleFQID('2a87dc5c-0c3c-4d91-a348-5d784ab48b92', '2018-03-29T104041.822717Z'),
BundleFQID('4afbb0ea-81ad-49dc-9b12-9f77f4f50be8', '2018-03-29T090403.442059Z'),
BundleFQID('aaa96233-bf27-44c7-82df-b4dc15ad4d9d', '2018-11-04T113344.698028Z'),
BundleFQID('b0850e79-5544-49fe-b54d-e29b9fc3f61f', '2018-03-29T090340.934358Z'),
BundleFQID('c94a43f9-257f-4cd0-b2fe-eaf6d5d37d18', '2018-03-29T090343.782253Z')
]
for bundle_fqid in bundle_fqids:
self._index_canned_bundle(bundle_fqid)
for aggregate in True, False:
with self.subTest(aggregate=aggregate):
def index_name(entity_type):
return config.es_index_name(catalog=self.catalog,
entity_type=entity_type,
aggregate=aggregate)
total_projects = self.es_client.count(index=index_name('projects'), doc_type='doc')
# Three unique projects, six project contributions
self.assertEqual(3 if aggregate else 6, total_projects["count"])
total_files = self.es_client.count(index=index_name('files'), doc_type='doc')
self.assertEqual(776, total_files["count"])
total_samples = self.es_client.count(index=index_name('samples'), doc_type='doc')
self.assertEqual(129, total_samples["count"])
# When two processes point at a file (this is the case for most files in production)
# there was a bug where the files index contains duplicate dictionaries for the file.
#
def test_no_duplicate_files_in_specimen(self):
bundle_fqid = BundleFQID('8543d32f-4c01-48d5-a79f-1c5439659da3', '2018-03-29T143828.884167Z')
self._index_canned_bundle(bundle_fqid)
for aggregate in True, False:
with self.subTest(aggregate=aggregate):
entity = CataloguedEntityReference(catalog=self.catalog,
entity_type='samples',
entity_id='b3623b88-c369-46c9-a2e9-a16042d2c589')
if aggregate:
coordinates = AggregateCoordinates(entity=entity)
else:
coordinates = ContributionCoordinates(entity=entity,
bundle=bundle_fqid,
deleted=False)
result = self.es_client.get(index=coordinates.index_name,
doc_type=coordinates.type,
id=coordinates.document_id)
files = result['_source']['contents']['files']
num_files = 2 # fastqs
if aggregate:
self.assertEqual(num_files, one(files)['count'])
else:
file_ids = [f['uuid'] for f in files]
self.assertEqual(num_files, len(file_ids))
self.assertEqual(num_files, len(set(file_ids)))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
4825826 | from flask_login import current_user, login_user, logout_user, login_required
from flask import render_template, redirect, url_for, flash, g, json
from app.models import Inventory, Event, User, Description
from app.forms import LoginForm
from app.auth import login_check
from app import app, login, db
from json2html import *
""" Grabs username of the user logged in """
@app.before_request
def before_request():
g.user = current_user
""" Login """
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
username, password = form.username.data, form.password.data
access = login_check(username, password)
if access is None:
flash('Invalid username or password')
return redirect(url_for('login'))
user = User.query.filter_by(username=access).first()
print("1. User is authenticated:", user.is_authenticated)
print("Current user:", current_user)
if user is None:
print("No user found")
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
print("2. User is authenticated:", user.is_authenticated)
print("Current user:", current_user)
return redirect(url_for('index'))
return render_template('login.html', form=form)
""" logout. Cant get to this page unless logged in """
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
""" landing page - all detectors """
@app.route('/')
@app.route('/index')
@login_required
def index():
detectors = Inventory.query.order_by(Inventory.detectorid).all()
events = []
for detector in detectors:
if detector.isactive:
event = Event.query.filter_by(detectorid=detector.detectorid).order_by(Event.eventid.desc()).first()
events.append(event)
return render_template("index.html", detectors=detectors, events=events)
""" compact mode view """
@app.route('/view/compact')
@login_required
def compact():
detectors = Inventory.query.order_by(Inventory.detectorid).all()
events = []
for detector in detectors:
if detector.isactive:
event = Event.query.filter_by(detectorid=detector.detectorid).order_by(Event.eventid.desc()).first()
events.append(event)
return render_template("compact.html", detectors=detectors, events=events)
""" when clicking detector """
@app.route('/description/<int:id>')
@login_required
def description(id):
detector = Inventory.query.filter_by(detectorid=id).first()
if detector.isactive:
event = Event.query.filter_by(detectorid=detector.detectorid).order_by(Event.eventid.desc()).first()
if event is not None:
description = Description.query.filter_by(eventid=event.eventid).order_by(Description.eventid.desc()).first()
if description.contenttype == "json":
info = description.descriptiondetails
info = json2html.convert(json=info, table_attributes="id=\"info-table\" class=\"sortable rgb\"")
else:
info = description.descriptiondetails
return render_template("description.html", detector=detector, event=event, description=description, info=info)
""" When clicking history """
@app.route('/history/<int:id>')
@login_required
def history(id):
detector = Inventory.query.filter_by(detectorid=id).first()
if detector.isactive:
event = Event.query.filter_by(detectorid=detector.detectorid).order_by(Event.eventid.desc())
return render_template("history.html", detector=detector, event=event)
""" When clicking area """
@app.route('/<area>')
@login_required
def filter_area(area):
area = area
detectors = Inventory.query.filter_by(area=area).all()
events = []
for detector in detectors:
if detector.isactive:
event = Event.query.filter_by(detectorid=detector.detectorid).order_by(Event.eventid.desc()).first()
events.append(event)
return render_template("index.html", detectors=detectors, events=events)
| StarcoderdataPython |
3266472 | <reponame>rkwojdan/flair35
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import shutil
from flair.data import Dictionary
from flair.trainers.language_model_trainer import TextCorpus
def test_train_resume_language_model_training(resources_path, results_base_path, tasks_base_path):
dictionary = Dictionary.load(u'chars')
corpus = TextCorpus((resources_path / u'corpora/lorem_ipsum'),
dictionary, forward=True, character_level=True)
assert (corpus.test is not None)
assert (corpus.train is not None)
assert (corpus.valid is not None)
assert (len(corpus.train) == 2)
def test_generate_text_with_small_temperatures():
from flair.embeddings import FlairEmbeddings
language_model = FlairEmbeddings(u'news-forward-fast').lm
(text, likelihood) = language_model.generate_text(
temperature=0.01, number_of_characters=100)
assert (text is not None)
assert (len(text) >= 100)
| StarcoderdataPython |
1707600 | from django.conf.urls import url
from django.urls import include
from rest_framework_extensions.routers import (
ExtendedDefaultRouter as DefaultRouter
)
from .views import AuthorViewSet
router = DefaultRouter()
authors_router = router.register(
r'authors', AuthorViewSet, 'authors'
)
urlpatterns = [
url(r'', include(router.urls))
]
| StarcoderdataPython |
1720412 | <reponame>thecodeboy/tink
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.tools.testing.python.testing_server."""
import os
import signal
import subprocess
import time
from typing import Text
from absl import logging
from absl.testing import absltest
import grpc
import portpicker
from tink import aead
from proto.testing import testing_api_pb2
from proto.testing import testing_api_pb2_grpc
def _server_path() -> Text:
dir_path = os.path.dirname(os.path.abspath(__file__))
return os.path.join(dir_path, 'testing_server')
class TestingServerTest(absltest.TestCase):
_server = None
_channel = None
_keyset_stub = None
_aead_stub = None
@classmethod
def setUpClass(cls):
super().setUpClass()
port = portpicker.pick_unused_port()
cls._server = subprocess.Popen([
_server_path(), '-port', '%d' % port,
], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
logging.info('Server started on port %d with pid: %d.',
port, cls._server.pid)
cls._channel = grpc.secure_channel('[::]:%d' % port,
grpc.local_channel_credentials())
grpc.channel_ready_future(cls._channel).result()
cls._keyset_stub = testing_api_pb2_grpc.KeysetStub(cls._channel)
cls._aead_stub = testing_api_pb2_grpc.AeadStub(cls._channel)
@classmethod
def tearDownClass(cls):
cls._channel.close()
logging.info('Stopping server...')
cls._server.send_signal(signal.SIGINT)
time.sleep(2)
if cls._server.poll() is None:
cls._server.kill()
super().tearDownClass()
def test_generate_encrypt_decrypt(self):
t = time.time()
template = aead.aead_key_templates.AES128_GCM.SerializeToString()
gen_request = testing_api_pb2.GenerateKeysetRequest(template=template)
gen_response = self._keyset_stub.Generate(gen_request)
self.assertEmpty(gen_response.err)
keyset = gen_response.keyset
plaintext = b'The quick brown fox jumps over the lazy dog'
associated_data = b'associated_data'
enc_request = testing_api_pb2.AeadEncryptRequest(
keyset=keyset, plaintext=plaintext, associated_data=associated_data)
enc_response = self._aead_stub.Encrypt(enc_request)
self.assertEmpty(enc_response.err)
ciphertext = enc_response.ciphertext
dec_request = testing_api_pb2.AeadDecryptRequest(
keyset=keyset, ciphertext=ciphertext, associated_data=associated_data)
dec_response = self._aead_stub.Decrypt(dec_request)
self.assertEmpty(dec_response.err)
self.assertEqual(dec_response.plaintext, plaintext)
logging.info('Testing took %s s', time.time() - t)
def test_generate_decrypt_fail(self):
template = aead.aead_key_templates.AES128_GCM.SerializeToString()
gen_request = testing_api_pb2.GenerateKeysetRequest(template=template)
gen_response = self._keyset_stub.Generate(gen_request)
self.assertEmpty(gen_response.err)
keyset = gen_response.keyset
ciphertext = b'some invalid ciphertext'
associated_data = b'associated_data'
dec_request = testing_api_pb2.AeadDecryptRequest(
keyset=keyset, ciphertext=ciphertext, associated_data=associated_data)
dec_response = self._aead_stub.Decrypt(dec_request)
logging.info('Error in response: %s', dec_response.err)
self.assertNotEmpty(dec_response.err)
self.assertEmpty(dec_response.plaintext)
if __name__ == '__main__':
absltest.main()
| StarcoderdataPython |
195961 | <gh_stars>1-10
#!/usr/bin/env python
# ===--- generate_harness.py ----------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===----------------------------------------------------------------------===//
# Generate CMakeLists.txt and utils/main.swift from templates.
import jinja2
import os
import glob
import re
script_dir = os.path.dirname(os.path.realpath(__file__))
perf_dir = os.path.realpath(os.path.join(script_dir, '../..'))
single_source_dir = os.path.join(perf_dir, 'single-source')
multi_source_dir = os.path.join(perf_dir, 'multi-source')
template_map = {
'CMakeLists.txt_template': os.path.join(perf_dir, 'CMakeLists.txt'),
'main.swift_template': os.path.join(perf_dir, 'utils/main.swift')
}
ignored_run_funcs = ["Ackermann", "Fibonacci"]
template_loader = jinja2.FileSystemLoader(searchpath="/")
template_env = jinja2.Environment(loader=template_loader, trim_blocks=True,
lstrip_blocks=True)
if __name__ == '__main__':
# CMakeList single-source
tests = [os.path.basename(x).split('.')[0]
for x in glob.glob(os.path.join(single_source_dir, '*.swift'))]
# CMakeList multi-source
class multi_source_bench(object):
def __init__(self, path):
self.name = os.path.basename(path)
self.files = [x for x in os.listdir(path)
if x.endswith('.swift')]
if os.path.isdir(multi_source_dir):
multisource_benches = [
multi_source_bench(os.path.join(multi_source_dir, x))
for x in os.listdir(multi_source_dir)
if os.path.isdir(os.path.join(multi_source_dir, x))
]
else:
multisource_benches = []
# main.swift imports
imports = sorted(tests + [msb.name for msb in multisource_benches])
# main.swift run functions
def get_run_funcs(filepath):
content = open(filepath).read()
matches = re.findall(r'func run_(.*?)\(', content)
return filter(lambda x: x not in ignored_run_funcs, matches)
def find_run_funcs(dirs):
ret_run_funcs = []
for d in dirs:
for root, _, files in os.walk(d):
for name in filter(lambda x: x.endswith('.swift'), files):
run_funcs = get_run_funcs(os.path.join(root, name))
ret_run_funcs.extend(run_funcs)
return ret_run_funcs
run_funcs = sorted(
[(x, x)
for x in find_run_funcs([single_source_dir, multi_source_dir])],
key=lambda x: x[0]
)
# Replace originals with files generated from templates
for template_file in template_map:
template_path = os.path.join(script_dir, template_file)
template = template_env.get_template(template_path)
print template_map[template_file]
open(template_map[template_file], 'w').write(
template.render(tests=tests,
multisource_benches=multisource_benches,
imports=imports,
run_funcs=run_funcs)
)
| StarcoderdataPython |
3230531 | # coding=utf-8
import flask
from flask import request,jsonify
import werkzeug
import os
import tensorflow as tf
import getConfig
import numpy as np
import pickle
import requests
import json
from PIL import Image
gConfig = {}
gConfig = getConfig.get_config(config_file='config.ini')
app = flask.Flask("imgClassifierWeb")
def CNN_predict():
global secure_filename
file = gConfig['dataset_path'] + "batches.meta"
patch_bin_file = open(file, 'rb')
label_names_dict = pickle.load(patch_bin_file)["label_names"]
img = Image.open(os.path.join(app.root_path, secure_filename))
img = img.convert("RGB")
r, g, b = img.split()
r_arr = np.array(r)
g_arr = np.array(g)
b_arr = np.array(b)
img = np.concatenate((r_arr, g_arr, b_arr))
image = img.reshape([1, 32, 32, 3])/255
payload = json.dumps({"instances":image.tolist()})
predicted_class=requests.post('http://localhost:9000/v1/models/ImageClassifier:predict',data=payload)
predicted_class=np.array(json.loads(predicted_class.text)["predictions"])
prediction=tf.math.argmax(predicted_class[0]).numpy()
print(prediction)
return flask.render_template(template_name_or_list="prediction_result.html",predicted_class=label_names_dict[prediction])
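# The payload above follows TensorFlow Serving's REST predict format,
# i.e. {"instances": [<one 32x32x3 image scaled to 0-1>]}, and the response
# body carries a "predictions" list with one score vector per instance.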
app.add_url_rule(rule="/predict/", endpoint="predict", view_func=CNN_predict)
@app.route('/api', methods=['POST'])
def predict_api():
file = gConfig['dataset_path'] + "batches.meta"
patch_bin_file = open(file, 'rb')
label_names_dict = pickle.load(patch_bin_file)["label_names"]
img = Image.open( request.form['path'])
img = img.convert("RGB")
r, g, b = img.split()
r_arr = np.array(r)
g_arr = np.array(g)
b_arr = np.array(b)
img = np.concatenate((r_arr, g_arr, b_arr))
image = img.reshape([1, 32, 32, 3]) / 255
payload = json.dumps({"instances": image.tolist()})
    predicted_class = requests.post('http://localhost:9000/v1/models/ImageClassifier:predict', data=payload)
predicted_class = np.array(json.loads(predicted_class.text)["predictions"])
prediction = tf.math.argmax(predicted_class[0]).numpy()
print(prediction)
return jsonify({'result':label_names_dict[prediction]})
def upload_image():
global secure_filename
if flask.request.method == "POST": # 设置request的模式为POST
img_file = flask.request.files["image_file"] # 获取需要分类的图片
secure_filename = werkzeug.secure_filename(img_file.filename) # 生成一个没有乱码的文件名
img_path = os.path.join(app.root_path, "predict_img/"+secure_filename) # 获取图片的保存路径
img_file.save(img_path) # 将图片保存在应用的根目录下
print("图片上传成功.")
"""
"""
return flask.redirect(flask.url_for(endpoint="predict"))
return "图片上传失败"
"""
"""
app.add_url_rule(rule="/upload/", endpoint="upload", view_func=upload_image, methods=["POST"])
def predirect_upload():
return flask.render_template(template_name_or_list="upload_image.html")
"""
"""
app.add_url_rule(rule="/", endpoint="homepage", view_func=predirect_upload)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8008, debug=False)
| StarcoderdataPython |
3286639 | <reponame>whitfin/spack<filename>var/spack/repos/builtin/packages/r-rsamtools/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRsamtools(RPackage):
"""This package provides an interface to the 'samtools', 'bcftools', and
'tabix' utilities (see 'LICENCE') for manipulating SAM (Sequence
Alignment / Map), FASTA, binary variant call (BCF) and compressed
indexed tab-delimited (tabix) files."""
homepage = "https://bioconductor.org/packages/Rsamtools/"
git = "https://git.bioconductor.org/packages/Rsamtools.git"
version('1.32.2', commit='<PASSWORD>')
version('1.28.0', commit='<PASSWORD>')
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.2', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.2', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.2', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.2', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.2', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.2', type=('build', 'run'))
depends_on('r-xvector', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.2', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r-bitops', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('[email protected]:3.4.9', when='@1.28.0', type=('build', 'run'))
depends_on('[email protected]:3.5.9', when='@1.32.2', type=('build', 'run'))
| StarcoderdataPython |
3290628 | #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Simulation class for threading Kernel objects
#
# macrospin Python package
# Authors: <NAME>
# Copyright: 2014-2015 Cornell University
#
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
import numpy as np
from threading import Thread
class Simulation(object):
""" Simulation object coordinates the Thread that runs the kernel
"""
FAILED, STOPPED, QUEUED, RUNNING = 0, 1, 2, 3
def __init__(self, kernel):
self.kernel = kernel
self.status = Simulation.QUEUED
self.thread = None
def run(self, time, timeout=None, thread_timeout=1e5):
""" Runs the simulation for a given simulation time in seconds
"""
if self.status == Simulation.RUNNING:
raise Exception("Simulation is already running")
elif self.status == Simulation.FAILED:
raise Exception("Can not run a simulation that has failed")
self.status = Simulation.RUNNING
self.thread = Thread(target=self.kernel, kwargs={
'time': time, 'timeout': timeout})
self.thread.start()
def isRunning(self):
""" Returns True if an existing Thread is running
"""
if self.thread is None:
return False
else:
return self.thread.isAlive()
def wait(self, thread_timeout):
self.thread.join(thread_timeout)
self.status = Simulation.STOPPED
def stop(self):
""" Stops the simulation even if running
"""
self.kernel.stop()
def resume(self, time, thread_timeout=1e5):
""" Resumes the simulation from the last moment orientation
"""
self.run(time, None, thread_timeout)
def stabilize(self, timeout=1e-4, thread_timeout=1e5):
""" Runs the simulation until the moment is stable or the timeout
is reached in simulation time
"""
self.run(None, timeout, thread_timeout)
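# Illustrative usage, assuming `kernel` is a macrospin Kernel instance
# constructed elsewhere:
#
#     sim = Simulation(kernel)
#     sim.run(time=1e-9)             # integrate 1 ns on a background thread
#     sim.wait(thread_timeout=1e5)   # block until the thread finishes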
class FieldSweep(object):
def __init__(self, kernel, fields):
self.kernel = kernel
self.fields = fields
@staticmethod
def loop(kernel, direction='x', start_field=-1e3, end_field=1e3,
points=1e3, reverse=True):
""" Returns a FieldSweep object with fields that go from the start_field
to the end_field along a specific direction, with the default option to
also include the reverse
"""
H = np.linspace(start_field, end_field, num=points, dtype=np.float32)
coordinates = {'x': 0, 'y': 1, 'z': 2}
if direction not in coordinates:
raise ValueError("Field sweep direction must be either x, y, or z")
if reverse:
fields = np.zeros((2*points, 3), dtype=np.float32)
fields[:points,coordinates[direction]] = H
fields[points:,coordinates[direction]] = H[::-1] # Reversed view
else:
fields = np.zeros((points, 3), dtype=np.float32)
fields[:,coordinates[direction]] = H
return FieldSweep(kernel, fields)
def run(self, steps=1000):
""" Runs through each field and stabilizes the moment, returning
the fields, stabilization time, and moment orientation
"""
size = self.fields.shape[0]
times = np.zeros((size, 1), dtype=np.float32)
moments = np.zeros((size, 3), dtype=np.float32)
self.kernel.reset()
for i, field in enumerate(self.fields):
ti = self.kernel.t_sec
self.kernel.hext = self.kernel.raw_parameters.normalize_field(field)
print self.kernel.hext, steps
self.kernel.relax(steps)
print "t:", ti, self.kernel.t_sec
times[i] = self.kernel.t_sec - ti
moments[i] = self.kernel.m
return self.fields, times, moments
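# Illustrative usage, assuming `kernel` is a macrospin Kernel instance:
#
#     sweep = FieldSweep.loop(kernel, direction='x',
#                             start_field=-1e3, end_field=1e3, points=100)
#     fields, times, moments = sweep.run(steps=1000)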
| StarcoderdataPython |
3256177 | <reponame>mmwest55/FinalProjectCS499
# <NAME>
# 11/1/2020
# User Management System
# import additional files
from login import Login
from customer import Customer
import pandas as pd
# import databases, and database connections
import sqlite3
conn = sqlite3.connect("user_management.db")
cursor = conn.cursor()
# Welcome screen
def welcome():
""" Welcome screen for program """
print("========================================================")
    print('======================== Welcome =======================')
print("========================================================")
print('================= User Management System ===============')
print("========================================================\n")
# Login function
def start():
"""Start function designed to implement the login screen."""
# Function to initiate login operations
log_req = input("Login to your account (Y/N) ")
if log_req.lower() == 'y': # This ensures that if the user types
# upper or lower case, it is detected
log_proc = Login()
if log_proc.user_find():
print('\nWelcome to the System\n')
else:
print('Please contact your administrator to setup your account')
else:
print ('Goodbye')
# Menu to be seen after proper login
def menu():
"""Menu to make selection"""
print("========================================================")
print('======================== Menu ==========================')
print("========================================================\n")
print('1. New customer: ')
print('2. Existing customer: ')
print('3. Exit\n')
customer_proc = Customer()
user_input = input('Please make your selection using a numerical value: ')
user_input = int(user_input)
# Checking to see what user input is, and make send to correct method.
if user_input == 1:
customer_proc.new_customer()
elif user_input == 2:
customer_proc.existing_customer()
elif user_input == 3:
return
else:
print('Invalid choice, please select the number corresponding to the selection:')
menu()
menu()
# Exit message
def exit_program():
""" Thank you message to exit program """
print('===============================================')
print("Thank you for using the User Management System!")
print('===============================================')
return
# Initialize main program
def program():
"""Program basic flow."""
welcome()
start()
menu()
exit_program()
# initial load of customers to database
customers = [(101, '<NAME>', '<EMAIL>', '1234 Yorktown Ave. New York, 10001', 'Vivint'),
(102, '<NAME>', '<EMAIL>', '1254 Something Ave. New York, 10001', 'Google'),
(103, '<NAME>', '<EMAIL>', '12554 York Ave. New York, 10001', 'Yahoo'),
(104, '<NAME>', '<EMAIL>', '1232 Penn Ave. New York, 10001', 'Apple'),
(105, '<NAME>', '<EMAIL>', '15553 Penn Ave. New York, 10001', 'Apple')]
# Start Program
program()
| StarcoderdataPython |
62729 | <gh_stars>0
"""
Complete game implementations/engines.
"""
| StarcoderdataPython |
3279403 | <filename>similar_said/get_word_similar_said.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''=================================================
@IDE :PyCharm
@Author :LuckyHuibo
@Date :2019/8/28 13:15
@Desc :
1. Use word2vec (a pre-trained model) together with breadth-first search to collect words related to "说" (to say), and save them to ../data/words.txt
2. Load the saved data for inspection
=================================================='''
from gensim.models import Word2Vec
from collections import defaultdict
from config.file_path import said_path
import os
def get_related_words(initial_words, model):
"""
@initial_words
@model
"""
unseen = initial_words
seen = defaultdict(int)
max_size = 500
while unseen and len(seen) < max_size:
if len(seen) % 50 == 0:
print('seen length : {}'.format(len(seen)))
node = unseen.pop(0)
new_expanding = [w for w, s in model.most_similar(node, topn=20)]
unseen += new_expanding
seen[node] += 1
return seen
def get_words_said(model_path):
model = Word2Vec.load(model_path)
related_words = get_related_words(['说', '表示', '认为'], model)
related_words = sorted(related_words.items(), key=lambda x: x[1], reverse=True)
print(related_words)
said = [i[0] for i in related_words if i[1] >= 1]
return said
def save_said(wv_model_path, save_path):
said = get_words_said(wv_model_path)
string = '|'.join(said)
try:
with open(save_path, 'w', encoding='utf-8') as f:
f.write(string)
return True
except:
return False
def load_said(filename):
if os.path.exists(filename):
with open(filename, 'r', encoding='utf-8') as f:
string = f.readlines()
string = string[0].split('|')
return string
txt_path = os.path.join(said_path, "similar_said.txt")
txt_said = load_said(txt_path)
if __name__ == '__main__':
wv_model_path = "../data/zhwiki_news.word2vec"
result = save_said(wv_model_path=wv_model_path, save_path="similar_said.txt")
if result:
string = load_said("../data/words.txt")
print(string)
model = Word2Vec.load(wv_model_path)
said = model['说']
print(said)
| StarcoderdataPython |
3234103 | <filename>tests/optimizers/test_local_best.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import modules
import pytest
import numpy as np
# Import from pyswarms
from pyswarms.single import LocalBestPSO
from pyswarms.utils.functions.single_obj import sphere
from .abc_test_optimizer import ABCTestOptimizer
class TestLocalBestOptimizer(ABCTestOptimizer):
@pytest.fixture
def optimizer(self):
return LocalBestPSO
@pytest.fixture
def optimizer_history(self, options):
opt = LocalBestPSO(10, 2, options)
opt.optimize(sphere, 1000)
return opt
@pytest.fixture
def optimizer_reset(self, options):
opt = LocalBestPSO(10, 2, options)
opt.optimize(sphere, 10)
opt.reset()
return opt
def test_local_correct_pos(self, options):
""" Test to check local optimiser returns the correct position corresponding to the best cost """
opt = LocalBestPSO(n_particles=10, dimensions=2, options=options)
cost, pos = opt.optimize(sphere, iters=5)
# find best pos from history
min_cost_idx = np.argmin(opt.cost_history)
min_pos_idx = np.argmin(sphere(opt.pos_history[min_cost_idx]))
assert np.array_equal(opt.pos_history[min_cost_idx][min_pos_idx], pos)
| StarcoderdataPython |
50558 | <reponame>bdastur/notes<filename>aws/scripts/sqstest.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
import boto3
import botocore
class SQS(unittest.TestCase):
def setUp(self):
env = os.environ.get('PROFILE_NAME', 'default')
if env == "default":
print "Using Default profile"
else:
print "Using %s profile" % env
try:
session = boto3.Session(profile_name=env,
region_name="us-east-1")
except botocore.exceptions.ProfileNotFound:
print "Profile %s not found!" % env
self.sqsclient = None
return
self.sqsclient = session.client('sqs')
self.queue_name = "brd-queue"
def test_create_standard_queue(self):
print "basic"
attributes = {
'DelaySeconds': '5',
'MaximumMessageSize': '1024',
'MessageRetentionPeriod': '60'
}
# Create a standard queue.
response = self.sqsclient.create_queue(
QueueName=self.queue_name,
Attributes=attributes
)
print "response: ", response
# Get Queue URL.
response = self.sqsclient.get_queue_url(
QueueName=self.queue_name
)
print "response: ", response['QueueUrl']
queue_url = response['QueueUrl']
# Delete queue.
response = self.sqsclient.delete_queue(
QueueUrl=queue_url
)
def test_send_message(self):
# Create a standard queue.
queue_name = "brd-testqueue-1"
response = self.sqsclient.create_queue(
QueueName=queue_name
)
print "Response: ", response
queue_url = response['QueueUrl']
# Send message.
response = self.sqsclient.send_message(
QueueUrl=queue_url,
MessageBody='This is a test message',
DelaySeconds=1
)
print "response for send msg: ", response
| StarcoderdataPython |
3331435 | <filename>django/gunicorn.conf.py
"""gunicorn WSGI server configuration."""
from multiprocessing import cpu_count
from os import environ
def max_workers():
return (2 * cpu_count()) + 1
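# e.g. on a 4-core machine: (2 * 4) + 1 = 9 worker processes.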
bind = "0.0.0.0:" + environ.get("PORT", "8000")
max_requests = 1000
max_requests_jitter = 30
worker_class = "gevent"
workers = max_workers()
forwarded_allow_ips = "*"
loglevel = "info"
pythonpath = "/env/lib/python3.6/site-packages"
timeout = 120 # This could maybe be 30s like the nginx-level conf, but would that break the RQ queue?
| StarcoderdataPython |
3392325 | <filename>pyclient/pydeephaven/proto/table_pb2_grpc.py
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from pydeephaven.proto import table_pb2 as deephaven_dot_proto_dot_table__pb2
from pydeephaven.proto import ticket_pb2 as deephaven_dot_proto_dot_ticket__pb2
class TableServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetExportedTableCreationResponse = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/GetExportedTableCreationResponse',
request_serializer=deephaven_dot_proto_dot_ticket__pb2.Ticket.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.FetchTable = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/FetchTable',
request_serializer=deephaven_dot_proto_dot_table__pb2.FetchTableRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.FetchPandasTable = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/FetchPandasTable',
request_serializer=deephaven_dot_proto_dot_table__pb2.FetchPandasTableRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.FetchTableMap = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/FetchTableMap',
request_serializer=deephaven_dot_proto_dot_table__pb2.FetchTableMapRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.FetchTableMapResponse.FromString,
)
self.EmptyTable = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/EmptyTable',
request_serializer=deephaven_dot_proto_dot_table__pb2.EmptyTableRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.TimeTable = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/TimeTable',
request_serializer=deephaven_dot_proto_dot_table__pb2.TimeTableRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.DropColumns = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/DropColumns',
request_serializer=deephaven_dot_proto_dot_table__pb2.DropColumnsRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Update = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/Update',
request_serializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.LazyUpdate = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/LazyUpdate',
request_serializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.View = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/View',
request_serializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.UpdateView = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/UpdateView',
request_serializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Select = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/Select',
request_serializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.SelectDistinct = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/SelectDistinct',
request_serializer=deephaven_dot_proto_dot_table__pb2.SelectDistinctRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Filter = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/Filter',
request_serializer=deephaven_dot_proto_dot_table__pb2.FilterTableRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.UnstructuredFilter = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/UnstructuredFilter',
request_serializer=deephaven_dot_proto_dot_table__pb2.UnstructuredFilterTableRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Sort = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/Sort',
request_serializer=deephaven_dot_proto_dot_table__pb2.SortTableRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Head = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/Head',
request_serializer=deephaven_dot_proto_dot_table__pb2.HeadOrTailRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Tail = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/Tail',
request_serializer=deephaven_dot_proto_dot_table__pb2.HeadOrTailRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.HeadBy = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/HeadBy',
request_serializer=deephaven_dot_proto_dot_table__pb2.HeadOrTailByRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.TailBy = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/TailBy',
request_serializer=deephaven_dot_proto_dot_table__pb2.HeadOrTailByRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Ungroup = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/Ungroup',
request_serializer=deephaven_dot_proto_dot_table__pb2.UngroupRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.MergeTables = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/MergeTables',
request_serializer=deephaven_dot_proto_dot_table__pb2.MergeTablesRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.CrossJoinTables = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/CrossJoinTables',
request_serializer=deephaven_dot_proto_dot_table__pb2.CrossJoinTablesRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.NaturalJoinTables = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/NaturalJoinTables',
request_serializer=deephaven_dot_proto_dot_table__pb2.NaturalJoinTablesRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.ExactJoinTables = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/ExactJoinTables',
request_serializer=deephaven_dot_proto_dot_table__pb2.ExactJoinTablesRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.LeftJoinTables = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/LeftJoinTables',
request_serializer=deephaven_dot_proto_dot_table__pb2.LeftJoinTablesRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.AsOfJoinTables = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/AsOfJoinTables',
request_serializer=deephaven_dot_proto_dot_table__pb2.AsOfJoinTablesRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.ComboAggregate = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/ComboAggregate',
request_serializer=deephaven_dot_proto_dot_table__pb2.ComboAggregateRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Snapshot = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/Snapshot',
request_serializer=deephaven_dot_proto_dot_table__pb2.SnapshotTableRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Flatten = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/Flatten',
request_serializer=deephaven_dot_proto_dot_table__pb2.FlattenRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.RunChartDownsample = channel.unary_unary(
'/io.deephaven.proto.backplane.grpc.TableService/RunChartDownsample',
request_serializer=deephaven_dot_proto_dot_table__pb2.RunChartDownsampleRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.Batch = channel.unary_stream(
'/io.deephaven.proto.backplane.grpc.TableService/Batch',
request_serializer=deephaven_dot_proto_dot_table__pb2.BatchTableRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
)
self.ExportedTableUpdates = channel.unary_stream(
'/io.deephaven.proto.backplane.grpc.TableService/ExportedTableUpdates',
request_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableUpdatesRequest.SerializeToString,
response_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableUpdateMessage.FromString,
)
class TableServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def GetExportedTableCreationResponse(self, request, context):
"""
Request an ETCR for this ticket. Ticket must reference a Table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FetchTable(self, request, context):
"""
Fetches a Table from an existing source ticket and exports it to the local session result ticket.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FetchPandasTable(self, request, context):
"""
Fetches a pandas table from an existing source ticket and exports it to the local session result ticket.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FetchTableMap(self, request, context):
"""
Fetches a TableMap from an existing source ticket and exports it to the local session result ticket.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EmptyTable(self, request, context):
"""
Create an empty table with the given column names and types.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TimeTable(self, request, context):
"""
Create a time table with the given start time and period.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DropColumns(self, request, context):
"""
Drop columns from the parent table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""
Add columns to the given table using the given column specifications and the update table operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LazyUpdate(self, request, context):
"""
Add columns to the given table using the given column specifications and the lazyUpdate table operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def View(self, request, context):
"""
Add columns to the given table using the given column specifications and the view table operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateView(self, request, context):
"""
Add columns to the given table using the given column specifications and the updateView table operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Select(self, request, context):
"""
Select the given columns from the given table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SelectDistinct(self, request, context):
"""
Returns a new table definition with the unique tuples of the specified columns
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Filter(self, request, context):
"""
Filter parent table with structured filters.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UnstructuredFilter(self, request, context):
"""
Filter parent table with unstructured filters.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Sort(self, request, context):
"""
Sort parent table via the provide sort descriptors.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Head(self, request, context):
"""
Extract rows from the head of the parent table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Tail(self, request, context):
"""
Extract rows from the tail of the parent table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HeadBy(self, request, context):
"""
Run the headBy table operation for the given group by columns on the given table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TailBy(self, request, context):
"""
Run the tailBy operation for the given group by columns on the given table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Ungroup(self, request, context):
"""
Ungroup the given columns (all columns will be ungrouped if columnsToUngroup is empty or unspecified).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MergeTables(self, request, context):
"""
Create a merged table from the given input tables. If a key column is provided (not null), a sorted
merged will be performed using that column.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CrossJoinTables(self, request, context):
"""
Returns the result of a cross join operation. Also known as the cartesian product.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def NaturalJoinTables(self, request, context):
"""
Returns the result of a natural join operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ExactJoinTables(self, request, context):
"""
Returns the result of an exact join operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LeftJoinTables(self, request, context):
"""
Returns the result of a left join operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AsOfJoinTables(self, request, context):
"""
Returns the result of an as of join operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ComboAggregate(self, request, context):
"""
Returns the result of an aggregate table operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Snapshot(self, request, context):
"""
Snapshot rightId, triggered by leftId, and export the resulting new Table.
The left table's change events cause a new snapshot to be taken. The result table includes a
"snapshot key" which is a subset (possibly all) of the left table's columns. The
remaining columns in the result table come from right table, the table being snapshotted.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Flatten(self, request, context):
"""
Returns a new table with a flattened index.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunChartDownsample(self, request, context):
"""*
Downsamples a table assume its contents will be rendered in a run chart, with each subsequent row holding a later
X value (i.e., sorted on that column). Multiple Y columns can be specified, as can a range of values for the X
column to support zooming in.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Batch(self, request, context):
"""
Batch a series of requests and send them all at once. This enables the user to create intermediate tables without
requiring them to be exported and managed by the client. The server will automatically release any tables when they
are no longer depended upon.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ExportedTableUpdates(self, request, context):
"""
Establish a stream of table updates for cheap notifications of table size updates.
New streams will flush updates for all existing table exports. An export id of zero will be sent to indicate all
exports have sent their refresh update. Table updates may be intermingled with initial refresh updates after their
initial update had been sent.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
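# --- Illustrative example (added for this document, not generated code): a real
# --- servicer subclasses TableServiceServicer and overrides the methods above
# --- instead of letting them raise UNIMPLEMENTED. The class name and the choice
# --- of returning an empty response are assumptions made purely for illustration.
class _ExampleTableServiceServicer(TableServiceServicer):
    """Minimal sketch of the override pattern; not part of the Deephaven API."""
    def Flatten(self, request, context):
        # A real implementation would build the flattened table and describe it here;
        # this sketch just returns a default-valued response message.
        return deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse()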
def add_TableServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetExportedTableCreationResponse': grpc.unary_unary_rpc_method_handler(
servicer.GetExportedTableCreationResponse,
request_deserializer=deephaven_dot_proto_dot_ticket__pb2.Ticket.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'FetchTable': grpc.unary_unary_rpc_method_handler(
servicer.FetchTable,
request_deserializer=deephaven_dot_proto_dot_table__pb2.FetchTableRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'FetchPandasTable': grpc.unary_unary_rpc_method_handler(
servicer.FetchPandasTable,
request_deserializer=deephaven_dot_proto_dot_table__pb2.FetchPandasTableRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'FetchTableMap': grpc.unary_unary_rpc_method_handler(
servicer.FetchTableMap,
request_deserializer=deephaven_dot_proto_dot_table__pb2.FetchTableMapRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.FetchTableMapResponse.SerializeToString,
),
'EmptyTable': grpc.unary_unary_rpc_method_handler(
servicer.EmptyTable,
request_deserializer=deephaven_dot_proto_dot_table__pb2.EmptyTableRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'TimeTable': grpc.unary_unary_rpc_method_handler(
servicer.TimeTable,
request_deserializer=deephaven_dot_proto_dot_table__pb2.TimeTableRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'DropColumns': grpc.unary_unary_rpc_method_handler(
servicer.DropColumns,
request_deserializer=deephaven_dot_proto_dot_table__pb2.DropColumnsRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'LazyUpdate': grpc.unary_unary_rpc_method_handler(
servicer.LazyUpdate,
request_deserializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'View': grpc.unary_unary_rpc_method_handler(
servicer.View,
request_deserializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'UpdateView': grpc.unary_unary_rpc_method_handler(
servicer.UpdateView,
request_deserializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Select': grpc.unary_unary_rpc_method_handler(
servicer.Select,
request_deserializer=deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'SelectDistinct': grpc.unary_unary_rpc_method_handler(
servicer.SelectDistinct,
request_deserializer=deephaven_dot_proto_dot_table__pb2.SelectDistinctRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Filter': grpc.unary_unary_rpc_method_handler(
servicer.Filter,
request_deserializer=deephaven_dot_proto_dot_table__pb2.FilterTableRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'UnstructuredFilter': grpc.unary_unary_rpc_method_handler(
servicer.UnstructuredFilter,
request_deserializer=deephaven_dot_proto_dot_table__pb2.UnstructuredFilterTableRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Sort': grpc.unary_unary_rpc_method_handler(
servicer.Sort,
request_deserializer=deephaven_dot_proto_dot_table__pb2.SortTableRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Head': grpc.unary_unary_rpc_method_handler(
servicer.Head,
request_deserializer=deephaven_dot_proto_dot_table__pb2.HeadOrTailRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Tail': grpc.unary_unary_rpc_method_handler(
servicer.Tail,
request_deserializer=deephaven_dot_proto_dot_table__pb2.HeadOrTailRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'HeadBy': grpc.unary_unary_rpc_method_handler(
servicer.HeadBy,
request_deserializer=deephaven_dot_proto_dot_table__pb2.HeadOrTailByRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'TailBy': grpc.unary_unary_rpc_method_handler(
servicer.TailBy,
request_deserializer=deephaven_dot_proto_dot_table__pb2.HeadOrTailByRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Ungroup': grpc.unary_unary_rpc_method_handler(
servicer.Ungroup,
request_deserializer=deephaven_dot_proto_dot_table__pb2.UngroupRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'MergeTables': grpc.unary_unary_rpc_method_handler(
servicer.MergeTables,
request_deserializer=deephaven_dot_proto_dot_table__pb2.MergeTablesRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'CrossJoinTables': grpc.unary_unary_rpc_method_handler(
servicer.CrossJoinTables,
request_deserializer=deephaven_dot_proto_dot_table__pb2.CrossJoinTablesRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'NaturalJoinTables': grpc.unary_unary_rpc_method_handler(
servicer.NaturalJoinTables,
request_deserializer=deephaven_dot_proto_dot_table__pb2.NaturalJoinTablesRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'ExactJoinTables': grpc.unary_unary_rpc_method_handler(
servicer.ExactJoinTables,
request_deserializer=deephaven_dot_proto_dot_table__pb2.ExactJoinTablesRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'LeftJoinTables': grpc.unary_unary_rpc_method_handler(
servicer.LeftJoinTables,
request_deserializer=deephaven_dot_proto_dot_table__pb2.LeftJoinTablesRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'AsOfJoinTables': grpc.unary_unary_rpc_method_handler(
servicer.AsOfJoinTables,
request_deserializer=deephaven_dot_proto_dot_table__pb2.AsOfJoinTablesRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'ComboAggregate': grpc.unary_unary_rpc_method_handler(
servicer.ComboAggregate,
request_deserializer=deephaven_dot_proto_dot_table__pb2.ComboAggregateRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Snapshot': grpc.unary_unary_rpc_method_handler(
servicer.Snapshot,
request_deserializer=deephaven_dot_proto_dot_table__pb2.SnapshotTableRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Flatten': grpc.unary_unary_rpc_method_handler(
servicer.Flatten,
request_deserializer=deephaven_dot_proto_dot_table__pb2.FlattenRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'RunChartDownsample': grpc.unary_unary_rpc_method_handler(
servicer.RunChartDownsample,
request_deserializer=deephaven_dot_proto_dot_table__pb2.RunChartDownsampleRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'Batch': grpc.unary_stream_rpc_method_handler(
servicer.Batch,
request_deserializer=deephaven_dot_proto_dot_table__pb2.BatchTableRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.SerializeToString,
),
'ExportedTableUpdates': grpc.unary_stream_rpc_method_handler(
servicer.ExportedTableUpdates,
request_deserializer=deephaven_dot_proto_dot_table__pb2.ExportedTableUpdatesRequest.FromString,
response_serializer=deephaven_dot_proto_dot_table__pb2.ExportedTableUpdateMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'io.deephaven.proto.backplane.grpc.TableService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
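# --- Illustrative example (added for this document, not generated code): one way
# --- to host a servicer with the registration helper above. The thread-pool size
# --- and port are assumptions; only grpc.server() and
# --- add_TableServiceServicer_to_server() are real APIs in scope here.
def _example_serve_table_service(servicer, port=8080):
    """Start an insecure gRPC server hosting the given servicer (sketch only)."""
    from concurrent import futures  # local import keeps the sketch self-contained
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_TableServiceServicer_to_server(servicer, server)
    server.add_insecure_port('[::]:%d' % port)
    server.start()
    return server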
# This class is part of an EXPERIMENTAL API.
class TableService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def GetExportedTableCreationResponse(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/GetExportedTableCreationResponse',
deephaven_dot_proto_dot_ticket__pb2.Ticket.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def FetchTable(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/FetchTable',
deephaven_dot_proto_dot_table__pb2.FetchTableRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def FetchPandasTable(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/FetchPandasTable',
deephaven_dot_proto_dot_table__pb2.FetchPandasTableRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def FetchTableMap(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/FetchTableMap',
deephaven_dot_proto_dot_table__pb2.FetchTableMapRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.FetchTableMapResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def EmptyTable(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/EmptyTable',
deephaven_dot_proto_dot_table__pb2.EmptyTableRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def TimeTable(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/TimeTable',
deephaven_dot_proto_dot_table__pb2.TimeTableRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DropColumns(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/DropColumns',
deephaven_dot_proto_dot_table__pb2.DropColumnsRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Update',
deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def LazyUpdate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/LazyUpdate',
deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def View(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/View',
deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateView(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/UpdateView',
deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Select(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Select',
deephaven_dot_proto_dot_table__pb2.SelectOrUpdateRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SelectDistinct(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/SelectDistinct',
deephaven_dot_proto_dot_table__pb2.SelectDistinctRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Filter(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Filter',
deephaven_dot_proto_dot_table__pb2.FilterTableRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UnstructuredFilter(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/UnstructuredFilter',
deephaven_dot_proto_dot_table__pb2.UnstructuredFilterTableRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Sort(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Sort',
deephaven_dot_proto_dot_table__pb2.SortTableRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Head(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Head',
deephaven_dot_proto_dot_table__pb2.HeadOrTailRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Tail(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Tail',
deephaven_dot_proto_dot_table__pb2.HeadOrTailRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def HeadBy(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/HeadBy',
deephaven_dot_proto_dot_table__pb2.HeadOrTailByRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def TailBy(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/TailBy',
deephaven_dot_proto_dot_table__pb2.HeadOrTailByRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Ungroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Ungroup',
deephaven_dot_proto_dot_table__pb2.UngroupRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def MergeTables(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/MergeTables',
deephaven_dot_proto_dot_table__pb2.MergeTablesRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CrossJoinTables(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/CrossJoinTables',
deephaven_dot_proto_dot_table__pb2.CrossJoinTablesRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def NaturalJoinTables(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/NaturalJoinTables',
deephaven_dot_proto_dot_table__pb2.NaturalJoinTablesRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ExactJoinTables(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/ExactJoinTables',
deephaven_dot_proto_dot_table__pb2.ExactJoinTablesRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def LeftJoinTables(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/LeftJoinTables',
deephaven_dot_proto_dot_table__pb2.LeftJoinTablesRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AsOfJoinTables(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/AsOfJoinTables',
deephaven_dot_proto_dot_table__pb2.AsOfJoinTablesRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ComboAggregate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/ComboAggregate',
deephaven_dot_proto_dot_table__pb2.ComboAggregateRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Snapshot(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Snapshot',
deephaven_dot_proto_dot_table__pb2.SnapshotTableRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Flatten(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Flatten',
deephaven_dot_proto_dot_table__pb2.FlattenRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RunChartDownsample(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/io.deephaven.proto.backplane.grpc.TableService/RunChartDownsample',
deephaven_dot_proto_dot_table__pb2.RunChartDownsampleRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Batch(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/io.deephaven.proto.backplane.grpc.TableService/Batch',
deephaven_dot_proto_dot_table__pb2.BatchTableRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableCreationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ExportedTableUpdates(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/io.deephaven.proto.backplane.grpc.TableService/ExportedTableUpdates',
deephaven_dot_proto_dot_table__pb2.ExportedTableUpdatesRequest.SerializeToString,
deephaven_dot_proto_dot_table__pb2.ExportedTableUpdateMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
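# --- Illustrative example (added for this document, not generated code): calling
# --- one of the experimental convenience wrappers above. The target address is an
# --- assumption, and the EmptyTableRequest is left at its default field values;
# --- see deephaven/proto/table.proto for the actual request schema.
def _example_empty_table(target='localhost:10000'):
    """Issue an EmptyTable RPC through the experimental TableService helper (sketch only)."""
    request = deephaven_dot_proto_dot_table__pb2.EmptyTableRequest()
    return TableService.EmptyTable(request, target, insecure=True)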
<reponame>marvinthepa/vcs_query<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf8 -*-
# TODO
# documentation
# http://www.ietf.org/rfc/rfc2426.txt
import vobject, sys, os, re
from getopt import gnu_getopt
try:
import cPickle as pickle
except ImportError:
import pickle
import logging
vobject_logger = logging.getLogger("vobject.base")
logger = logging.getLogger(__name__)
import locale
language, output_encoding = locale.getdefaultlocale()
def main(argv):
"""
usage: $0 [<options>] [<substr>]
only those lines that contain <substr> will be displayed
options:
-d --vcard-dir= specify directory with vcards
-s --starting-matches display lines which start with <substr> first
"""
vcard_dir = None
starting_matches = False
opts, args = gnu_getopt(argv, "d:s", ["vcard-dir=","starting-matches"])
for opt, val in opts:
if opt == "-d" or opt == "--vcard-dir":
vcard_dir = val
if opt == "-s" or opt == "--starting-matches":
starting_matches = True
if vcard_dir is None:
sys.exit("please specify a directory with vcards")
if not os.path.isdir(vcard_dir):
sys.exit("please specify a directory with vcards")
pattern = None
if len(args) > 0:
pattern = args[0].strip().lower() #re.compile(args[0].strip(), re.I)
print "vcs_query.py, see http://github.com/marvinthepa/vcs_query"
cache = VcardCache(vcard_dir)
entries = cache.get_entries()
entries.sort(key=str)
if starting_matches and pattern:
sortfunc = get_sortfunc(pattern)
entries.sort(cmp=sortfunc, key=str)
for vcard in entries:
if len(vcard.mail) > 0:
            vcard_line = str(vcard)
            if not pattern or pattern in vcard_line.lower(): #.search(vcard_line):
                print vcard_line
def get_sortfunc(pattern):
def sortfunc(a,b):
if a.lower().startswith(pattern):
if b.lower().startswith(pattern):
return 0
else:
return -1
else:
if b.lower().startswith(pattern):
return 1
else:
return 0
return sortfunc
class VcardCache(object):
def __init__(self, vcard_dir):
self.cache_dir = os.path.expanduser("~/.cache/")
self.pickle_path = os.path.join(self.cache_dir, "vcard_query")
self.vcard_dir = vcard_dir
self.last_vcard_dir_timestamp, self.vcard_files = self._load()
self._update()
self._serialize()
def _load(self):
if os.path.isfile(self.pickle_path):
with open(self.pickle_path, "r") as f:
return pickle.load(f)
else:
return 0, {}
def _update(self):
if get_timestamp(self.vcard_dir) > self.last_vcard_dir_timestamp:
paths = os.listdir(self.vcard_dir)
paths = [ os.path.join(self.vcard_dir, p) for p in paths ]
paths = [ p for p in paths if os.path.isfile(p) ]
for key in self.vcard_files.keys():
if key not in paths:
del self.vcard_files[key]
for path in paths:
if not self.vcard_files.has_key(path) or self.vcard_files[path].needs_update():
self.vcard_files[path] = VcardFile(path)
self.vcards = []
for vcard_file in self.vcard_files.values():
self.vcards.extend(vcard_file.vcards)
def _serialize(self):
try:
if not os.path.isdir(self.cache_dir):
os.mkdir(self.cache_dir)
with open(self.pickle_path, "w") as f:
pickle.dump((self.last_vcard_dir_timestamp, self.vcard_files), f)
except IOError:
print "cannot write to cache file " + cache
def get_entries(self):
return self.vcards
class Vcard(object):
def __init__(self, component):
self.name = ""
self.mail = ""
if component.contents.has_key("fn"):
self.name = component.fn.value
if component.contents.has_key("email"):
self.mail = component.email.value
self.description = "" # TODO?
def __str__(self):
return self.mail.encode(output_encoding) +\
"\t" + self.name.encode(output_encoding)\
+ "\t" + self.description
class VcardFile(object):
def __init__(self, path):
self.path = path
self.timestamp = get_timestamp(path)
self._read_components(path)
def _read_components(self, path):
vobject_logger.setLevel(logging.FATAL)
with open(path) as f:
components = vobject.readComponents(f, ignoreUnreadable=True)
self.vcards = []
for component in components:
if component.name.lower() == u'vcard':
self.vcards.append( Vcard(component) )
# hack to parse full emails for contained vcards:
elif component.contents.has_key("vcard"):
self.vcards.append( Vcard(component.vcard) )
else:
logger.warning("no vcard in component: "
+ component.name.encode(output_encoding)
+ "from file " + path )
vobject_logger.setLevel(logging.ERROR)
def needs_update(self):
return get_timestamp(self.path) > self.timestamp
def __str__(self):
        return "\n".join(str(vcard) for vcard in self.vcards)
def get_timestamp(path):
return os.stat(path).st_mtime
if __name__ == "__main__":
main(sys.argv[1:])
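# Illustrative usage notes (added for this document, not part of the original
# script). The output format "email<TAB>name<TAB>description" appears to match
# what mutt's query_command expects, so, assuming the vcards live in ~/.vcards,
# an invocation and a muttrc entry might look like:
#
#   ./vcs_query.py -d ~/.vcards smith
#   set query_command = "vcs_query.py -d ~/.vcards '%s'"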
#! /usr/bin/env python3
# Example wiring (LT-8900 on board to Raspberry Pi):
#
# LT-8900
# _--------------------------------------------------------_
# | VCC | RST | MISO | MOSI | SCK | CS | GND |
# |-------+-------+----- -+-------+-------+-------+--------|
# | 3.3v | Reset | SPI | SPI | SPI | SPI | Ground |
# | | | | | Clock | CE0 | |
# -___+___|___+___|___+___|___+___|___+___|___+___|___+____-
# | | | | | | |
# | | | | | | |
# | | | | | | |
# _---+-------+-------+-------+-------+-------+-------+----_
# | 3.3v | GPIO5 | MISO | MOSI | SCLK | CE0 | 0v |
# |-------+-------+-------+-------+-------+-------+--------|
# | P1-17 | P1-18 | P1-21 | P1-19 | P1-23 | P1-24 | P1-25 |
# -________________________________________________________-
# Raspberry Pi
import spidev
import time
import threading
import collections
class dummy_context_mgr():
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
class Radio:
_default_register_values = {
'format_config': {
'crc_enabled': 1,
'scramble_enabled': 0,
'packet_length_encoded': 1,
'auto_term_tx': 1,
'auto_ack': 1,
'pkt_fifo_polarity': 0,
'crc_initial_data': 0
},
'radio_state': {'tx_enabled': 0, 'rx_enabled': 0, 'channel': 76},
'undocumented_power': {'value': 0x6c90},
'power': {
'current': 8,
'reserved_1': 1,
'gain': 0
},
'rssi_power': {'mode': 0},
'crystal': {'trim_adjust': 0},
'packet_config': {
'preamble_len': 2,
'syncword_len': 1,
'trailer_len': 0,
'packet_type': 0,
'fec_type': 0,
'br_clock_sel': 0
},
'chip_power': {
'power_down': 0,
'sleep_mode': 0,
'br_clock_on_sleep': 0,
'rexmit_times': 10,
'miso_tri_opt': 0,
'scramble_value': 0
},
'thresholds': {
'fifo_empty_threshold': 8,
'fifo_full_threshold': 16,
'syncword_error_bits': 2
},
'scan_rssi': {'channel': 63, 'ack_time': 176},
'gain_block': {'enabled': 1},
'vco_calibrate': {'enabled': 0},
'scan_rssi_state': {'enabled': 0, 'channel_offset': 0, 'wait_time': 15}
}
_register_map = [
{'name': "Unknown"}, # 0
{'name': "Unknown"}, # 1
{'name': "Unknown"}, # 2
{ # 3
'name': 'phase_lock',
'reserved_1': [13, 15],
'rf_synth_lock': [12, 12],
'reserved_2': [0, 11]
},
{'name': "Unknown"}, # 4
{'name': "Unknown"}, # 5
{ # 6
'name': "raw_rssi",
'raw_rssi': [10, 15],
'reserved_1': [0, 9]
},
{ # 7
'name': "radio_state",
'reserved_1': [9, 15],
'tx_enabled': [8, 8],
'rx_enabled': [7, 7],
'channel': [0, 6]
},
{ # 8
'name': "undocumented_power",
'value': [0, 15]
},
{ # 9
'name': "power",
'current': [12, 15],
'reserved_1': [11, 11],
'gain': [7, 10],
'reserved_2': [0, 6]
},
{ # 10
'name': "gain_block",
'reserved_1': [1, 15],
'enabled': [0, 0]
},
{ # 11
'name': "rssi_power",
'reserved_1': [9, 15],
'mode': [8, 8],
'reserved_2': [0, 7]
},
{'name': "Unknown"}, # 12
{'name': "Unknown"}, # 13
{'name': "Unknown"}, # 14
{'name': "Unknown"}, # 15
{'name': "Unknown"}, # 16
{'name': "Unknown"}, # 17
{'name': "Unknown"}, # 18
{'name': "Unknown"}, # 19
{'name': "Unknown"}, # 20
{'name': "Unknown"}, # 21
{'name': "Unknown"}, # 22
{ # 23
'name': "vco_calibrate",
'reserved_1': [3, 15],
'enabled': [2, 2],
'reserved_2': [0, 1]
},
{'name': "Unknown"}, # 24
{'name': "Unknown"}, # 25
{'name': "Unknown"}, # 26
{ # 27
'name': "crystal",
'reserved_1': [6, 15],
'trim_adjust': [0, 5]
},
{'name': "Unknown"}, # 28
{ # 29
'name': "minor_version",
'reserved_1': [8, 15],
'rf': [4, 7],
'reserved_2': [3, 3],
'digital': [0, 2]
},
{ # 30
'name': "manufacture_1",
'manuf_code_low': [0, 15]
},
{ # 31
'name': "manufacture_2",
'rf_code': [12, 15],
'manuf_code_high': [0, 11]
},
{ # 32
'name': "packet_config",
'preamble_len': [13, 15],
'syncword_len': [11, 12],
'trailer_len': [8, 10],
'packet_type': [6, 7],
'fec_type': [4, 5],
'br_clock_sel': [1, 3],
'reserved_1': [0, 0]
},
{ # 33
'name': "vco_pa_delays",
'vco_on_delay': [8, 15],
'pa_off_delay': [6, 7],
'pa_tx_delay': [0, 5]
},
{ # 34
'name': "tx_packet_delays",
'packet_control_direct': [15, 15],
'tx_cw_delay': [8, 14],
'reserved_1': [6, 7],
'tx_sw_on_delay': [0, 5]
},
{ # 35
'name': "chip_power",
'power_down': [15, 15],
'sleep_mode': [14, 14],
'reserved_1': [13, 13],
'br_clock_on_sleep': [12, 12],
'rexmit_times': [8, 11],
'miso_tri_opt': [7, 7],
'scramble_value': [0, 6]
},
{ # 36
'name': "syncword_0",
'value': [0, 15]
},
{ # 37
'name': "syncword_1",
'value': [0, 15]
},
{ # 38
'name': "syncword_2",
'value': [0, 15]
},
{ # 39
'name': "syncword_3",
'value': [0, 15]
},
{ # 40
'name': "thresholds",
'fifo_empty_threshold': [11, 15],
'fifo_full_threshold': [6, 10],
'syncword_error_bits': [0, 5]
},
{ # 41
'name': "format_config",
'crc_enabled': [15, 15],
'scramble_enabled': [14, 14],
'packet_length_encoded': [13, 13],
'auto_term_tx': [12, 12],
'auto_ack': [11, 11],
'pkt_fifo_polarity': [10, 10],
'reserved_1': [8, 9],
'crc_initial_data': [0, 7]
},
{ # 42
'name': "scan_rssi",
'channel': [10, 15],
'reserved_1': [8, 9],
'ack_time': [0, 7]
},
{ # 43
'name': "scan_rssi_state",
'enabled': [15, 15],
'channel_offset': [8, 14],
'wait_time': [0, 7]
},
{'name': "Unknown"}, # 44
{'name': "Unknown"}, # 45
{'name': "Unknown"}, # 46
{'name': "Unknown"}, # 47
{ # 48
'name': "status",
'crc_error': [15, 15],
'fec_error': [14, 14],
'framer_status': [8, 13],
'syncword_rx': [7, 7],
'packet_flag': [6, 6],
'fifo_flag': [5, 5],
'reserved_1': [0, 4]
},
{'name': "Unknown"}, # 49
{ # 50
'name': "fifo",
'value': [0, 15]
},
{'name': "Unknown"}, # 51
{ # 52
'name': "fifo_state",
'clear_write': [15, 15],
'reserved_1': [14, 14],
'write_ptr': [8, 13],
'clear_read': [7, 7],
'reserved_2': [6, 6],
'read_ptr': [0, 5]
}
]
def __init__(self, spi_bus, spi_dev, config = None):
spi = spidev.SpiDev()
spi.open(spi_bus, spi_dev)
self._spi = spi
self._dequeue_thread = None
self._last_syncword = None
self._software_tx_queue = {}
self._software_tx_queue_next_time = {}
self.configure(config, update = False)
if len(self._register_map) != 53:
raise ValueError('Inconsistent register map!')
return None
def __del__(self):
self._debug('Deleting object')
self._config['use_software_tx_queue'] = False
self._spi.close()
def _debug(self, message):
if 'debug_log_command' in self._config:
self._config['debug_log_command'](message)
return None
def _info(self, message):
log_command = None
if 'info_log_command' in self._config:
log_command = self._config['info_log_command']
elif 'debug_log_command' in self._config:
log_command = self._config['debug_log_command']
if log_command is None:
return None
log_command(message)
return None
def _error(self, message):
log_command = None
if 'error_log_command' in self._config:
log_command = self._config['error_log_command']
elif 'info_log_command' in self._config:
log_command = self._config['info_log_command']
elif 'debug_log_command' in self._config:
log_command = self._config['debug_log_command']
if log_command is None:
return None
log_command(message)
return None
def _get_mutex(self, real_mutex = True):
if not real_mutex:
return dummy_context_mgr()
mutex = self._config.get('mutex', dummy_context_mgr())
return mutex
def _reset_device(self):
self._info("Resetting radio {}".format(__name__))
reset_command = self._config.get('reset_command', None)
if reset_command is None:
return None
reset_command()
return None
def _should_use_queue(self):
if 'use_software_tx_queue' in self._config:
return self._config['use_software_tx_queue']
return False
def _register_name(self, reg_number):
return self._register_map[reg_number]['name']
def _register_number(self, reg_string):
reg_string_orig = reg_string
if isinstance(reg_string, int):
return reg_string
if reg_string.isnumeric():
return int(reg_string)
for reg_number, reg_info in enumerate(self._register_map):
if reg_info['name'] == reg_string:
return reg_number
raise NameError("Invalid register value {}".format(reg_string_orig))
def _check_radio(self):
value1 = self.get_register(0);
value2 = self.get_register(1);
if value1 == 0x6fe0 and value2 == 0x5681:
return True
self._debug(f'Expected 0x6fe0, 0x5681 and got 0x{value1:04x}, 0x{value2:04x}')
return False
def _get_default_register_value(self, register):
return self._default_register_values.get(register, {})
def _set_default_register_values(self):
self._last_format_config = {}
for register_name, register_value in self._default_register_values.items():
if register_name == 'format_config':
self._apply_packet_format_config({})
continue
self.put_register_bits(register_name, register_value)
return True
def _put_register_high_low(self, reg, high, low, delay = None):
if delay is None:
delay = 10
reg = self._register_number(reg)
result = self._spi.xfer([reg, high, low], self._spi.max_speed_hz, delay)
if reg & 0x80 == 0x80:
self._debug(" regRead[%02X] = %s" % ((reg & 0x7f), result))
else:
self._debug("regWrite[%02X:0x%02X%02X] = %s" % (reg, high, low, result))
return result
def put_register(self, reg, value, delay = None):
high = (value >> 8) & 0xff
low = value & 0xff
return self._put_register_high_low(reg, high, low, delay = delay)
def put_register_bits(self, reg, bits_dict, delay = None):
# Convert register to an integer
reg = self._register_number(reg)
# Lookup register in the register map
register_info = self._register_map[reg]
# Create a dictionary to hold the parsed results
value = 0
for key in bits_dict:
if key == "name":
continue
bit_range = register_info[key]
mask = ((1 << (bit_range[1] - bit_range[0] + 1)) - 1) << bit_range[0]
key_value = (bits_dict[key] << bit_range[0]) & mask
value = value | key_value
result = self.put_register(reg, value, delay = delay)
return result
def get_register(self, reg):
# Convert register to an integer
reg = self._register_number(reg)
# Reading of a register is indicated by setting high bit
read_reg = reg | 0b10000000
# Put the request with space for the reply
value = self._put_register_high_low(read_reg, 0, 0)
# The reply is stored in the lower two bytes
result = value[1] << 8 | value[2]
# Return result
return result
def get_register_bits(self, reg, value = None):
# Convert register to an integer
reg = self._register_number(reg)
# Get the register's value (unless one was supplied)
if value is None:
value = self.get_register(reg)
# Lookup register in the register map
register_info = self._register_map[reg]
# Create a dictionary to hold the parsed results
result = {'name': register_info['name']}
for key in register_info:
if key == "name":
continue
bit_range = register_info[key]
mask = ((1 << (bit_range[1] - bit_range[0] + 1)) - 1) << bit_range[0]
key_value = (value & mask) >> bit_range[0]
result[key] = key_value
# Return the filled in structure
return result
def configure(self, config, update = True):
if config is None:
config = {}
if update:
self._config.update(config)
else:
self._config = config
with self._get_mutex():
self._spi.max_speed_hz = self._config.get('frequency', 4000000)
self._spi.bits_per_word = self._config.get('bits_per_word', 8)
self._spi.cshigh = self._config.get('csigh', False)
self._spi.no_cs = self._config.get('no_cs', False)
self._spi.lsbfirst = self._config.get('lsbfirst', False)
self._spi.threewire = self._config.get('threewire', False)
self._spi.mode = self._config.get('mode', 1)
# If using a queue, start a thread to run the queue
if self._should_use_queue():
if self._dequeue_thread is None:
self._dequeue_thread = threading.Thread(target = self._run_queue, daemon = True)
self._software_tx_queue_mutex = threading.Lock()
self._dequeue_thread.start()
else:
if self._dequeue_thread is not None:
self._debug("Joining existing thread to wait for termination")
self._dequeue_thread.join()
self._dequeue_thread = None
self._software_tx_queue_mutex = None
return None
def initialize(self):
self._reset_device()
self._set_default_register_values()
if not self._check_radio():
return False
return True
def _reinitialize(self):
self.initialize()
self.set_syncword(self._last_syncword, submit_queue = None, force = True)
self._apply_packet_format_config(self._last_format_config)
def set_channel(self, channel):
state = self.get_register_bits('radio_state')
state['channel'] = channel
self.put_register_bits('radio_state', state, delay = 130)
return state
def set_syncword(self, syncword, force = False, submit_queue = '__DEFAULT__'):
# If queuing is being used, just store this message
if submit_queue is not None and self._should_use_queue():
self._enqueue(submit_queue, syncword, None, None, post_delay = 0)
return None
# Do not set the syncword again if it's not needed
if not force:
if self._last_syncword is not None:
if syncword == self._last_syncword:
return None
self._last_syncword = syncword
packet_config = self.get_register_bits('packet_config')
packet_config['syncword_len'] = len(syncword) - 1
self.put_register_bits('packet_config', packet_config)
if len(syncword) == 1:
self.put_register("syncword_0", syncword[0])
elif len(syncword) == 2:
self.put_register("syncword_0", syncword[1])
self.put_register("syncword_3", syncword[0])
elif len(syncword) == 3:
self.put_register("syncword_0", syncword[2])
self.put_register("syncword_2", syncword[1])
self.put_register("syncword_3", syncword[0])
elif len(syncword) == 4:
self.put_register("syncword_0", syncword[3])
self.put_register("syncword_1", syncword[2])
self.put_register("syncword_2", syncword[1])
self.put_register("syncword_3", syncword[0])
elif len(syncword) > 4:
raise ValueError("SyncWord length must be less than 5")
return None
def fill_fifo(self, message, include_length = True, lock = True):
new_message = [self._register_number('fifo')]
if include_length:
new_message = new_message + [len(message)]
new_message = new_message + message
log_message = new_message.copy()
delay = 10 * len(message)
# Transfer the message
with self._get_mutex(lock):
result = self._spi.xfer(new_message, self._spi.max_speed_hz, delay)
self._debug("Writing: {} = {}".format(log_message, result))
need_reset = False
for check_result in result:
if check_result != 1:
need_reset = True
if need_reset:
self._error("While transmitting we got an error, reinitializing everything")
self._reinitialize()
return new_message
def transmit(self, message, channel = None, lock = True, post_delay = 0, syncword = None, submit_queue = '__DEFAULT__', format_config = None):
# If we are using a radio transmit queue, just queue this message
# (unless we are called from the dequeue procedure)
if submit_queue is not None and self._should_use_queue():
if syncword is None:
syncword = self._last_syncword
self._enqueue(submit_queue, syncword, message, channel, post_delay = post_delay, format_config = format_config)
return True
sent_packet = True
with self._get_mutex(lock):
# Set the syncword
if syncword is not None:
self.set_syncword(syncword, submit_queue = None)
# Apply any format changes
radio_format_config = self._apply_packet_format_config(format_config)
self._debug("Radio format_config = {}".format(radio_format_config))
# Determine if the length should be included
if radio_format_config['packet_length_encoded'] == 1:
include_length = True
else:
include_length = False
if radio_format_config['auto_term_tx'] == 1:
manual_terminate = False
else:
manual_terminate = True
if channel is None:
state = self.get_register_bits('radio_state')
channel = state['channel']
# Initialize the transmitter
self.put_register_bits('radio_state', {
'tx_enabled': 0,
'rx_enabled': 0,
'channel': 0
})
self.put_register_bits('fifo_state', {
'clear_read': 1,
'clear_write': 1
})
# Format message to send to fifo
self.fill_fifo(message, include_length = include_length, lock = False)
# Tell the radio to transmit the FIFO buffer to the specified channel
self.put_register_bits('radio_state', {
'tx_enabled': 1,
'rx_enabled': 0,
'channel': channel
}, delay = 1000)
while not manual_terminate:
radio_status = self.get_register_bits('status')
self._debug("radio_status={}".format(radio_status))
if radio_status['packet_flag'] == 1:
break
if radio_status['framer_status'] == 0:
sent_packet = False
break
time.sleep(0.001)
# Stop transmitting, if needed
if manual_terminate:
self.put_register_bits('radio_state', {
'tx_enabled': 0,
'rx_enabled': 0,
'channel': channel
})
if post_delay != 0:
time.sleep(post_delay)
return sent_packet
def multi_transmit(self, message, channels, retries = 3, delay = 0.1, syncword = None, submit_queue = '__DEFAULT__', format_config = None):
if len(channels) == 0 or retries == 0:
self._error("Asked to send the message {} a total of zero times ({} channels, {} retries)".format(message, channels, retries))
        # Wait at least 650 microseconds between frames
min_delay = 650.0 / 1000000.0
post_delay = min_delay
final_delay = delay
for channel_idx in range(len(channels)):
if channel_idx == (len(channels) - 1):
retries -= 1
channel = channels[channel_idx]
for i in range(retries):
if not self.transmit(message, channel, post_delay = post_delay, syncword = syncword, submit_queue = submit_queue, format_config = format_config):
return False
if not self.transmit(message, channel, post_delay = final_delay, syncword = syncword, submit_queue = submit_queue, format_config = format_config):
return False
return True
def _enqueue(self, submit_queue, syncword, message, channel, post_delay = 0, format_config = None):
if not self._should_use_queue():
raise ValueError('internal error: _enqueue called with queueing disabled')
with self._software_tx_queue_mutex:
if submit_queue not in self._software_tx_queue:
self._software_tx_queue[submit_queue] = collections.deque([])
self._software_tx_queue[submit_queue].append({
'syncword': syncword,
'message': message,
'channel': channel,
'post_delay': post_delay,
'format_config': format_config
})
return None
def _run_queue(self):
self._debug("Started run_queue process")
sleep_time = 0
while True:
if sleep_time != 0:
self._debug("Sleeping for {} seconds".format(sleep_time))
time.sleep(sleep_time)
with self._software_tx_queue_mutex:
for queue in self._software_tx_queue:
if len(self._software_tx_queue[queue]) != 0:
self._debug("Running the queue named {}: {} items left".format(queue, len(self._software_tx_queue[queue])))
try:
[processed_items, remaining_items] = self._run_queue_once()
except Exception as error_info:
self._error("Failed to run queue: {}".format(error_info.args))
processed_items = 0
remaining_items = 0
self._debug("Completed running the queue, did {} items and {} items left (continue queue = {})".format(processed_items, remaining_items, self._should_use_queue()))
if remaining_items == 0:
# If the queue is empty and we are no longer queuing
# events, exit this function (which should be joined)
if not self._should_use_queue():
self._debug("Request to stop run_queue process, exiting")
return None
# If there are no events, wait a bit
# longer before trying again
sleep_time = 0.5
continue
if processed_items == 0:
# If we processed no items, but there are items left to
# process, back off slightly
if sleep_time == 0:
sleep_time = 0.001
else:
sleep_time = min(0.5, sleep_time * 2)
continue
# If there are more events to process, try again
sleep_time = 0
return None
def _run_queue_once(self):
to_transmit = []
remaining_items = 0
now = time.time()
with self._software_tx_queue_mutex:
for submit_queue in self._software_tx_queue:
# Determine if we should run this queue yet
if submit_queue not in self._software_tx_queue_next_time:
self._software_tx_queue_next_time[submit_queue] = now
queue_next_time = self._software_tx_queue_next_time[submit_queue]
if now < queue_next_time:
remaining_items += len(self._software_tx_queue[submit_queue])
continue
# Record how many items to pop off this queue
pop_items = 0
for item in self._software_tx_queue[submit_queue]:
pop_items += 1
# If the last item we're about to transmit requires a delay, make
# a note of it in the queue time and don't pull anything else
# from this queue
item['submit_queue'] = submit_queue
if item['post_delay'] != 0:
break
# Pop off the items to transmit in this run into a list
if pop_items != 0:
self._debug("Found {} items to transmit in the {} queue".format(pop_items, submit_queue))
while pop_items != 0:
to_transmit.append(self._software_tx_queue[submit_queue].popleft())
pop_items -= 1
remaining_items += len(self._software_tx_queue[submit_queue])
to_transmit_ordered = {}
default_syncword = None
for item in to_transmit:
syncword = item['syncword']
channel = item['channel']
format_config = item['format_config']
message = item['message']
if syncword is not None:
default_syncword = syncword
else:
syncword = default_syncword
item['syncword'] = syncword
if message is None or channel is None:
continue
key = str([syncword, channel])
if key not in to_transmit_ordered:
to_transmit_ordered[key] = []
to_transmit_ordered[key].append(item)
self._debug("Getting ready to transmit {} items".format(len(to_transmit)))
with self._get_mutex():
for (key, items) in to_transmit_ordered.items():
for item in items:
self._debug("Transmitting item {}".format(item))
syncword = item['syncword']
channel = item['channel']
format_config = item['format_config']
message = item['message']
self.transmit(message, channel, lock = False, submit_queue = None, syncword = syncword, post_delay = 0, format_config = format_config)
self._software_tx_queue_next_time[item['submit_queue']] = time.time() + item['post_delay']
return [len(to_transmit), remaining_items]
def start_listening(self, channel):
# Initialize the receiver
self.stop_listening()
# Go into listening mode
self.put_register_bits('radio_state', {
'tx_enabled': 0,
'rx_enabled': 1,
'channel': channel
})
return True
def stop_listening(self):
# Initialize the receiver
self.put_register_bits('radio_state', {
'tx_enabled': 0,
'rx_enabled': 0,
'channel': 0
})
self.put_register_bits('fifo_state', {
'clear_read': 1,
'clear_write': 1
})
return True
def _apply_packet_format_config(self, format_config):
# Apply radio format configuration difference from baseline
radio_format_config = self._get_default_register_value('format_config').copy()
# If a configuration was supplied, update what we want to apply
if format_config is not None:
radio_format_config.update(format_config)
if radio_format_config == self._last_format_config:
return radio_format_config
self._last_format_config = radio_format_config
self.put_register_bits('format_config', radio_format_config, delay = 5000)
new_config = self.get_register_bits('format_config')
self._info("Updated format_config to be {}".format(new_config))
return radio_format_config
def receive(self, channel = None, wait = False, length = None, format_config = None, wait_time = 0.1):
# If a length is supplied, assume that the packet is not length encoded
# but allow the user to override that by supplying a format config
if length is not None:
if format_config is None:
format_config = {}
if 'packet_length_encoded' not in format_config:
format_config = format_config.copy()
format_config['packet_length_encoded'] = 0
with self._get_mutex():
            # Apply the current configuration; if it is already applied,
            # this will be a no-op
self._apply_packet_format_config(format_config)
if wait:
if channel is None:
state = self.get_register_bits('radio_state')
channel = state['channel']
self.start_listening(channel)
message = []
crc_error_count = 0
while True:
radio_status = self.get_register_bits('status')
self._debug("radio_status={}".format(radio_status))
if radio_status['crc_error'] == 1:
crc_error_count += 1
if crc_error_count > 30:
self._reinitialize()
self.start_listening(channel)
continue
crc_error_count = 0
if radio_status['packet_flag'] == 0:
if wait:
time.sleep(wait_time)
continue
else:
self._unlock_radio()
return None
# Data is available, read it from the FIFO register
# The first result will include the length
fifo_data = self.get_register('fifo')
if length is not None:
message_length = length
message += [fifo_data >> 8]
message_length -= 1
else:
message_length = fifo_data >> 8
if message_length == 0:
self.start_listening(channel)
continue
# Keep track of the total message length to truncate it
final_message_length = message_length
message += [fifo_data & 0xff]
message_length -= 1
# Read subsequent bytes from the FIFO register until
# there are no more bytes to read
while message_length > 0:
fifo_data = self.get_register('fifo')
message += [fifo_data >> 8, fifo_data & 0xff]
message_length -= 2
                # Truncate the message to its final size; since we read in
                # 16-bit words, we may have an extra byte
message = message[0:final_message_length]
break
return message
| StarcoderdataPython |
1787156 | <reponame>odeumgg/warhound
class OneIndexedList(list):
"""
This class exists to hold data which is one-indexed.
It pads the zero index with None, and will behave incorrectly if this
pad is ever removed. Suggested use is append-only.
When iterating over this list, the first item will be discarded by the
iterator.
Consequently, if you want to enumerate with index, you should do:
enumerate(one_indexed_list, start=1)
"""
def __init__(self):
super(OneIndexedList, self).append(None)
def __len__(self):
return super(OneIndexedList, self).__len__() - 1
def __getitem__(self, key):
if key == 0:
raise IndexError('IndexError: list index out of range')
return super(OneIndexedList, self).__getitem__(key)
def __iter__(self):
return iter(self[1:])
def __repr__(self):
return repr(self[1:])
def mk_oil():
return OneIndexedList()
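# Editor's sketch (not part of the original module): a quick check of the
# one-indexing behaviour described in the docstring.
if __name__ == "__main__":
    oil = mk_oil()
    oil.append("a")
    oil.append("b")
    assert len(oil) == 2
    assert oil[1] == "a" and oil[2] == "b"
    # Iteration skips the internal padding element, so enumerate from 1:
    assert list(enumerate(oil, start=1)) == [(1, "a"), (2, "b")]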
| StarcoderdataPython |
180297 | <filename>Chapter05/code/chapter5_05/contacts_view.py
import tkinter as tk
from contact import Contact
class ContactList(tk.Frame):
def __init__(self, master, **kwargs):
super().__init__(master)
self.lb = tk.Listbox(self, **kwargs)
scroll = tk.Scrollbar(self, command=self.lb.yview)
self.lb.config(yscrollcommand=scroll.set)
scroll.pack(side=tk.RIGHT, fill=tk.Y)
self.lb.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
def insert(self, contact, index=tk.END):
text = "{}, {}".format(contact.last_name, contact.first_name)
self.lb.insert(index, text)
def delete(self, index):
self.lb.delete(index, index)
def update(self, contact, index):
self.delete(index)
self.insert(contact, index)
def bind_doble_click(self, callback):
handler = lambda _: callback(self.lb.curselection()[0])
self.lb.bind("<Double-Button-1>", handler)
class ContactForm(tk.LabelFrame):
fields = ("<NAME>", "<NAME>", "Email", "Phone")
def __init__(self, master, **kwargs):
super().__init__(master, text="Contact", padx=10, pady=10, **kwargs)
self.frame = tk.Frame(self)
self.entries = list(map(self.create_field, enumerate(self.fields)))
self.frame.pack()
def create_field(self, field):
position, text = field
label = tk.Label(self.frame, text=text)
entry = tk.Entry(self.frame, width=25)
label.grid(row=position, column=0, pady=5)
entry.grid(row=position, column=1, pady=5)
return entry
def load_details(self, contact):
values = (contact.last_name, contact.first_name,
contact.email, contact.phone)
for entry, value in zip(self.entries, values):
entry.delete(0, tk.END)
entry.insert(0, value)
def get_details(self):
values = [e.get() for e in self.entries]
try:
return Contact(*values)
except ValueError as e:
mb.showerror("Validation error", str(e), parent=self)
def clear(self):
for entry in self.entries:
entry.delete(0, tk.END)
class NewContact(tk.Toplevel):
def __init__(self, parent):
super().__init__(parent)
self.contact = None
self.form = ContactForm(self)
self.btn_add = tk.Button(self, text="Confirm", command=self.confirm)
self.form.pack(padx=10, pady=10)
self.btn_add.pack(pady=10)
def confirm(self):
self.contact = self.form.get_details()
if self.contact:
self.destroy()
def show(self):
self.grab_set()
self.wait_window()
return self.contact
class UpdateContactForm(ContactForm):
def __init__(self, master, **kwargs):
super().__init__(master, **kwargs)
self.btn_save = tk.Button(self, text="Save")
self.btn_delete = tk.Button(self, text="Delete")
self.btn_save.pack(side=tk.RIGHT, ipadx=5, padx=5, pady=5)
self.btn_delete.pack(side=tk.RIGHT, ipadx=5, padx=5, pady=5)
def bind_save(self, callback):
self.btn_save.config(command=callback)
def bind_delete(self, callback):
self.btn_delete.config(command=callback)
class ContactsView(tk.Tk):
def __init__(self):
super().__init__()
self.title("SQLite Contacts list")
self.list = ContactList(self, height=15)
self.form = UpdateContactForm(self)
self.btn_new = tk.Button(self, text="Add new contact")
self.list.pack(side=tk.LEFT, padx=10, pady=10)
self.form.pack(padx=10, pady=10)
self.btn_new.pack(side=tk.BOTTOM, pady=5)
def set_ctrl(self, ctrl):
self.btn_new.config(command=ctrl.create_contact)
self.list.bind_doble_click(ctrl.select_contact)
self.form.bind_save(ctrl.update_contact)
self.form.bind_delete(ctrl.delete_contact)
def add_contact(self, contact):
self.list.insert(contact)
def update_contact(self, contact, index):
self.list.update(contact, index)
def remove_contact(self, index):
self.form.clear()
self.list.delete(index)
def get_details(self):
return self.form.get_details()
def load_details(self, contact):
self.form.load_details(contact)
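# Editor's sketch (not part of the original file): a minimal in-memory
# controller showing how ContactsView expects to be wired up. The controller
# class and its storage are assumptions for illustration only; the view just
# requires the four callbacks bound in set_ctrl().
class _InMemoryController:
    def __init__(self, view):
        self.view = view
        self.contacts = []      # Contact instances backing the listbox
        self.selection = None   # index of the contact currently being edited

    def create_contact(self):
        contact = NewContact(self.view).show()
        if contact is not None:
            self.contacts.append(contact)
            self.view.add_contact(contact)

    def select_contact(self, index):
        self.selection = index
        self.view.load_details(self.contacts[index])

    def update_contact(self):
        if self.selection is not None:
            contact = self.view.get_details()
            if contact is not None:
                self.contacts[self.selection] = contact
                self.view.update_contact(contact, self.selection)

    def delete_contact(self):
        if self.selection is not None:
            del self.contacts[self.selection]
            self.view.remove_contact(self.selection)
            self.selection = None


if __name__ == "__main__":
    view = ContactsView()
    view.set_ctrl(_InMemoryController(view))
    view.mainloop()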
| StarcoderdataPython |
17051 | <filename>async-functions.py<gh_stars>0
'''
<NAME>
Credit to Sentdex (https://pythonprogramming.net/)
'''
import asyncio
async def find_divisibles(inrange, div_by):
# Define division function with async functionality
print("finding nums in range {} divisible by {}".format(inrange, div_by))
located = []
for i in range(inrange):
if i % div_by == 0:
located.append(i)
if i % 50000 == 0:
await asyncio.sleep(0.00001)
print("Done w/ nums in range {} divisible by {}".format(inrange, div_by))
return located
async def main():
# Example functions to run concurrently
divs1 = loop.create_task(find_divisibles(508000, 34113))
divs2 = loop.create_task(find_divisibles(10052, 3210))
divs3 = loop.create_task(find_divisibles(500, 3))
    # Run the three tasks concurrently and wait for them all to finish
await asyncio.wait([divs1, divs2, divs3])
return divs1, divs2, divs3
if __name__ == '__main__':
try:
loop = asyncio.get_event_loop()
loop.set_debug(1)
d1, d2, d3 = loop.run_until_complete(main())
print(d1.result())
except Exception as e:
pass
finally:
loop.close() | StarcoderdataPython |
17047 | <reponame>mrocklin/pygdf<gh_stars>1-10
from setuptools import setup
import versioneer
packages = ['pygdf',
'pygdf.tests',
]
install_requires = [
'numba',
]
setup(name='pygdf',
description="GPU Dataframe",
version=versioneer.get_version(),
classifiers=[
# "Development Status :: 4 - Beta",
"Intended Audience :: Developers",
# "Operating System :: OS Independent",
"Programming Language :: Python",
# "Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
],
# Include the separately-compiled shared library
author="<NAME>, Inc.",
packages=packages,
package_data={
'pygdf.tests': ['data/*.pickle'],
},
install_requires=install_requires,
license="BSD",
cmdclass=versioneer.get_cmdclass(),
)
| StarcoderdataPython |
1706393 | # Copyright 2016 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from a10_neutron_lbaas.tests.unit.v2 import fake_objs
from a10_neutron_lbaas.tests.unit.v2 import test_base
import a10_neutron_lbaas.a10_exceptions as a10_ex
class TestPools(test_base.HandlerTestBase):
def test_sanity(self):
pass
def test_create(self):
methods = {
'ROUND_ROBIN':
self.a.last_client.slb.service_group.ROUND_ROBIN,
'LEAST_CONNECTIONS':
self.a.last_client.slb.service_group.LEAST_CONNECTION,
'SOURCE_IP':
self.a.last_client.slb.service_group.WEIGHTED_LEAST_CONNECTION,
}
protocols = {
'TCP': self.a.last_client.slb.service_group.TCP,
'UDP': self.a.last_client.slb.service_group.UDP,
}
persistences = [None, 'SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE']
listeners = [False, True]
for p in protocols.keys():
for m in methods.keys():
for pers in persistences:
for listener in listeners:
self.a.reset_mocks()
saw_exception = False
pool = fake_objs.FakePool(p, m, pers, listener)
try:
self.a.pool.create(None, pool)
except a10_ex.UnsupportedFeature as e:
if pers == 'APP_COOKIE':
saw_exception = True
else:
raise e
self.print_mocks()
# (self.a.last_client.slb.service_group.create.
# assert_called_with(
# pool.id,
# axapi_args={"service_group": {}},
# lb_method=methods(m),
# protocol=protocols(p)))
if not saw_exception:
n = str(self.a.last_client.mock_calls).index(
'slb.service_group.create')
self.assertTrue(n >= 0)
if pers == 'SOURCE_IP':
(self.a.last_client.slb.template.
src_ip_persistence.create.
assert_called_with(pool.id))
elif pers == 'HTTP_COOKIE':
(self.a.last_client.slb.template.
cookie_persistence.create.
assert_called_with(pool.id))
elif pers == 'APP_COOKIE':
(self.a.last_client.slb.template.
cookie_persistence.create.
assert_called_with(pool.id))
def test_create_with_template(self):
template = {
"service-group": {
"template-server": "sg1",
"template-port": "sg1",
"template-policy": "sg1"
}
}
exp_template = {
"template-server": "sg1",
"template-port": "sg1",
"template-policy": "sg1"
}
for k, v in self.a.config.get_devices().items():
v['templates'] = template
pers1 = None
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN', pers1, True)
self.a.pool.create(None, pool)
self.a.last_client.slb.service_group.create.assert_called_with(
pool.id,
axapi_args={"service_group": {}},
lb_method=mock.ANY,
config_defaults=mock.ANY,
protocol=mock.ANY,
service_group_templates=exp_template)
def test_update(self):
pers1 = None
pers2 = None
old_pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS', pers1, True)
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN', pers2, True)
self.a.pool.update(None, pool, old_pool)
self.print_mocks()
self.a.last_client.slb.service_group.update.assert_called_with(
pool.id,
axapi_args={"service_group": {}},
lb_method=mock.ANY,
config_defaults=mock.ANY,
protocol=mock.ANY,
service_group_templates=None)
def test_update_with_template(self):
template = {
"service-group": {
"template-server": "sg1",
"template-port": "sg1",
"template-policy": "sg1"
}
}
exp_template = {
"template-server": "sg1",
"template-port": "sg1",
"template-policy": "sg1"
}
for k, v in self.a.config.get_devices().items():
v['templates'] = template
pers1 = None
pers2 = None
old_pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS', pers1, True)
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN', pers2, True)
self.a.pool.update(None, pool, old_pool)
self.a.last_client.slb.service_group.update.assert_called_with(
pool.id,
axapi_args={"service_group": {}},
lb_method=mock.ANY,
config_defaults=mock.ANY,
protocol=mock.ANY,
service_group_templates=exp_template)
def test_delete(self):
members = [[], [fake_objs.FakeMember()]]
hms = [None, fake_objs.FakeHM('PING')]
persistences = [None, 'SOURCE_IP', 'HTTP_COOKIE']
listeners = [False, True]
for m in members:
for hm in hms:
for pers in persistences:
for lst in listeners:
self.a.reset_mocks()
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN',
pers, lst,
members=m,
hm=hm)
self.a.pool.neutron.member_count.return_value = 1
self.a.pool.delete(None, pool)
self.print_mocks()
(self.a.last_client.slb.service_group.delete.
assert_called_with(pool.id))
if pers == 'SOURCE_IP':
(self.a.last_client.slb.template.
src_ip_persistence.delete.
assert_called_with(pool.id))
elif pers == 'HTTP_COOKIE':
(self.a.last_client.slb.template.
cookie_persistence.delete.
assert_called_with(pool.id))
def _test_stats(self):
pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN', None, False)
actual = self.a.pool.stats(None, pool)
return pool, actual
def test_stats_calls_service_group_stats(self):
pool, actual = self._test_stats()
(self.a.last_client.slb.service_group.stats.
assert_called_with(pool.id))
def test_stats_returns_stats(self):
pool, actual = self._test_stats()
self.assertIn("stats", actual)
def test_stats_returns_members(self):
pool, actual = self._test_stats()
self.assertIn("members", actual)
def _test_create_expressions(self, os_name, pattern, expressions=None):
self.a.config.get_service_group_expressions = self._get_expressions_mock
expressions = expressions or self.a.config.get_service_group_expressions()
expected = expressions.get(pattern, {}).get("json", None) or ""
p = 'TCP'
m = fake_objs.FakePool(p, 'ROUND_ROBIN', None)
m.name = os_name
handler = self.a.pool
handler.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertIn("service_group.create", s)
self.assertIn(str(expected), s)
def test_create_expressions_none(self):
self._test_create_expressions("mypool", None, {})
def test_create_expressions_match_beginning(self):
self._test_create_expressions("securepool", self.EXPR_BEGIN)
def test_create_expressions_match_end(self):
self._test_create_expressions("poolweb", self.EXPR_END)
def test_create_expressions_match_charclass(self):
self._test_create_expressions("poolwwpool", self.EXPR_CLASS)
def test_create_expressions_nomatch(self):
self.a.config.get_service_group_expressions = self._get_expressions_mock
expressions = self.a.config.get_service_group_expressions()
expected = expressions["beginning"]
p = 'TCP'
m = fake_objs.FakePool(p, 'ROUND_ROBIN', None)
m.name = "thepool"
handler = self.a.pool
handler.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertIn("service_group.create", s)
self.assertNotIn(str(expected), s)
def test_create_empty_name_noexception(self):
self.a.config.get_service_group_expressions = self._get_expressions_mock
expressions = self.a.config.get_service_group_expressions()
expected = expressions["beginning"]
p = 'TCP'
m = fake_objs.FakePool(p, 'ROUND_ROBIN', None)
m.name = None
handler = self.a.pool
handler.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertIn("service_group.create", s)
self.assertNotIn(str(expected), s)
| StarcoderdataPython |
1658099 | import pytest
from django.utils import timezone
from expenses.models import Expenses
from factories.house import HouseFactory
from house.models import House, Country, City
AMOUNT = '100'
DATE = timezone.now()
CATEGORY = 'Clothing'
HOUSE_NAME = "House 1"
HOUSE_NAME_1 = "House one"
HOUSE_NAME_2 = "House two"
HOUSE_PUBLIC = True
COUNTRY = "Colorado"
CITY = "South Park"
HOUSE_PARENT_PROFESSION_1 = "Teacher"
HOUSE_PARENT_PROFESSION_2 = "Student"
HOUSE_INCOME = 10_000
HOUSE_CHILDREN = 1
@pytest.fixture
def generate_country():
return Country.create_country(name=COUNTRY)
@pytest.fixture
def generate_city(generate_country):
return City.create_city(name=CITY, country=generate_country)
@pytest.fixture
def generate_house(generate_country, generate_city):
house = House.create_house(
# house_id=HOUSE_ID,
name=HOUSE_NAME,
public=HOUSE_PUBLIC,
country=generate_country,
city=generate_city,
parent_profession_1=HOUSE_PARENT_PROFESSION_1,
parent_profession_2=HOUSE_PARENT_PROFESSION_2,
income=HOUSE_INCOME,
children=HOUSE_CHILDREN,
)
return house
@pytest.fixture
def generate_expense(generate_house):
return Expenses.create_expense(
house_name=generate_house,
amount=AMOUNT,
date=DATE,
category=CATEGORY
)
@pytest.mark.django_db()
class TestExpensesModel:
def test_new_expense(self, generate_expense):
assert generate_expense.amount == AMOUNT
assert generate_expense.date == DATE
assert generate_expense.category == CATEGORY
def test_save_expense(self, generate_expense):
generate_expense.save()
assert generate_expense in Expenses.objects.all()
def test_delete_expense(self, generate_expense):
generate_expense.save()
generate_expense.house_name.delete()
assert generate_expense not in Expenses.objects.all()
def _helper_create_houses():
house_1 = HouseFactory(name=HOUSE_NAME_1)
house_2 = HouseFactory(name=HOUSE_NAME_2)
Expenses(house_name=house_1, amount=100, date=DATE, category=Expenses.Category.FOOD).save()
Expenses(house_name=house_1, amount=200, date=DATE, category=Expenses.Category.FOOD).save()
Expenses(house_name=house_2, amount=300, date=DATE, category=Expenses.Category.FOOD).save()
Expenses(house_name=house_2, amount=300, date=DATE, category=Expenses.Category.KIDS).save()
Expenses(house_name=house_2, amount=400, date=DATE, category=Expenses.Category.KIDS).save()
@pytest.mark.django_db()
class TestExpensesFunctions:
def test_average_expenses_of_houses_by_categories(self):
_helper_create_houses()
houses = House.objects.all()
avg_expenses_by_categories = Expenses.average_expenses_of_houses_by_categories(houses=houses)
assert avg_expenses_by_categories[0]['average'] == 200
assert avg_expenses_by_categories[1]['average'] == 350
| StarcoderdataPython |
3296650 | <reponame>CLARIN-PL/personalized-nlp<filename>personalized_nlp/datasets/emotions/emotions.py
from typing import List
import pandas as pd
import os
from personalized_nlp.settings import STORAGE_DIR
from personalized_nlp.utils.data_splitting import split_texts
from personalized_nlp.datasets.datamodule_base import BaseDataModule
class EmotionsDataModule(BaseDataModule):
def __init__(
self,
language: str = 'english',
split_sizes: List[float] = [0.55, 0.15, 0.15, 0.15],
normalize=False,
**kwargs,
):
super().__init__(**kwargs)
self.data_dir = STORAGE_DIR / 'emotions_data'
self.split_sizes = split_sizes
self.language = language
self.annotation_column = ['OCZEKIWANIE',
'POBUDZENIE EMOCJONALNE',
'RADOŚĆ',
'SMUTEK',
'STRACH',
'WSTRĘT',
'ZASKOCZENIE',
'ZAUFANIE',
'ZNAK EMOCJI',
'ZŁOŚĆ']
self.text_column = 'text'
if self.language != 'polish':
self.text_column = f'text_{self.language}'
self.word_stats_annotation_column = 'POBUDZENIE EMOCJONALNE'
self.embeddings_path = STORAGE_DIR / \
f'emotions_data/embeddings/text_id_to_emb_{self.embeddings_type}_{language}.p'
self.train_split_names = ['present', 'past']
self.val_split_names = ['future1']
self.test_split_names = ['future2']
self.normalize = normalize
os.makedirs(self.data_dir / 'embeddings', exist_ok=True)
@property
def class_dims(self):
return [5] * 8 + [7, 5]
@property
def texts_clean(self):
return self.data[self.text_column].to_list()
def prepare_data(self) -> None:
self.data = pd.read_csv(
self.data_dir / 'texts' /'cawi2_texts_multilang.csv')
self.data.loc[:, 'text'] = self.data.loc[:, 'text_' + self.language]
self.annotations = pd.read_csv(
self.data_dir / 'texts' / 'cawi2_annotations.csv').dropna()
self.annotators = pd.read_csv(
self.data_dir / 'texts' / 'cawi2_annotators.csv')
if self.normalize:
self.normalize_labels()
self._assign_splits()
personal_df = self.annotations_with_data.loc[self.annotations_with_data.split == 'past']
self.compute_annotator_biases(personal_df)
def normalize_labels(self):
annotation_column = self.annotation_column
df = self.annotations
mins = df.loc[:, annotation_column].values.min(axis=0)
df.loc[:, annotation_column] = (df.loc[:, annotation_column] - mins)
maxes = df.loc[:, annotation_column].values.max(axis=0)
df.loc[:, annotation_column] = df.loc[:, annotation_column] / maxes
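    # Editor's note (worked example of the min-max scaling above, not in the
    # original file): a column with values [1, 3, 5] has min 1, giving
    # [0, 2, 4] after the shift; the shifted max is 4, giving [0.0, 0.5, 1.0]
    # after the division.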
def _assign_splits(self):
self.data = split_texts(self.data, self.split_sizes)
| StarcoderdataPython |
3357939 | import os
import logging
import ferris
log = logging.getLogger(__name__)
_log = logging.getLogger()
_log.addHandler(logging.StreamHandler())
_log.setLevel(logging.DEBUG)
class Client(ferris.Client):
async def on_ready(self):
log.info("Starting test.")
g = await self.create_guild(name='test')
g = await self.fetch_guild(g.id)
log.info(repr(g))
await g.edit(name='test_edit')
log.info("Create and Fetch and edit guild works.")
c = await g.create_channel(name='test')
c = await self.fetch_channel(c.id)
log.info(repr(c))
await c.edit(name='test_edit')
m = await c.send("Test.")
log.info(repr(m))
await m.edit(content='test_edit')
log.info("Create, Fetch, Edit channel, send, edit message works.")
u = await self.fetch_user(self.user.id)
log.info(repr(u))
log.info("Fetch user works.")
i = await g.create_invite()
log.info(repr(i))
log.info("Create invite works.")
await m.delete()
await c.delete()
await g.delete()
log.info("Delete message, channel, guild works.")
log.info("Test done, all passed")
await self.close()
client = Client()
client.run(token=os.getenv('TOKEN'))
| StarcoderdataPython |
3234445 | from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
import uuid
class TeachingMethod(models.Model):
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
        help_text='Unique ID for this method'
)
title = models.CharField(
max_length=1000,
        help_text='Name of the teaching technique',
        verbose_name='Title'
)
author = models.ForeignKey(User, on_delete=models.CASCADE)
description = models.TextField(
        help_text='Description of the teaching technique',
        verbose_name='Description'
)
lesson_part = models.ForeignKey(
'LessonPart',
on_delete=models.SET_NULL,
null=True,
        verbose_name='Lesson part'
)
def __str__(self):
return self.title
def get_preview(self):
len_preview = 200
if len(self.description) > len_preview:
preview_string = self.description[:len_preview] + '...'
else:
preview_string = self.description
return preview_string
def get_reading_time(self):
return len(self.description.replace(' ', '')) // 120
def get_absolute_url(self):
return reverse('method_detail', args=[str(self.id)])
class Meta:
        verbose_name = 'Teaching method'
        verbose_name_plural = 'Teaching methods'
class LessonPart(models.Model):
name = models.CharField(
max_length=1000,
        help_text='Section name'
)
description = models.TextField(
        help_text='Section description'
)
url = models.CharField(
max_length=500,
        help_text='Link to the section'
)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('method_list', args=[str(self.url)])
class Meta:
        verbose_name = 'Lesson part'
        verbose_name_plural = 'Lesson parts'
| StarcoderdataPython |
1756966 | """Training routines for LSTM model."""
import os
from collections import OrderedDict
import numpy as np
from tqdm import tqdm
import torch
from torch.autograd import Variable
import torch.optim as optim
import torch.multiprocessing as mp
from torch.nn.utils import clip_grad_norm
import utils, criterion
class Trainer():
"""Trainer object for training, validation and testing the model."""
def __init__(self, model, criterion, opt, optim_state):
"""Setup optim_state, optimizer and logger."""
self.model = model
self.criterion = criterion
self.optim_state = optim_state
self.opt = opt
# Only set weight decay to weights
params_dict = dict(self.model.named_parameters())
params = []
for key, value in params_dict.items():
if 'weight' in key and ('fc' in key or 'out' in key
or 'attention' in key):
params += [{'params': value, 'weight_decay': opt.weightDecay}]
else:
params += [{'params': value, 'weight_decay': 0.0}]
# Set optimizer
if opt.optimizer == 'SGD':
self.optimizer = optim.SGD(params, lr=opt.LR,
momentum=opt.momentum)
elif opt.optimizer == 'Adam':
self.optimizer = optim.Adam(params, lr=opt.LR,
betas=(0.9, 0.999), eps=1e-8)
else:
raise NotImplementedError
# Set new optim_state if retrain, restore if exist
if self.optim_state is None:
self.optim_state = {'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'epoch': -1, 'initial_lr': self.opt.LR}
log_option = 'w+'
else:
self.model.load_state_dict(self.optim_state['state_dict'])
self.optimizer.load_state_dict(self.optim_state['optimizer'])
log_option = 'a+'
self.optimizer.param_groups[0]['initial_lr'] = \
self.optim_state['initial_lr']
        # Learning rate scheduler
if opt.LRDecay == 'anneal':
self.scheduler = optim.lr_scheduler.ExponentialLR(
self.optimizer, gamma=opt.LRDParam,
last_epoch=self.optim_state['epoch'])
elif opt.LRDecay == 'stepwise':
self.scheduler = optim.lr_scheduler.StepLR(
self.optimizer, step_size=3, gamma=opt.LRDParam,
last_epoch=self.optim_state['epoch'])
elif opt.LRDecay == 'newbob':
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, factor=opt.LRDParam, patience=1)
else:
assert opt.LRDecay == 'none'
self.logger = \
{'train': open(os.path.join(opt.resume, 'train.log'), log_option),
'val': open(os.path.join(opt.resume, 'val.log'), log_option),
'test': open(os.path.join(opt.resume, 'test.log'), log_option)}
def xent_onebest(self, output, indices, reference):
"""Compute confidence score binary cross entropy."""
assert len(indices) == len(reference), "inconsistent one-best sequence."
loss, count = 0, 0
pred_onebest, ref_onebest = [], []
if indices:
# Extract scores on one-best and apply sigmoid
prediction = [output[i] for i in indices]
for pred, ref in zip(prediction, reference):
if ref is not None:
count += 1
pred_onebest.append(pred)
ref_onebest.append(ref)
one_best_pred = Variable(torch.Tensor(pred_onebest))
one_best_ref = Variable(torch.Tensor(ref_onebest))
loss_fn = criterion.create_criterion()
loss = loss_fn(one_best_pred, one_best_ref).data[0]
return loss, count, pred_onebest, ref_onebest
def xent(self, output, ignore, reference):
"""Compute confidence score binary cross entropy."""
loss, count = 0, 0
all_pred, all_ref = [], []
for i, (pred, ref) in enumerate(zip(output, reference)):
if i not in ignore:
count += 1
all_pred.append(pred)
all_ref.append(float(ref))
all_pred_t = Variable(torch.Tensor(all_pred))
all_ref_t = Variable(torch.Tensor(all_ref))
loss_fn = criterion.create_criterion()
loss = loss_fn(all_pred_t, all_ref_t).data[0]
return loss, count, all_pred, all_ref
@staticmethod
def mean(value, count):
"""Deal with zero count."""
if count == 0:
assert value == 0
return 0
else:
return value / count
@staticmethod
def moving_average(avg, total_count, val, count):
"""Compute the weighted average"""
all_val = avg * total_count + val
all_counts = total_count + count
return Trainer.mean(all_val, all_counts)
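    # Editor's note (worked example, not in the original file):
    # moving_average(avg=0.5, total_count=10, val=7.0, count=10) returns
    # (0.5 * 10 + 7.0) / (10 + 10) = 0.6, where val is the *summed* loss over
    # the count new items, as accumulated by the callers below.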
def forward_one_lattice(self, lattice, target, index, results, update):
"""Forward through one single lattice on CPU."""
if lattice.edge_num == 0 or not target.ref:
results[index] = [(0, 0), (0, 0), ([], [])]
else:
if update:
self.optimizer.zero_grad()
lattice.edges = Variable(torch.from_numpy(lattice.edges).float())
target_t = Variable(
torch.from_numpy(target.target).float().view(-1, 1))
output = self.model.forward(lattice)
target_length = target_t.size(0)
            # Error signals on all arcs, filtering out arcs that need to be ignored
target_back = []
output_back = []
count = 0
for i in range(target_length):
if i not in lattice.ignore:
count += 1
target_back.append(target_t[i])
output_back.append(output[i])
target_back = torch.cat(target_back).view(-1, 1)
output_back = torch.cat(output_back).view(-1, 1)
loss = self.criterion(output_back, target_back) / count
# Error signals on one-best path
assert len(target.indices) == len(target.ref), \
"inconsistent one-best sequence"
target_back_onebest = []
output_back_onebest = []
count_onebest = 0
pred_onebest, ref_onebest = [], []
if target.indices:
for j in target.indices:
count_onebest += 1
target_back_onebest.append(target_t[j])
output_back_onebest.append(output[j])
pred_onebest.append(output[j].data[0])
ref_onebest.append(target.target[j])
target_back_onebest = torch.cat(target_back_onebest).view(-1, 1)
output_back_onebest = torch.cat(output_back_onebest).view(-1, 1)
loss_onebest = self.criterion(
output_back_onebest, target_back_onebest) / count_onebest
# update the network as a combination of losses
if update:
total_loss = loss_onebest if self.opt.onebest else loss
total_loss.backward()
clip_grad_norm(self.model.parameters(), self.opt.clip)
self.optimizer.step()
if self.opt.onebest:
all_loss, all_count, all_pred, all_ref = self.xent_onebest(
output.data.view(-1), target.indices, target.ref)
assert all_count == count_onebest, \
"inconsistent count on onebest"
else:
all_loss, all_count, all_pred, all_ref = self.xent(
output.data.view(-1), lattice.ignore, target.target)
results[index] = [(loss.data[0]*count, count),
(loss_onebest.data[0]*count_onebest, count_onebest),
(all_pred, all_ref)]
def train(self, train_loader, epoch, val_loss):
"""Training mode."""
if self.opt.LRDecay in ['anneal', 'stepwise']:
self.scheduler.step()
elif self.opt.LRDecay == 'newbob':
self.scheduler.step(val_loss)
self.model.train()
avg_loss, total_count = 0, 0
avg_loss_onebest, total_count_onebest = 0, 0
wrapper = tqdm(train_loader, dynamic_ncols=True)
# Looping through batches
for lattices, targets in wrapper:
assert len(lattices) == len(targets), \
"Data and targets with different lengths."
batch_loss, batch_count = 0, 0
batch_loss_onebest, batch_count_onebest = 0, 0
# CPU Hogwild training
# Each process is one training sample in a mini-batch
processes = []
manager = mp.Manager()
results = manager.list([None] * len(lattices))
# Fork processes
for j, (lattice, target) in enumerate(zip(lattices, targets)):
fork = mp.Process(target=self.forward_one_lattice,
args=(lattice, target, j, results, True))
fork.start()
processes.append(fork)
# Wait until all processes are finished
for fork in processes:
fork.join()
            # Collect loss stats
for result in results:
batch_loss += result[0][0]
batch_count += result[0][1]
batch_loss_onebest += result[1][0]
batch_count_onebest += result[1][1]
# Compute average losses and increment counters
avg_loss = Trainer.moving_average(
avg_loss, total_count, batch_loss, batch_count)
avg_loss_onebest = Trainer.moving_average(
avg_loss_onebest, total_count_onebest,
batch_loss_onebest, batch_count_onebest)
total_count += batch_count
total_count_onebest += batch_count_onebest
learning_rate = self.optimizer.param_groups[0]['lr']
# Set tqdm display elements
wrapper.set_description("".ljust(7) + 'Train')
postfix = OrderedDict()
postfix['allarc'] = '%.4f' %Trainer.mean(batch_loss, batch_count)
postfix['allarcAvg'] = '%.4f' %avg_loss
postfix['onebest'] = '%.4f' %Trainer.mean(
batch_loss_onebest, batch_count_onebest)
postfix['onebestAvg'] = '%.4f' %avg_loss_onebest
postfix['lr'] = '%.5f' %learning_rate
wrapper.set_postfix(ordered_dict=postfix)
self.optim_state['epoch'] = epoch - 1
self.logger['train'].write('%d %f %f\n' %(epoch, avg_loss, avg_loss_onebest))
print("".ljust(7) + "Training loss".ljust(16)
+ utils.color_msg('%.4f' %(avg_loss_onebest if self.opt.onebest \
else avg_loss)))
return avg_loss_onebest if self.opt.onebest else avg_loss
def val(self, val_loader, epoch):
"""Validation mode."""
self.model.eval()
avg_loss, total_count = 0, 0
avg_loss_onebest, total_count_onebest = 0, 0
wrapper = tqdm(val_loader, dynamic_ncols=True)
# Looping though batches
for lattices, targets in wrapper:
assert len(lattices) == len(targets), \
"Data and targets with different lengths."
batch_loss, batch_count = 0, 0
batch_loss_onebest, batch_count_onebest = 0, 0
processes = []
manager = mp.Manager()
results = manager.list([None] * len(lattices))
# Fork processes
for j, (lattice, target) in enumerate(zip(lattices, targets)):
fork = mp.Process(target=self.forward_one_lattice,
args=(lattice, target, j, results, False))
fork.start()
processes.append(fork)
# Wait until all processes are finished
for fork in processes:
fork.join()
            # Collect loss stats
for result in results:
batch_loss += result[0][0]
batch_count += result[0][1]
batch_loss_onebest += result[1][0]
batch_count_onebest += result[1][1]
# Compute average losses and increment counters
avg_loss = Trainer.moving_average(
avg_loss, total_count, batch_loss, batch_count)
avg_loss_onebest = Trainer.moving_average(
avg_loss_onebest, total_count_onebest,
batch_loss_onebest, batch_count_onebest)
total_count += batch_count
total_count_onebest += batch_count_onebest
wrapper.set_description("".ljust(7) + 'val'.ljust(5))
postfix = OrderedDict()
postfix['allarc'] = '%.4f' %Trainer.mean(batch_loss, batch_count)
postfix['allarcAvg'] = '%.4f' %avg_loss
postfix['onebest'] = '%.4f' %Trainer.mean(
batch_loss_onebest, batch_count_onebest)
postfix['onebestAvg'] = '%.4f' %avg_loss_onebest
wrapper.set_postfix(ordered_dict=postfix)
self.logger['val'].write('%d %f %f\n' %(epoch, avg_loss, avg_loss_onebest))
print("".ljust(7) + "Validation loss".ljust(16)
+ utils.color_msg('%.4f' %(avg_loss_onebest if self.opt.onebest \
else avg_loss)))
return avg_loss_onebest if self.opt.onebest else avg_loss
def test(self, val_loader, epoch):
"""Testing mode."""
self.model.eval()
# import pdb; pdb.set_trace()
prediction = []
reference = []
posteriors = []
avg_loss, total_count = 0, 0
avg_loss_onebest, total_count_onebest = 0, 0
wrapper = tqdm(val_loader, dynamic_ncols=True)
for lattices, targets in wrapper:
assert len(lattices) == len(targets), \
"Data and targets with different lengths."
batch_loss, batch_count = 0, 0
batch_loss_onebest, batch_count_onebest = 0, 0
processes = []
manager = mp.Manager()
results = manager.list([None] * len(lattices))
# Fork processes
for j, (lattice, target) in enumerate(zip(lattices, targets)):
fork = mp.Process(target=self.forward_one_lattice,
args=(lattice, target, j, results, False))
fork.start()
processes.append(fork)
# Wait until all processes are finished
for fork in processes:
fork.join()
            # Collect loss stats
for result in results:
batch_loss += result[0][0]
batch_count += result[0][1]
batch_loss_onebest += result[1][0]
batch_count_onebest += result[1][1]
prediction += result[2][0]
reference += result[2][1]
# Compute average losses and increment counters
avg_loss = Trainer.moving_average(
avg_loss, total_count, batch_loss, batch_count)
avg_loss_onebest = Trainer.moving_average(
avg_loss_onebest, total_count_onebest,
batch_loss_onebest, batch_count_onebest)
total_count += batch_count
total_count_onebest += batch_count_onebest
wrapper.set_description("".ljust(7) + 'Test epoch %i' %epoch)
postfix = OrderedDict()
postfix['allarc'] = '%.4f' %Trainer.mean(batch_loss, batch_count)
postfix['allarcAvg'] = '%.4f' %avg_loss
postfix['onebest'] = '%.4f' %Trainer.mean(
batch_loss_onebest, batch_count_onebest)
postfix['onebestAvg'] = '%.4f' %avg_loss_onebest
wrapper.set_postfix(ordered_dict=postfix)
for lattice, target in zip(lattices, targets):
for i, edge_data in enumerate(lattice.edges):
if self.opt.onebest:
if i in target.indices:
posteriors.append(edge_data[-1])
else:
if i not in lattice.ignore:
posteriors.append(edge_data[-1])
assert len(posteriors) == len(prediction), "wrong lengths"
self.logger['test'].write('%f %f\n' %(avg_loss, avg_loss_onebest))
print("".ljust(7) + "Test loss".ljust(16)
+ utils.color_msg('%.4f' %(avg_loss_onebest if self.opt.onebest \
else avg_loss)))
prediction = np.array(prediction)
reference = np.array(reference)
posteriors = np.array(posteriors)
if self.opt.onebest:
return avg_loss_onebest, prediction, reference, posteriors
else:
return avg_loss, prediction, reference, posteriors
def create_trainer(model, criterion, opt, optim_state):
"""New Trainer object."""
trainer = Trainer(model, criterion, opt, optim_state)
return trainer
| StarcoderdataPython |
1675166 | <filename>python/openassetio/hostAPI/terminology.py<gh_stars>10-100
#
# Copyright 2013-2021 The Foundry Visionmongers Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
@namespace openassetio.hostAPI.terminology
This module provides utilities for a @ref host that simplify the
integration of a @ref manager "manager's" custom terminology into its
user-facing components.
"""
from .._core.audit import auditApiCall
## @namespace openassetio.hostAPI.terminology
#
# The terminology mapping mechanism allows Managers to customize
# terminology used within the host application.
# The Mapper class allows for more efficient access to term
# mapping during a Host session, and for the host to provide
# additional terminology keys.
#
## @name Terminology dict keys
## @{
kTerm_Asset = 'asset'
kTerm_Assets = 'assets'
kTerm_Manager = 'manager'
kTerm_Publish = 'publish'
kTerm_Publishing = 'publishing'
kTerm_Published = 'published'
kTerm_Shot = 'shot'
kTerm_Shots = 'shots'
## @}
## Default terminology for the API.
# Hosts may choose to add additional terminology keys when
# constructing a Mapper, but there is no expectation that
# any given manager would customize keys other than the
# defaultTerminology.
defaultTerminology = {
kTerm_Asset: 'Asset',
kTerm_Assets: 'Assets',
kTerm_Manager: 'Asset Manager',
kTerm_Publish: 'Publish',
kTerm_Publishing: 'Publishing',
kTerm_Published: 'Published',
kTerm_Shot: 'Shot',
kTerm_Shots: 'Shots',
}
class Mapper:
"""
The Mapper class provides string substitution methods and lookups to
determine the correct terminology for the supplied @ref manager.
"""
def __init__(self, manager, terminology=defaultTerminology):
"""
Constructs a new Mapper using terminology overrides defined
by the supplied Manager. The manager is queried during
construction and it's results cached for the lifetime of the
Mapper.
@param manager Manager, A Manager instance, whose terminology
should be applied by the mapper.
@param terminology A dict of terms that will be substituted by
this instance. If left unspecified, the default API terminology
will be used. Hosts may take a copy of this dictionary and
append to it before passing to the Mapper to allow managers
to customize additional host-specific terms. There is no
expectation that a manager would handle new terms without
specific knowledge in advance.
"""
# As we take a copy, the default of the shared
# dict isn't dangerous.
# pylint: disable=dangerous-default-value
self.__terminology = dict(terminology)
self.__updateTerminology(manager)
@auditApiCall("Terminology")
def replaceTerms(self, sourceStr):
"""
Substitutes any valid terminology tokens in the input string
with those appropriate to the current Manager. These tokens are
as per python format convention, using the constants defined in
@ref openassetio.hostAPI.terminology under kTerm_*. For
example:
@li "{publish} to {manager}..."
@warning Escaping brace literals with `{{` is not currently
supported.
@param sourceStr a string to substitute.
@return str The input string with all applicable terms
substituted, any remaining braces ({}) will be removed.
"""
try:
            # Make sure we don't abort if there is an unknown string
sourceStr = sourceStr.format(**self.__terminology)
except KeyError:
pass
return sourceStr.replace("{", "").replace("}", "")
@auditApiCall("Terminology")
def term(self, key, default=''):
"""
Returns the term corresponding to the supplied key, @ref
openassetio.hostAPI.terminology under kTerm_*
@return str or the supplied default if the key is unknown.
"""
return self.__terminology.get(key, default)
def __updateTerminology(self, manager):
        # Get any custom strings from the manager that we should use in the UI;
        # this allows consistent terminology across implementations of a
# specific asset management system.
manager.updateTerminology(self.__terminology)
self.__terminology[kTerm_Manager] = manager.displayName()
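# Editor's sketch (not part of the upstream module): how a host might apply a
# manager's terminology. ``_FakeManager`` is a stand-in; only the two methods
# Mapper actually calls are stubbed. Because of the relative import above, run
# this as a module (e.g. ``python -m openassetio.hostAPI.terminology``) rather
# than as a plain script.
if __name__ == "__main__":
    class _FakeManager:
        def updateTerminology(self, terms):
            terms[kTerm_Shot] = "Cut"
            terms[kTerm_Shots] = "Cuts"

        def displayName(self):
            return "Example Asset System"

    mapper = Mapper(_FakeManager())
    # Prints "Publish Cuts to Example Asset System..."
    print(mapper.replaceTerms("{publish} {shots} to {manager}..."))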
| StarcoderdataPython |
1763426 | # 2020.07.06
# Problem Statement:
# https://leetcode.com/problems/add-two-numbers/
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
# declare the head node
head = ListNode()
# declare a changing node, function as a 'pointer'
ptr = head
# initialize carrier = 0
add_one = False
# iterate until both lists are gone through
while l1 is not None or l2 is not None:
# check if already gone through
if l1 is None:
add_1 = 0
else:
add_1 = l1.val
l1 = l1.next # move to the next
if l2 is None:
add_2 = 0
else:
add_2 = l2.val
l2 = l2.next # move to the next
if not add_one:
if add_1 + add_2 >= 10:
ptr.val = add_1 + add_2 - 10
add_one = True
else:
ptr.val = add_1 + add_2
add_one = False
else:
if add_1 + add_2 + 1 >= 10:
ptr.val = add_1 + add_2 + 1 - 10
add_one = True
else:
ptr.val = add_1 + add_2 + 1
add_one = False
# if still have numbers not gone through
if (l1 is not None) or (l2 is not None):
ptr.next = ListNode()
ptr = ptr.next
# have gone through both lists
elif add_one:
ptr.next = ListNode(val = 1, next = None)
return head | StarcoderdataPython |
1611742 | <reponame>MrBurtyyy/yoti-python-sdk
from yoti_python_sdk.sandbox.attribute import SandboxAttribute
from yoti_python_sdk import config
import base64
class YotiTokenResponse(object):
def __init__(self, token):
self.__token = token
@property
def token(self):
"""
The token to be used by the Client
:return: the token
"""
return self.__token
class YotiTokenRequest(object):
def __init__(self, remember_me_id=None, sandbox_attributes=None):
if remember_me_id is None:
remember_me_id = ""
if sandbox_attributes is None:
sandbox_attributes = []
self.remember_me_id = remember_me_id
self.sandbox_attributes = sandbox_attributes
def __dict__(self):
return {
"remember_me_id": self.remember_me_id,
"profile_attributes": self.sandbox_attributes,
}
@staticmethod
def builder():
"""
Creates an instance of the yoti token request builder
:return: instance of YotiTokenRequestBuilder
"""
return YotiTokenRequestBuilder()
class YotiTokenRequestBuilder(object):
def __init__(self):
self.remember_me_id = None
self.attributes = []
def with_remember_me_id(self, remember_me_id):
"""
Sets the remember me id on the builder
:param remember_me_id: the remember me id
:return: the updated builder
"""
self.remember_me_id = remember_me_id
return self
def with_attribute(self, sandbox_attribute):
"""
Appends a SandboxAttribute to the list of attributes on the builder
:param SandboxAttribute sandbox_attribute:
:return: the updated builder
"""
self.attributes.append(sandbox_attribute)
return self
def with_given_names(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for given names
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(
config.ATTRIBUTE_GIVEN_NAMES, value, anchors
)
return self.with_attribute(attribute)
def with_family_name(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for family name
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(
config.ATTRIBUTE_FAMILY_NAME, value, anchors
)
return self.with_attribute(attribute)
def with_full_name(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for full name
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(config.ATTRIBUTE_FULL_NAME, value, anchors)
return self.with_attribute(attribute)
def with_date_of_birth(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for date of birth
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(
config.ATTRIBUTE_DATE_OF_BIRTH, value, anchors
)
return self.with_attribute(attribute)
def with_age_verification(self, age_verification):
"""
Creates and appends a SandboxAttribute with a given age verification
:param SandboxAgeVerification age_verification: the age verification
:return:
"""
return self.with_attribute(age_verification.to_attribute())
def with_gender(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for gender
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(config.ATTRIBUTE_GENDER, value, anchors)
return self.with_attribute(attribute)
def with_phone_number(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for phone number
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(
config.ATTRIBUTE_PHONE_NUMBER, value, anchors
)
return self.with_attribute(attribute)
def with_nationality(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for nationality
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(
config.ATTRIBUTE_NATIONALITY, value, anchors
)
return self.with_attribute(attribute)
def with_postal_address(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for postal address
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(
config.ATTRIBUTE_POSTAL_ADDRESS, value, anchors
)
return self.with_attribute(attribute)
def with_structured_postal_address(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for structured postal address
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(
config.ATTRIBUTE_STRUCTURED_POSTAL_ADDRESS, value, anchors
)
return self.with_attribute(attribute)
def with_selfie(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for a selfie
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
base64_selfie = base64.b64encode(value).decode("utf-8")
return self.with_base64_selfie(base64_selfie, anchors)
def with_base64_selfie(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for given names
:param str value: base64 encoded value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(config.ATTRIBUTE_SELFIE, value, anchors)
return self.with_attribute(attribute)
def with_email_address(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for email address
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(
config.ATTRIBUTE_EMAIL_ADDRESS, value, anchors
)
return self.with_attribute(attribute)
def with_document_details(self, value, anchors=None):
"""
Creates and appends a SandboxAttribute for document details
:param str value: the value
:param list[SandboxAnchor] anchors: optional list of anchors
:return:
"""
attribute = self.__create_attribute(
config.ATTRIBUTE_DOCUMENT_DETAILS, value, anchors
)
return self.with_attribute(attribute)
def build(self):
"""
Creates an instance of YotiTokenRequest using the supplied values
:return: instance of YotiTokenRequest
"""
return YotiTokenRequest(self.remember_me_id, self.attributes)
@staticmethod
def __create_attribute(name, value, anchors=None):
return SandboxAttribute(name, value, anchors)
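# Editor's sketch (not part of the upstream SDK file): typical builder usage.
# The attribute values below are made up for illustration.
if __name__ == "__main__":
    token_request = (
        YotiTokenRequest.builder()
        .with_remember_me_id("some-remember-me-id")
        .with_given_names("Jane")
        .with_family_name("Doe")
        .with_email_address("jane.doe@example.com")
        .build()
    )
    print(token_request.__dict__())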
| StarcoderdataPython |
3218682 | import argparse
import glob
import multiprocessing
import re
from functools import partial
from pathlib import Path
import librosa
import numpy
from become_yukarin import SuperResolution
from become_yukarin.config.sr_config import create_from_json as create_config
from become_yukarin.dataset.dataset import AcousticFeatureProcess
from become_yukarin.dataset.dataset import WaveFileLoadProcess
parser = argparse.ArgumentParser()
parser.add_argument('model_names', nargs='+')
parser.add_argument('-md', '--model_directory', type=Path, default=Path('/mnt/dwango/hiroshiba/become-yukarin/'))
parser.add_argument('-iwd', '--input_wave_directory', type=Path,
default=Path('/mnt/dwango/hiroshiba/become-yukarin/dataset/yukari-wave/yukari-news/'))
parser.add_argument('-it', '--iteration', type=int)
parser.add_argument('-g', '--gpu', type=int)
args = parser.parse_args()
model_directory = args.model_directory # type: Path
input_wave_directory = args.input_wave_directory # type: Path
it = args.iteration
gpu = args.gpu
paths_test = list(Path('./test_data_sr/').glob('*.wav'))
def extract_number(f):
    s = re.findall(r"\d+", str(f))
return int(s[-1]) if s else -1
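# Editor's note (illustrative, not in the original file): extract_number pulls
# the trailing number out of a model path, e.g.
# extract_number(Path('predictor_500.npz')) -> 500, or -1 if there are no digits.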
def process(p: Path, super_resolution: SuperResolution):
param = config.dataset.param
wave_process = WaveFileLoadProcess(
sample_rate=param.voice_param.sample_rate,
top_db=None,
)
acoustic_feature_process = AcousticFeatureProcess(
frame_period=param.acoustic_feature_param.frame_period,
order=param.acoustic_feature_param.order,
alpha=param.acoustic_feature_param.alpha,
f0_estimating_method=param.acoustic_feature_param.f0_estimating_method,
)
try:
if p.suffix in ['.npy', '.npz']:
p = glob.glob(str(input_wave_directory / p.stem) + '.*')[0]
p = Path(p)
input = acoustic_feature_process(wave_process(str(p)))
wave = super_resolution(input.spectrogram, acoustic_feature=input, sampling_rate=param.voice_param.sample_rate)
librosa.output.write_wav(str(output / p.stem) + '.wav', wave.wave, wave.sampling_rate, norm=True)
except:
import traceback
print('error!', str(p))
traceback.format_exc()
for model_name in args.model_names:
base_model = model_directory / model_name
config = create_config(base_model / 'config.json')
#input_paths = list(sorted([Path(p) for p in glob.glob(str(config.dataset.input_glob))]))
#numpy.random.RandomState(config.dataset.seed).shuffle(input_paths)
#path_train = input_paths[0]
#path_test = input_paths[-1]
if it is not None:
model_path = base_model / 'predictor_{}.npz'.format(it)
else:
model_paths = base_model.glob('predictor_*.npz')
model_path = list(sorted(model_paths, key=extract_number))[-1]
print(model_path)
super_resolution = SuperResolution(config, model_path, gpu=gpu)
output = Path('./output').absolute() / base_model.name
output.mkdir(exist_ok=True)
#paths = [path_train, path_test] + paths_test
paths = paths_test
process_partial = partial(process, super_resolution=super_resolution)
if gpu is None:
pool = multiprocessing.Pool()
pool.map(process_partial, paths)
else:
list(map(process_partial, paths))
| StarcoderdataPython |