id (string, 2-8 chars) | text (string, 16-264k chars) | dataset_id (string, 1 class) |
---|---|---|
1690926
|
from time import perf_counter
import atexit
from performer.formatter import Formatter
class benchmark(object):
_instance = None
funcs = []
loop_counts = []
logs = []
def __init__(self, loop_count):
self.loop_counts.append(loop_count)
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls)
atexit.register(cls.__print_logs)
return cls._instance
def __call__(self, func):
loop_count = self.loop_counts[-1]
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
# time only the repeated runs so the reported average really is per loop iteration
start = perf_counter()
for _ in range(loop_count):
func(*args, **kwargs)
total_sec = perf_counter() - start
if func not in self.funcs:
self.logs.append([
Formatter.func_name(func.__name__),
Formatter.return_value(result),
Formatter.loop_count(loop_count),
Formatter.average(total_sec / loop_count),
Formatter.total(total_sec),
])
self.funcs.append(func)
return result
return wrapper
@classmethod
def __print_logs(cls):
for log in cls.logs:
print('\n' + '\n'.join(log))
|
StarcoderdataPython
|
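A minimal usage sketch for the @benchmark decorator above. It assumes the class is defined or imported in the same module and that performer.formatter (with its Formatter helper) is available; the decorated function is just an illustrative workload.

@benchmark(loop_count=1000)
def join_digits():
    # small, deterministic workload so the timing loop stays fast
    return "".join(str(i) for i in range(100))

join_digits()  # runs once for the result, then 1000 timed repetitions
# the collected log entries are printed automatically at interpreter exit (atexit)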
3277103
|
<gh_stars>10-100
from kdbinsert import KdbInsert
from optparse import OptionParser
import sys
from aselite import read_any
from config import *
from local_db import LocalDB
class LocalInsert(KdbInsert):
def __init__(self):
pass
# This function will overload the default insert_into_db function
def insert_into_db(self, **args):
# create instance of database
db = LocalDB(args['kdbname'], args['nf'], args['dc'], args['mac'])
# test if process is already in database
name = db.get_name(args['s'].get_chemical_symbols())
saddle_list = db.get_saddles(name)
for db_saddle in saddle_list:
if len(args['s']) != len(db_saddle[0]):
continue
if self.getMappings(args['s'], db_saddle[0], args['nf'], args['dc']) is not None:
print "SQL duplicate of", name, "with id:", db_saddle[1]
return "SQL duplicate of " + name + " with id: " + str(db_saddle[1])
# add process to db
db.add_process(args['or'], args['os'], args['op'], args['om'],
args['r'], args['s'], args['p'], args['m'], args['ma'])
# Indicate that the process was inserted successfully.
#print "good"
print "KDB insert success"
return "good"
if __name__ == "__main__":
insert_sub_class = LocalInsert()
# Parse command line options.
parser = OptionParser(usage="%prog [options] reactant saddle product mode")
parser.add_option("-o", "--mode", dest="mode",
help="optional mode file",
default=None)
parser.add_option("-n", "--nf", dest="nf", action="store", type="float",
help="neighbor fudge parameter",
default=None)
parser.add_option("-c", "--dc", dest="dc", action="store", type="float",
help="distance cutoff parameter",
default=None)
parser.add_option("-m", "--mac", dest="mac", action="store", type="float",
help="mobile atom cutoff parameter",
default=None)
options, args = parser.parse_args()
# Make sure we get the reactant, saddle, product, and mode files.
if len(args) < 3:
parser.print_help()
sys.exit()
# Load the reactant, saddle, product, and mode files.
reactant = read_any(args[0])
saddle = read_any(args[1])
product = read_any(args[2])
mode = None
if options.mode is not None:
mode = insert_sub_class.load_mode(options.mode)
# load previous params
db = LocalDB(KDB_NAME)
params = db.get_params()
if options.nf is None:
options.nf = params['nf']
if options.dc is None:
options.dc = params['dc']
if options.mac is None:
options.mac = params['mac']
# run the insert standard insert function.
insert_sub_class.insert(reactant, saddle, product, mode=mode, nf=options.nf, dc=options.dc, mac=options.mac,
kdbname=KDB_NAME)
|
StarcoderdataPython
|
1676106
|
<filename>monkNamespace.py
#!/usr/bin/python
import monkDebug as debug
import monkNode as Node
class Namespace(Node.Node):
def __init__(self, stack=[], file="", lineNumber=0, documentation=[]):
if len(stack) != 2:
debug.error("Can not parse namespace : " + str(stack))
Node.Node.__init__(self, 'namespace', stack[1], file, lineNumber, documentation)
# enable sub list
self.sub_list = []
debug.verbose("find namespace : " + self.to_str())
def to_str(self) :
return "namespace " + self.name + " { ... };"
|
StarcoderdataPython
|
6584730
|
"""
@author: <NAME>
@since: 5/11/2017
https://www.hackerrank.com/challenges/delete-a-node-from-a-linked-list/problem
Passed :)
"""
def Delete(head, position):
# position guaranteed to be in range of the list.
if position == 0: # Remember to handle the edge cases.
return head.next
node = head
for i in range(position - 1):
# Want to end the loop right before you need to remove the node.
# So loop until the position - 1th node.
node = node.next
node.next = node.next.next
return head
|
StarcoderdataPython
|
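A self-contained way to exercise Delete; the Node class below is a hypothetical stand-in for the singly linked list node that HackerRank provides.

class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

head = Node(1, Node(2, Node(3)))
head = Delete(head, 1)  # drops the node holding 2
values = []
node = head
while node:
    values.append(node.data)
    node = node.next
print(values)  # [1, 3]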
5124482
|
# __init__.py
from .responses import *
from .distributions import *
__all__ = ['responses', 'distributions']
|
StarcoderdataPython
|
11224972
|
from unittest.mock import MagicMock
import pytest
import snowflake.connector as sf
from prefect.tasks.snowflake import SnowflakeQuery
class TestSnowflakeQuery:
def test_construction(self):
task = SnowflakeQuery(
account="test", user="test", password="<PASSWORD>", warehouse="test"
)
assert task.autocommit is None
def test_query_string_must_be_provided(self):
task = SnowflakeQuery(
account="test", user="test", password="<PASSWORD>", warehouse="test"
)
with pytest.raises(ValueError, match="A query string must be provided"):
task.run()
def test_execute_error_must_pass_through(self, monkeypatch):
snowflake_module_connect_method = MagicMock()
connection = MagicMock(spec=sf.SnowflakeConnection)
cursor = MagicMock(spec=sf.DictCursor)
# link all the mocks together appropriately
snowflake_module_connect_method.return_value = connection
connection.cursor = cursor
# database cursors can be ugly to mock given the use of __enter__
cursor.return_value.__enter__.return_value.execute.side_effect = sf.DatabaseError(
"Invalid query"
)
snowflake_connector_module = MagicMock(connect=snowflake_module_connect_method)
monkeypatch.setattr(
"prefect.tasks.snowflake.snowflake.sf", snowflake_connector_module
)
task = SnowflakeQuery(
account="test", user="test", password="<PASSWORD>", warehouse="test"
)
with pytest.raises(sf.errors.DatabaseError, match="Invalid query"):
task.run(query="SELECT * FROM foo")
|
StarcoderdataPython
|
3409284
|
<reponame>Kookabura/scrapyd
import unittest
from datetime import datetime
from decimal import Decimal
from scrapy.http import Request
from scrapyd.sqlite import SqlitePriorityQueue, JsonSqlitePriorityQueue, \
PickleSqlitePriorityQueue, SqliteDict, JsonSqliteDict, PickleSqliteDict
class SqliteDictTest(unittest.TestCase):
dict_class = SqliteDict
test_dict = {'hello': 'world', 'int': 1, 'float': 1.5}
def test_basic_types(self):
test = self.test_dict
d = self.dict_class()
d.update(test)
self.failUnlessEqual(list(d.items()), list(test.items()))
d.clear()
self.failIf(d.items())
def test_in(self):
d = self.dict_class()
self.assertFalse('test' in d)
d['test'] = 123
self.assertTrue('test' in d)
def test_keyerror(self):
d = self.dict_class()
self.assertRaises(KeyError, d.__getitem__, 'test')
def test_replace(self):
d = self.dict_class()
self.assertEqual(d.get('test'), None)
d['test'] = 123
self.assertEqual(d.get('test'), 123)
d['test'] = 456
self.assertEqual(d.get('test'), 456)
class JsonSqliteDictTest(SqliteDictTest):
dict_class = JsonSqliteDict
test_dict = SqliteDictTest.test_dict.copy()
test_dict.update({'list': ['a', 'world'], 'dict': {'some': 'dict'}})
class PickleSqliteDictTest(JsonSqliteDictTest):
dict_class = PickleSqliteDict
test_dict = JsonSqliteDictTest.test_dict.copy()
test_dict.update({'decimal': Decimal("10"), 'datetime': datetime.now()})
def test_request_persistance(self):
r1 = Request("http://www.example.com", body="some")
d = self.dict_class()
d['request'] = r1
r2 = d['request']
self.failUnless(isinstance(r2, Request))
self.failUnlessEqual(r1.url, r2.url)
self.failUnlessEqual(r1.body, r2.body)
class SqlitePriorityQueueTest(unittest.TestCase):
queue_class = SqlitePriorityQueue
supported_values = ["bytes", u"\xa3", 123, 1.2, True]
def setUp(self):
self.q = self.queue_class()
def test_empty(self):
self.failUnless(self.q.pop() is None)
def test_one(self):
msg = "a message"
self.q.put(msg)
self.failIf("_id" in msg)
self.failUnlessEqual(self.q.pop(), msg)
self.failUnless(self.q.pop() is None)
def test_multiple(self):
msg1 = "first message"
msg2 = "second message"
self.q.put(msg1)
self.q.put(msg2)
out = []
out.append(self.q.pop())
out.append(self.q.pop())
self.failUnless(msg1 in out)
self.failUnless(msg2 in out)
self.failUnless(self.q.pop() is None)
def test_priority(self):
msg1 = "message 1"
msg2 = "message 2"
msg3 = "message 3"
msg4 = "message 4"
self.q.put(msg1, priority=1.0)
self.q.put(msg2, priority=5.0)
self.q.put(msg3, priority=3.0)
self.q.put(msg4, priority=2.0)
self.failUnlessEqual(self.q.pop(), msg2)
self.failUnlessEqual(self.q.pop(), msg3)
self.failUnlessEqual(self.q.pop(), msg4)
self.failUnlessEqual(self.q.pop(), msg1)
def test_iter_len_clear(self):
self.failUnlessEqual(len(self.q), 0)
self.failUnlessEqual(list(self.q), [])
msg1 = "message 1"
msg2 = "message 2"
msg3 = "message 3"
msg4 = "message 4"
self.q.put(msg1, priority=1.0)
self.q.put(msg2, priority=5.0)
self.q.put(msg3, priority=3.0)
self.q.put(msg4, priority=2.0)
self.failUnlessEqual(len(self.q), 4)
self.failUnlessEqual(list(self.q), \
[(msg2, 5.0), (msg3, 3.0), (msg4, 2.0), (msg1, 1.0)])
self.q.clear()
self.failUnlessEqual(len(self.q), 0)
self.failUnlessEqual(list(self.q), [])
def test_remove(self):
self.failUnlessEqual(len(self.q), 0)
self.failUnlessEqual(list(self.q), [])
msg1 = "good message 1"
msg2 = "bad message 2"
msg3 = "good message 3"
msg4 = "bad message 4"
self.q.put(msg1)
self.q.put(msg2)
self.q.put(msg3)
self.q.put(msg4)
self.q.remove(lambda x: x.startswith("bad"))
self.failUnlessEqual(list(self.q), [(msg1, 0.0), (msg3, 0.0)])
def test_types(self):
for x in self.supported_values:
self.q.put(x)
self.failUnlessEqual(self.q.pop(), x)
class JsonSqlitePriorityQueueTest(SqlitePriorityQueueTest):
queue_class = JsonSqlitePriorityQueue
supported_values = SqlitePriorityQueueTest.supported_values + [
["a", "list", 1],
{"a": "dict"},
]
class PickleSqlitePriorityQueueTest(JsonSqlitePriorityQueueTest):
queue_class = PickleSqlitePriorityQueue
supported_values = JsonSqlitePriorityQueueTest.supported_values + [
Decimal("10"),
datetime.now(),
]
def test_request_persistance(self):
r1 = Request("http://www.example.com", body="some")
self.q.put(r1)
r2 = self.q.pop()
self.failUnless(isinstance(r2, Request))
self.failUnlessEqual(r1.url, r2.url)
self.failUnlessEqual(r1.body, r2.body)
|
StarcoderdataPython
|
113412
|
<filename>core/utils/network/sms.py
from twilio.rest import TwilioRestClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "AC32a3c49700934481addd5ce1659f04d2"
auth_token = ""
client = TwilioRestClient(account_sid, auth_token)
message = client.sms.messages.create(body="Jenny please?! I love you <3",
to="+14159352345", # Replace with your phone number
from_="+14158141829") # Replace with your Twilio number
print message.sid
|
StarcoderdataPython
|
4961156
|
<reponame>marctrommen/docarchive_scan_client<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Update metadata of an existing PDF document from a JSON file; therefore, in the
# background do ...
# ... check if PDF file exists and delete it
# ... add all PNG files as single pages to the PDF file
# ... set meta data of PDF file with content from JSON file
# -----------------------------------------------------------------------------
# AUTHOR ........ <NAME> (mailto:<EMAIL>)
# LAST CHANGE ... 2021-12-31
# -----------------------------------------------------------------------------
import os
import fpdf
import json
from PIL import Image
# -----------------------------------------------------------------------------
# file extensions
PNG_EXTENSION = ".png"
PDF_EXTENSION = ".pdf"
JSON_EXTENSION = ".json"
# -----------------------------------------------------------------------------
def get_environment():
args={}
args["SCAN_WORKING_DIRECTORY"] = os.environ["SCAN_WORKING_DIRECTORY"]
args["SCAN_DOCUMENT_ID"] = os.environ["SCAN_DOCUMENT_ID"]
return args
# -----------------------------------------------------------------------------
def get_files_from_directory(path_to_files, filter_file_extension):
filenames = []
for filename in os.listdir(path_to_files):
if filename.endswith(filter_file_extension):
filename=os.path.join(path_to_files, filename)
filenames.append(filename)
filenames.sort()
return filenames
# -----------------------------------------------------------------------------
def add_metadata_to_pdf(pdf_document, json_document):
# load document metadata from JSON file
metadata = {}
with open(json_document, "r") as fileObject:
metadata = json.load(fileObject)
if not metadata:
raise RuntimeError("JSON file should not be empty!")
# metadata
pdf_document.set_author("<NAME>")
pdf_document.set_creator("Scan Workflow with PyFPDF library")
pdf_document.set_keywords(" ".join(metadata["keywords"]))
pdf_document.set_subject(metadata["title"])
pdf_document.set_title(metadata["id"])
pdf_document.set_display_mode("fullpage", "continuous")
return
# -----------------------------------------------------------------------------
def add_scans_to_pdf(pdf_document, args):
filename_list = get_files_from_directory(args["SCAN_WORKING_DIRECTORY"], PNG_EXTENSION)
pdf_document.set_margins(left=0.0, top=0.0, right=0.0)
for filename in filename_list:
image = Image.open(filename)
width, height = image.size
if (width < height):
# format = portrait
pdf_document.add_page(orientation = 'P')
pdf_document.image(filename, x = 0, y = 0, w = 210, h = 296, type = 'png')
else:
# format = landscape
pdf_document.add_page(orientation = 'L')
pdf_document.image(filename, x = 0, y = 0, w = 296, type = 'png')
return
# -----------------------------------------------------------------------------
# main program
# -----------------------------------------------------------------------------
if __name__ == '__main__':
args = get_environment()
origin_path = args["SCAN_WORKING_DIRECTORY"]
pdf_path = os.path.join(
origin_path, args["SCAN_DOCUMENT_ID"] + PDF_EXTENSION)
json_path = os.path.join(
origin_path, args["SCAN_DOCUMENT_ID"] + JSON_EXTENSION)
pdf_document = fpdf.FPDF(orientation="P", unit="mm", format="A4")
add_metadata_to_pdf(pdf_document, json_path)
add_scans_to_pdf(pdf_document, args)
# close document
pdf_document.close()
pdf_document.output(pdf_path, 'F')
|
StarcoderdataPython
|
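One hedged way to drive the script above: it only reads SCAN_WORKING_DIRECTORY and SCAN_DOCUMENT_ID from the environment, so a caller can set those and run it as a subprocess. The script filename and paths below are assumptions, not taken from the repository.

import os
import subprocess

env = dict(
    os.environ,
    SCAN_WORKING_DIRECTORY="/tmp/scans/2021-12-31_example",  # assumed directory holding the PNG pages
    SCAN_DOCUMENT_ID="2021-12-31_example",                   # assumed document id
)
subprocess.run(["python3", "update_pdf_metadata.py"], env=env, check=True)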
6519668
|
<reponame>LeKSuS-04/Capture-The-Flag
n = 0x9ffa2a58ad286990fc5fe97b669e8cb2752e81fafa5ac774ea856d8ca124089ba4b06fe21a5d588c1dcb9602838d32cd70e50b85dec21fa79944543176c7a3b8b804ab754af2978f23b09f2905103dd5a4c748df8d9e9a079a5b38f6f69051b3c6582ebc2d2d199b3a97cb7e58af79b90fe08884626d188e194816bd51960a45
e = 0x3
c = 0x10652cdfaa6a6f6f688b98219cd32ce42c4d4df94afaea31cd94dfac50678b1f50f3ab1fd389f9998b6727ffd1a2c06ee6bde21ae85daef63fd0fa694a93f3674dc3f9ea0f2e3283a3d9897137aea12458aa3b8f96c61f3bf74a510bab7e7d8b7af52290d2621f1e06e52e6a7be4896c6465
def int_to_bytes(x: int) -> bytes:
return x.to_bytes((x.bit_length() + 7) // 8, 'big')
def cube_root(n: int) -> int:
l, r = 1, n
while l < r - 1:
m = (l + r) // 2
res = m ** 3
if (res <= n):
l = m
else:
r = m
return l
m = cube_root(c)
print(int_to_bytes(m).decode())
|
StarcoderdataPython
|
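The snippet above is the classic low-exponent RSA attack: with e = 3 and no padding, a message small enough that m**3 < n is encrypted as plain integer exponentiation, so an exact integer cube root recovers it without the private key. A quick self-check of that idea on toy values (the modulus below is made up):

m = int.from_bytes(b"hi", "big")
n_toy = 2**64 + 13            # any modulus with m**3 < n_toy works for the demo
c_toy = pow(m, 3, n_toy)      # the reduction never wraps, so c_toy == m**3
assert cube_root(c_toy) == m
print(int_to_bytes(cube_root(c_toy)).decode())  # -> hi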
6691284
|
# Copyright 2018 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import colorsys
from itertools import combinations
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
import numpy as np
import pycountry
def is_iterable(x):
""" Check if a value is iterable """
try:
iter(x)
return True
except:
return False
def flatten_list(l):
if l and is_iterable(l) and is_iterable(l[0]) and not (isinstance(l[0], str) or isinstance(l[0], unicode)):
return [item for sublist in l for item in sublist]
else:
return l
munsell_hue_labels = np.array(['R', 'YR', 'Y', 'GY', 'G', 'BG', 'B', 'PB', 'P', 'RP'])
def munsell_buckets(hues, labels = False, color = 'right', normalizer = 100.0):
"""
Returns corresponding color in munsell bucket
Source http://www.farbkarten-shop.de/media/products/0944505001412681032.pdf
:param hues: hues to discretize
:param labels: if true returns string name rather than bucket
:param normalizer: divisor constant to normalize if values not already between 0.0 and 1.0
:return: representative hue (and optionally) the label for the bucket
"""
if not is_iterable(hues):
raise ValueError("hues must be iterable")
if not color in ['left', 'mid', 'right']:
raise ValueError("color must be one of 'left', 'mid', 'right'")
munsell_bounds = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# bring to 0.0 to 1.0 if necessary
hues = np.array(hues)
if max(hues) > 1.0:
hues = hues / normalizer
bucketed = np.digitize(hues, bins = munsell_bounds)
# make zero indexed
bucketed -= 1
# reassign values of 1 to the first bucket
bucketed[np.where(hues == 1.0)] = 0
if not labels:
return bucketed
else:
return bucketed, munsell_hue_labels[bucketed]
def _get_hue_pair_map():
""" Order agnostic mapping to munsell hues ( e.g R-P, P-R both map to same value )"""
pairs = list(combinations(munsell_hue_labels, 2))
# self maps
pairs += [(h, h) for h in munsell_hue_labels]
pairs = {p:p for p in pairs}
# reverses pairs
pairs.update({(h2, h1):mapped for (h1, h2), mapped in pairs.iteritems()})
return pairs
munsell_pair_map = _get_hue_pair_map()
def get_full_country_name(iso_code, override = None):
"""
Get country name for 2 letter iso country code used in specimen data as unicode
:param iso_code:
:param override: we may prefer some mappings, or some may be old and not in the countries data, so try override first
:return:
"""
if not override is None and iso_code in override:
return unicode(override[iso_code])
else:
return unicode(pycountry.countries.get(alpha_2 = iso_code).name)
def rgb_to_lab(r, g, b):
rgb = sRGBColor(r, g, b)
lab = convert_color(rgb, LabColor)
# scale to fit Mathematica scale
return tuple(val / 100.0 for val in lab.get_value_tuple())
def lab_to_rgb(l, a, b):
# undo the / 100.0 shown above
lab = LabColor(l * 100.0, a * 100.0, b * 100.0)
rgb = convert_color(lab, sRGBColor)
# scale to fit Mathematica scale
return tuple(rgb.get_value_tuple())
def df_rgb_to_lab(df):
rgb = list('rgb')
df = df[rgb]
f_star = lambda x: list(rgb_to_lab(*x))
return df.apply(f_star, axis=1).rename(columns=dict(zip(rgb, 'lab')))
def mat_rgb_to_lab(mat):
f_star = lambda x: list(rgb_to_lab(*x))
return np.apply_along_axis(f_star, 1, mat)
def mat_lab_to_rgb(mat):
f_star = lambda x: list(lab_to_rgb(*x))
return np.apply_along_axis(f_star, 1, mat)
def prefix(p, l):
return ['%s%s' % (p, e) for e in l]
def to_csv_str(vals):
return ','.join(map(str, vals))
|
StarcoderdataPython
|
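A small usage sketch for munsell_buckets, assuming the module above is importable (note it is Python 2 era code: iteritems and unicode appear further down, so run it under the interpreter the module targets). Hues given on a 0-100 scale are normalized before bucketing.

buckets, names = munsell_buckets([5.0, 37.0, 99.0], labels=True)
print(buckets.tolist())  # [0, 3, 9]
print(names.tolist())    # ['R', 'GY', 'RP']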
11329532
|
directions = {
"up": {"x": 0, "y": 1},
"down": {"x": 0, "y": -1},
"right": {"x": 1, "y": 0},
"left": {"x": -1, "y": 0}
}
|
StarcoderdataPython
|
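A tiny sketch of how a direction table like this is usually consumed: apply a named move as an (x, y) delta.

def step(position, direction):
    delta = directions[direction]
    return {"x": position["x"] + delta["x"], "y": position["y"] + delta["y"]}

print(step({"x": 0, "y": 0}, "up"))    # {'x': 0, 'y': 1}
print(step({"x": 2, "y": 5}, "left"))  # {'x': 1, 'y': 5}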
6516077
|
from django.apps import AppConfig
class LoggedConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "logged"
|
StarcoderdataPython
|
9640909
|
# # !/usr/bin/python3
# # -*- coding: utf-8 -*-
# """
# @Author : <NAME>
# @Version :
# ------------------------------------
# @File : models.py
# @Description :
# @CreateTime : 2022/2/14 12:39
# ------------------------------------
# @ModifyTime :
# """
# from tortoise import fields, Tortoise
# from tortoise.contrib.pydantic import pydantic_model_creator
# from tortoise.models import Model
#
#
# # Common abstract base model
# class AbstractModel(Model):
# id = fields.IntField(pk=True, index=True, description="ID")
# # Soft delete: 0 means not deleted, 1 means deleted
# is_delete = fields.BooleanField(description="是否已删除", default='0')
# create_time = fields.DatetimeField(auto_now_add=True, description="创建时间")
# modified_time = fields.DatetimeField(auto_now=True, description="修改时间")
#
#
# class User(AbstractModel):
# username = fields.CharField(max_length=32, description="登录名称")
# password = fields.CharField(max_length=255, description="用户密码")
# name = fields.CharField(max_length=25, description="员工姓名")
# email = fields.CharField(max_length=50, description="邮箱", null=True)
# mobile = fields.CharField(max_length=11, description="手机", null=True)
# department = fields.CharField(max_length=20, description="部门", null=True)
# avatar = fields.CharField(max_length=255, default="/static/default.jpg", description="用户头像")
# ip = fields.CharField(max_length=255, description="登录ip")
# # Status: 1 means enabled, 0 means disabled
# is_active = fields.BooleanField(default='1', description="是否激活登录账号")
# # Status: 1 means active, 2 means inactive
# status = fields.IntField(description="是否在职", default='1')
# last_login_date = fields.DatetimeField(auto_now=True, description="最后登录时间")
#
# # Maximum recursion depth for querysets
# class PydanticMeta:
# max_recursion = 1
#
#
# class Project(AbstractModel):
# name = fields.CharField(max_length=20, description="项目名称")
# desc = fields.TextField(description="项目描述", null=True)
# status = fields.IntField(description="项目状态", default='1')
#
# # Maximum recursion depth for querysets
# class PydanticMeta:
# max_recursion = 1
#
#
# class Story(AbstractModel):
# name = fields.CharField(max_length=20, description="需求名称")
# project = fields.ForeignKeyField('models.Project', related_name='story_router', description="项目ID")
# type = fields.CharEnumField(StoryType, default=StoryType.Demand,
# description="需求类型:Demand = '需求',Optimization = '优化',Bug = '缺陷',Other = '其他'")
# desc = fields.TextField(description="需求描述", null=True)
# story_path = fields.CharField(max_length=255, description="需求链接", null=True, default="")
# story_priority = fields.CharEnumField(PriorityType, default=PriorityType.Default,
# description="优先级:Urgent = '紧急', High = '高' ,Middle = '中',Low = '低',"
# "insignificant = '无关紧要', Default = '空'")
# status = fields.CharEnumField(ReceiveStatus, default=ReceiveStatus.Not_Started,
# description="任务状态:Not_Started = '未开始', In_Progress = '进行中', Pause = '暂停', "
# "Finished = '完成',Cancel = '取消'")
# review_time = fields.CharField(max_length=32, description="评审时间", null=True)
# confirm_time = fields.CharField(max_length=32, description="交底时间", null=True)
# deleted = fields.IntField(description="是否已删除", default='0')
# remark = fields.TextField(description="备注", null=True)
#
# class PydanticMeta:
# max_recursion = 2
#
#
# class Task(AbstractModel):
# name = fields.CharField(max_length=20, description="任务名称")
# story = fields.ForeignKeyField('models.Story', related_name='task', description="需求ID")
# task_priority = fields.CharEnumField(PriorityType, default=PriorityType.Default,
# description="优先级:Urgent = '紧急', High = '高' ,Middle = '中',Low = '低',"
# "insignificant = '无关紧要', Default = '空'")
# story_name = fields.ManyToManyField('models.Staff', related_name='task', through='task_story', description="产品员工ID")
# dev_name = fields.ManyToManyField('models.Staff', related_name='task1', through='task_dev', description="开发员工ID")
# tester_name = fields.ManyToManyField('models.Staff', related_name='task2', through='task_tester',
# description="测试员工ID")
# test_time = fields.CharField(max_length=32, description="提测时间", null=True)
# online_time = fields.CharField(max_length=32, description="上线时间", null=True)
# server = fields.CharField(max_length=255, description="发布服务", null=True)
# status = fields.CharEnumField(ReceiveStatus, default=ReceiveStatus.Not_Started,
# description="任务状态:Not_Started = '未开始', In_Progress = '进行中', Pause = "
# "'暂停', Finished = '完成',Cancel = '取消'")
# delay = fields.IntField(description="是否延期", null=True)
# deleted = fields.IntField(description="是否已删除", default='0')
# remark = fields.TextField(description="备注", null=True)
#
# class PydanticMeta:
# max_recursion = 2
#
#
# class Push(AbstractModel):
# name = fields.CharField(max_length=20, description="推送名称")
# project = fields.ForeignKeyField('models.Project', related_name='push', description="项目ID")
# receive = fields.CharEnumField(ReceiveType, default=ReceiveType.Dingding,
# description="接收方式:Dingding = '钉钉', Email = '邮件', Wechat = '微信'")
# at_name = fields.ManyToManyField(model_name='models.Staff', related_name='push', through='push_staff',
# description="通知自定义ID")
# access_token = fields.CharField(max_length=255, description="webhook", null=True)
# secret = fields.CharField(max_length=255, description="secret", null=True)
# at_all = fields.IntField(description="通知所有人", default='0')
# is_active = fields.IntField(description="是否激活", default='1')
# deleted = fields.IntField(description="是否已删除", default='0')
#
# class PydanticMeta:
# max_recursion = 2
#
#
# class DbSetting(AbstractModel):
# id = fields.IntField(pk=True)
# connect_name = fields.CharField(max_length=32, description='连接名称')
# host = fields.CharField(max_length=32)
# port = fields.IntField(description='端口')
# user = fields.CharField(max_length=32)
# password = fields.CharField(max_length=32)
# db_name = fields.CharField(max_length=512)
# env = fields.CharField(max_length=32, description='环境')
# app = fields.CharField(max_length=32, description='应用名称')
#
# class PydanticMeta:
# max_recursion = 2
#
#
# # Work around missing foreign-key relation fields in models generated by pydantic_model_creator
# Tortoise.init_models(["db.models"], "models")
#
# # Database configuration models
# DbSetting_Pydantic = pydantic_model_creator(DbSetting, name="DbSetting")
# DbSettingIn_Pydantic = pydantic_model_creator(DbSetting, name="DbSettingIn", exclude_readonly=True)
#
# # Response model
# User_Pydantic = pydantic_model_creator(Staff, name="Staff", exclude=("password",))
#
# # Input model; exclude_readonly makes read-only fields optional
# UserIn_Pydantic = pydantic_model_creator(Staff, name="StaffIn", exclude=("avatar",), exclude_readonly=True)
#
# # Staff models
# Staff_Pydantic = pydantic_model_creator(Staff, name="Staff")
# StaffIn_Pydantic = pydantic_model_creator(Staff, name="StaffIn", exclude_readonly=True)
#
# # Project models
# Project_Pydantic = pydantic_model_creator(Project, name="Project")
# ProjectIn_Pydantic = pydantic_model_creator(Project, name="ProjectIn", exclude_readonly=True)
#
# # Story models
# Story_Pydantic = pydantic_model_creator(Story, name="Story")
# StoryIn_Pydantic = pydantic_model_creator(Story, name="StoryIn", exclude_readonly=True)
#
# # Task models
# Task_Pydantic = pydantic_model_creator(Task, name="Task")
# TaskIn_Pydantic = pydantic_model_creator(Task, name="TaskIn", exclude_readonly=True)
#
# # Push notification models
# Push_Pydantic = pydantic_model_creator(Push, name="Push")
# PushIn_Pydantic = pydantic_model_creator(Push, name="PushIn", exclude_readonly=True)
|
StarcoderdataPython
|
4836467
|
import requests, json
from unittest import TestCase
from tests.config import *
class TestNPCAPI(TestCase):
"""Test the NPC endpoint - where it differs from the PC endpoint"""
headers = {"Content-Type": "application/json"}
player_headers = {"Content-Type": "application/json"}
@classmethod
def setUpClass(cls):
response = requests.post(LOGIN_URL, json.dumps(admin_login), headers=cls.headers)
cls.headers["auth"] = response.json()["auth"]
cls.player_headers['auth'] = requests.post(LOGIN_URL, json.dumps(player_login), headers=cls.headers).json()['auth']
response = requests.post(CREATE_NPC_URL, json.dumps(WORKING_NPC), headers=cls.headers)
cls.URL = BASE_URL + response.json()["URI"]
def test_invalid_security(self):
post_data = {"security": "not_a_level"}
response = requests.put(self.URL, json.dumps(post_data), headers=self.headers)
self.assertEqual(response.status_code, 400)
def test_get_not_allowed(self):
response = requests.get(self.URL, headers=self.player_headers)
self.assertEqual(response.status_code, 403)
def test_put_not_allowed(self):
response = requests.put(self.URL, json.dumps(WORKING_NPC), headers=self.player_headers)
self.assertEqual(response.status_code, 403)
def test_delete_not_allowed(self):
response = requests.delete(self.URL, headers=self.player_headers)
self.assertEqual(response.status_code, 403)
|
StarcoderdataPython
|
129906
|
<filename>aiida/transport/__init__.py
# -*- coding: utf-8 -*-
import aiida.common
from aiida.common.exceptions import InternalError
from aiida.common.extendeddicts import FixedFieldsAttributeDict
import os,re,fnmatch,sys # for glob commands
__copyright__ = u"Copyright (c), 2015, ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland and ROBERT BOSCH LLC, USA and 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Python Software Foundation. All rights reserved."
__license__ = "MIT license, and Python license, see LICENSE.txt file"
__version__ = "0.4.0"
__contributors__ = "<NAME>, <NAME>, <NAME>"
magic_check = re.compile('[*?[]')
def TransportFactory(module):
"""
Used to return a suitable Transport subclass.
:param str module: name of the module containing the Transport subclass
:return: the transport subclass located in module 'module'
"""
from aiida.common.pluginloader import BaseFactory
return BaseFactory(module, Transport, "aiida.transport.plugins")
class FileAttribute(FixedFieldsAttributeDict):
"""
A class, resembling a dictionary, to describe the attributes of a file,
that is returned by get_attribute().
Possible keys: st_size, st_uid, st_gid, st_mode, st_atime, st_mtime
"""
_valid_fields = (
'st_size',
'st_uid',
'st_gid',
'st_mode',
'st_atime',
'st_mtime',
)
class TransportInternalError(InternalError):
"""
Raised if there is a transport error that is raised to an internal error (e.g.
a transport method called without opening the channel first).
"""
pass
class Transport(object):
"""
Abstract class for a generic transport (ssh, local, ...)
Contains the set of minimal methods
"""
# To be defined in the subclass
# See the ssh or local plugin to see the format
_valid_auth_params = None
def __init__(self, *args, **kwargs):
"""
__init__ method of the Transport base class.
"""
self._logger = aiida.common.aiidalogger.getChild('transport').getChild(
self.__class__.__name__)
self._logger_extra = None
def __enter__(self):
"""
For transports that require opening a connection, opens
all required channels (used in 'with' statements)
"""
self.open()
return self
def __exit__(self, type, value, traceback):
"""
Closes connections, if needed (used in 'with' statements).
"""
self.close()
def open(self):
"""
Opens a local transport channel
"""
raise NotImplementedError
def close(self):
"""
Closes the local transport channel
"""
raise NotImplementedError
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, str(self))
# redefine this in each subclass
def __str__(self):
return "[Transport class or subclass]"
def _set_logger_extra(self, logger_extra):
"""
Pass the data that should be passed automatically to self.logger
as 'extra' keyword. This is typically useful if you pass data
obtained using get_dblogger_extra in aiida.djsite.utils, to automatically
log also to the DbLog table.
:param logger_extra: data that you want to pass as extra to the
self.logger. To write to DbLog, it should be created by the
aiida.djsite.utils.get_dblogger_extra function. Pass None if you
do not want to have extras passed.
"""
self._logger_extra = logger_extra
@classmethod
def get_short_doc(self):
"""
Return the first non-empty line of the class docstring, if available
"""
# Remove empty lines
docstring = self.__doc__
if not docstring:
return "No documentation available"
doclines = [i for i in docstring.splitlines() if i.strip()]
if doclines:
return doclines[0].strip()
else:
return "No documentation available"
@classmethod
def get_valid_transports(cls):
"""
:return: a list of existing plugin names
"""
from aiida.common.pluginloader import existing_plugins
return existing_plugins(Transport, "aiida.transport.plugins")
@classmethod
def get_valid_auth_params(cls):
"""
Return the internal list of valid auth_params
"""
if cls._valid_auth_params is None:
raise NotImplementedError
else:
return cls._valid_auth_params
@property
def logger(self):
"""
Return the internal logger.
If you have set extra parameters using _set_logger_extra(), a
suitable LoggerAdapter instance is created, bringing with itself
also the extras.
"""
try:
import logging
from aiida.djsite.utils import get_dblogger_extra
if self._logger_extra is not None:
return logging.LoggerAdapter(logger=self._logger,
extra=self._logger_extra)
else:
return self._logger
except AttributeError:
raise InternalError("No self._logger configured for {}!")
def chdir(self,path):
"""
Change directory to 'path'
:param str path: path to change working directory into.
:raises: IOError, if the requested path does not exist
:rtype: string
"""
# #TODO: understand if we want this behavior: this is emulated
# by paramiko, and we should emulate it also for the local
# transport, since we do not want a global chdir for the whole
# code (the same holds for get_pwd).
# However, it could be useful to execute by default the
# codes from that specific directory.
raise NotImplementedError
def chmod(self,path,mode):
"""
Change permissions of a path.
:param str path: path to file
:param int mode: new permissions
"""
raise NotImplementedError
def chown(self,path,uid,gid):
"""
Change the owner (uid) and group (gid) of a file.
As with python's os.chown function, you must pass both arguments,
so if you only want to change one, use stat first to retrieve the
current owner and group.
:param str path: path to the file to change the owner and group of
:param int uid: new owner's uid
:param int gid: new group id
"""
raise NotImplementedError
def copy(self,remotesource,remotedestination,*args,**kwargs):
"""
Copy a file or a directory from remote source to remote destination
(On the same remote machine)
:param str remotesource: path of the remote source directory / file
:param str remotedestination: path of the remote destination directory / file
:raises: IOError, if one of src or dst does not exist
"""
raise NotImplementedError
def copyfile(self,remotesource,remotedestination,*args,**kwargs):
"""
Copy a file from remote source to remote destination
(On the same remote machine)
:param str remotesource: path of the remote source directory / file
:param str remotedestination: path of the remote destination directory / file
:raises IOError: if one of src or dst does not exist
"""
raise NotImplementedError
def copytree(self,remotesource,remotedestination,*args,**kwargs):
"""
Copy a folder from remote source to remote destination
(On the same remote machine)
:param str remotesource: path of the remote source directory / file
:param str remotedestination: path of the remote destination directory / file
:raise IOError: if one of src or dst does not exist
"""
raise NotImplementedError
def _exec_command_internal(self,command, **kwargs):
"""
Execute the command on the shell, similarly to os.system.
Enforce the execution to be run from the cwd (as given by
self.getcwd), if this is not None.
If possible, use the higher-level
exec_command_wait function.
:param str command: execute the command given as a string
:return: stdin, stdout, stderr and the session, when this exists \
(can be None).
"""
raise NotImplementedError
def exec_command_wait(self,command, **kwargs):
"""
Execute the command on the shell, waits for it to finish,
and return the retcode, the stdout and the stderr.
Enforce the execution to be run from the pwd (as given by
self.getcwd), if this is not None.
:param str command: execute the command given as a string
:return: a list: the retcode (int), stdout (str) and stderr (str).
"""
raise NotImplementedError
def get(self, remotepath, localpath, *args, **kwargs):
"""
Retrieve a file or folder from remote source to local destination
dst must be an absolute path (src not necessarily)
:param remotepath: (str) remote_folder_path
:param localpath: (str) local_folder_path
"""
raise NotImplementedError
def getfile(self, remotepath, localpath, *args, **kwargs):
"""
Retrieve a file from remote source to local destination
dst must be an absolute path (src not necessarily)
:param str remotepath: remote_folder_path
:param str localpath: local_folder_path
"""
raise NotImplementedError
def gettree(self, remotepath, localpath, *args, **kwargs):
"""
Retrieve a folder recursively from remote source to local destination
dst must be an absolute path (src not necessarily)
:param str remotepath: remote_folder_path
:param str localpath: local_folder_path
"""
raise NotImplementedError
def getcwd(self):
"""
Get working directory
:return: a string identifying the current working directory
"""
raise NotImplementedError
def get_attribute(self,path):
"""
Return an object FixedFieldsAttributeDict for file in a given path,
as defined in aiida.common.extendeddicts
Each attribute object consists in a dictionary with the following keys:
* st_size: size of files, in bytes
* st_uid: user id of owner
* st_gid: group id of owner
* st_mode: protection bits
* st_atime: time of most recent access
* st_mtime: time of most recent modification
:param str path: path to file
:return: object FixedFieldsAttributeDict
"""
raise NotImplementedError
def get_mode(self,path):
"""
Return the portion of the file's mode that can be set by chmod().
:param str path: path to file
:return: the portion of the file's mode that can be set by chmod()
"""
import stat
return stat.S_IMODE(self.get_attribute(path).st_mode)
def isdir(self,path):
"""
True if path is an existing directory.
:param str path: path to directory
:return: boolean
"""
raise NotImplementedError
def isfile(self,path):
"""
Return True if path is an existing file.
:param str path: path to file
:return: boolean
"""
raise NotImplementedError
def listdir(self, path='.',pattern=None):
"""
Return a list of the names of the entries in the given path.
The list is in arbitrary order. It does not include the special
entries '.' and '..' even if they are present in the directory.
:param str path: path to list (default to '.')
:param str pattern: if used, listdir returns a list of files matching
filters in Unix style. Unix only.
:return: a list of strings
"""
raise NotImplementedError
def makedirs(self,path,ignore_existing=False):
"""
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist.
:param str path: directory to create
:param bool ignore_existing: if set to true, it doesn't give any error
if the leaf directory does already exist
:raises: OSError, if directory at path already exists
"""
raise NotImplementedError
def mkdir(self,path,ignore_existing=False):
"""
Create a folder (directory) named path.
:param str path: name of the folder to create
:param bool ignore_existing: if True, does not give any error if the
directory already exists
:raises: OSError, if directory at path already exists
"""
raise NotImplementedError
def normalize(self,path='.'):
"""
Return the normalized path (on the server) of a given path.
This can be used to quickly resolve symbolic links or determine
what the server is considering to be the "current folder".
:param str path: path to be normalized
:raise IOError: if the path can't be resolved on the server
"""
raise NotImplementedError
def put(self, localpath, remotepath, *args, ** kwargs):
"""
Put a file or a directory from local src to remote dst.
src must be an absolute path (dst not necessarily)
Redirects to putfile and puttree.
:param str localpath: path to remote destination
:param str remotepath: absolute path to local source
"""
raise NotImplementedError
def putfile(self, localpath, remotepath, *args, ** kwargs):
"""
Put a file from local src to remote dst.
src must be an absolute path (dst not necessarily)
:param str localpath: path to remote file
:param str remotepath: absolute path to local file
"""
raise NotImplementedError
def puttree(self, localpath, remotepath, *args, ** kwargs):
"""
Put a folder recursively from local src to remote dst.
src must be an absolute path (dst not necessarily)
:param str localpath: path to remote folder
:param str remotepath: absolute path to local folder
"""
raise NotImplementedError
def remove(self,path):
"""
Remove the file at the given path. This only works on files;
for removing folders (directories), use rmdir.
:param str path: path to file to remove
:raise IOError: if the path is a directory
"""
raise NotImplementedError
def rename(self,oldpath,newpath):
"""
Rename a file or folder from oldpath to newpath.
:param str oldpath: existing name of the file or folder
:param str newpath: new name for the file or folder
:raises IOError: if oldpath/newpath is not found
:raises ValueError: if oldpath/newpath is not a valid string
"""
raise NotImplementedError
def rmdir(self,path):
"""
Remove the folder named path.
This works only for empty folders. For recursive remove, use rmtree.
:param str path: absolute path to the folder to remove
"""
raise NotImplementedError
def rmtree(self,path):
"""
Remove recursively the content at path
:param str path: absolute path to remove
"""
raise NotImplementedError
def gotocomputer_command(self, remotedir):
"""
Return a string to be run using os.system in order to connect
via the transport to the remote directory.
Expected behaviors:
* A new bash session is opened
* A reasonable error message is produced if the folder does not exist
:param str remotedir: the full path of the remote directory
"""
raise NotImplementedError
def symlink(self,remotesource,remotedestination):
"""
Create a symbolic link between the remote source and the remote
destination.
:param remotesource: remote source
:param remotedestination: remote destination
"""
raise NotImplementedError
def whoami(self):
"""
Get the remote username
:return: list of username (str),
retval (int),
stderr (str)
"""
#TODO : add tests for this method
command = 'whoami'
retval, username, stderr = self.exec_command_wait(command)
if retval == 0:
if stderr.strip():
self.logger.warning("There was nonempty stderr in the whoami "
"command: {}".format(stderr))
return username.strip()
else:
self.logger.error("Problem executing whoami. Exit code: {}, stdout: '{}', "
"stderr: '{}'".format(retval, username, stderr))
raise IOError("Error while executing whoami. Exit code: {}".format(retval) )
def path_exists(self,path):
"""
Returns True if path exists, False otherwise.
"""
raise NotImplementedError
# The following definitions are almost copied and pasted
# from the python module glob.
def glob(self,pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
return list(self.iglob(pathname))
def iglob(self,pathname):
"""Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
if not self.has_magic(pathname):
#if os.path.lexists(pathname): # ORIGINAL
# our implementation
if self.path_exists(pathname):
yield pathname
return
dirname, basename = os.path.split(pathname)
if not dirname:
# for name in self.glob1(os.curdir, basename): # ORIGINAL
for name in self.glob1(self.getcwd(), basename):
yield name
return
if self.has_magic(dirname):
dirs = self.iglob(dirname)
else:
dirs = [dirname]
if self.has_magic(basename):
glob_in_dir = self.glob1
else:
glob_in_dir = self.glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(self,dirname, pattern):
if not dirname:
# dirname = os.curdir # ORIGINAL
dirname = self.getcwd()
if isinstance(pattern, unicode) and not isinstance(dirname, unicode):
dirname = unicode(dirname, sys.getfilesystemencoding() or
sys.getdefaultencoding())
try:
#names = os.listdir(dirname)
#print dirname
names = self.listdir(dirname)
except os.error:
return []
except IOError:
return []
if pattern[0] != '.':
names = filter(lambda x: x[0] != '.', names)
return fnmatch.filter(names, pattern)
def glob0(self,dirname, basename):
if basename == '':
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
# if os.path.isdir(dirname):
if self.isdir(dirname):
return [basename]
else:
# if os.path.lexists(os.path.join(dirname, basename)):
if self.path_exists(os.path.join(dirname, basename)):
return [basename]
return []
def has_magic(self,s):
return magic_check.search(s) is not None
|
StarcoderdataPython
|
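A deliberately small sketch of what a concrete Transport plugin could look like against the abstract interface above. It is illustrative only (not an actual aiida plugin), assumes aiida itself is importable so the base __init__ can build its logger, and implements just enough methods for listdir and the shared glob helpers (which are Python 2 specific, as they reference unicode) to work.

import fnmatch
import os

class DummyLocalTransport(Transport):
    """Toy transport that wraps the local filesystem."""
    def __init__(self, *args, **kwargs):
        super(DummyLocalTransport, self).__init__(*args, **kwargs)
        self._cwd = os.getcwd()
    def open(self):
        return self  # nothing to connect to
    def close(self):
        pass
    def getcwd(self):
        return self._cwd
    def chdir(self, path):
        full = os.path.join(self._cwd, path)
        if not os.path.isdir(full):
            raise IOError("{} is not a directory".format(full))
        self._cwd = os.path.abspath(full)
    def isdir(self, path):
        return os.path.isdir(os.path.join(self._cwd, path))
    def path_exists(self, path):
        return os.path.exists(os.path.join(self._cwd, path))
    def listdir(self, path='.', pattern=None):
        names = os.listdir(os.path.join(self._cwd, path))
        return names if pattern is None else fnmatch.filter(names, pattern)
    def __str__(self):
        return "dummy-local"

with DummyLocalTransport() as transport:
    print(transport.listdir('.'))
    print(transport.glob('*.py'))  # exercises the glob helpers defined above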
5177970
|
<reponame>Plamenna/proba<filename>python/configGenieGenerator.py
import ROOT
# configure the GenieGenerator
def config(GenieGen):
fGeo = ROOT.gGeoManager
top = fGeo.GetTopVolume()
# positions for nu events inside the nutau detector volume
muSpectrometer = top.FindNode("volMagneticSpectrometer_1")
muSpectrometerTrans = muSpectrometer.GetMatrix().GetTranslation()
# upper and lower yokes:
# volFeYoke_1, volFeYoke_2, volFeYoke1_1 (in UpYoke) and volFeYoke_3, volFeYoke_4, volFeYoke1_1 (in LowYoke).
yokes = ["volUpYoke_1","volLowYoke_1","volArm2MS_1"]
vols = ["volFeYoke_1", "volFeYoke_2", "volFeYoke1_1","volFeYoke_3", "volFeYoke_4","volIron_12","volIron_23"]
dVec = {}
box = {}
for anode in muSpectrometer.GetNodes():
if anode.GetName() in yokes:
aTrans = anode.GetMatrix().GetTranslation()
for bnode in anode.GetNodes():
if bnode.GetName() in vols:
bTrans = bnode.GetMatrix().GetTranslation()
nm = anode.GetName()+'/'+bnode.GetName()
dVec[nm] = ROOT.TVector3()
x = []
for k in range(3): x.append(aTrans[k]+bTrans[k]+muSpectrometerTrans[k])
dVec[nm].SetXYZ(x[0],x[1],x[2])
sbnode = bnode.GetVolume().GetShape()
box[nm]=ROOT.TVector3(sbnode.GetDX(),sbnode.GetDY(),sbnode.GetDZ())
print "Debug muonSpectro ",nm,dVec[nm],box[nm]
length = dVec["volArm2MS_1/volIron_23"].Z()-dVec["volArm2MS_1/volIron_12"].Z()
zpos = ( dVec["volArm2MS_1/volIron_12"].Z()+dVec["volArm2MS_1/volIron_23"].Z() )/2.
box["volArm2MS_1/volIron_12-23"] = ROOT.TVector3(box["volArm2MS_1/volIron_12"].X(),box["volArm2MS_1/volIron_12"].Y(),length)
dVec["volArm2MS_1/volIron_12-23"] = ROOT.TVector3(0,0,zpos)
rc = box.pop("volArm2MS_1/volIron_23")
rc = box.pop("volArm2MS_1/volIron_12")
if GenieGen=='debug':
for aVol in box:
print '%50s %6.2F %6.2F %6.2F %5.2F %7.2F %7.2F '%(aVol,box[aVol].X(),box[aVol].Y(),box[aVol].Z(),dVec[aVol].X(),dVec[aVol].Y(),dVec[aVol].Z())
else:
for aVol in box:
GenieGen.AddBox(dVec[aVol],box[aVol])
|
StarcoderdataPython
|
1911355
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from scaelum.registry import HOOKS
from ..hooks import Hook
@HOOKS.register_module
class DistributedTimerHelperHook(Hook):
def before_run(self, runner):
runner._timer.clean_prev_file()
def after_run(self, runner):
runner._timer.clean_prev_file()
|
StarcoderdataPython
|
81377
|
# https://docs.aws.amazon.com/code-samples/latest/catalog/python-secretsmanager-secrets_manager.py.html
import boto3
from abc import ABC
import logging
import json
class SecretsManager(ABC):
def __init__(self, secret_id: str):
self._secret_id = secret_id
self._logger = logging.getLogger(SecretsManager.__name__)
def get_secrets(self):
session = boto3.session.Session()
region_name = session.region_name
session = boto3.session.Session()
client = session.client(service_name="secretsmanager", region_name=region_name)
try:
get_secret_value_response = client.get_secret_value(
SecretId=self._secret_id
)
except Exception as e:
self._logger.error(e)
raise e
self._logger.info(f"Secret with id: {self._secret_id} recovered successfully")
secret = get_secret_value_response["SecretString"]
return json.loads(secret)
class TwitterSecretsManager(SecretsManager):
def __init__(self):
super().__init__("sentinel/api_keys/twitter")
class RedditSecretsManager(SecretsManager):
def __init__(self):
super().__init__("sentinel/api_keys/reddit")
class GoogleNewsSecretsManager(SecretsManager):
def __init__(self):
super().__init__("sentinel/api_keys/google_news")
|
StarcoderdataPython
|
295423
|
<gh_stars>1-10
import os
import shutil
import subprocess
import sys
import pytest
from kaggle_runner import utils
from kaggle_runner.runners import coordinator
@pytest.fixture(scope="module")
def runner_configs():
return [
{"port":23454, "size": 384, "network": "intercept", "AMQPURL": utils.AMQPURL()},
{"port":23454, "size": 384, "network": "intercept-resnet", "AMQPURL": utils.AMQPURL()},
]
class TestCoordinator:
coordinator = None
tmp_path = "."
@classmethod
def setup_class(cls):
cls.tmp_path = ".runners"
cls.coordinator = coordinator.Coordinator(cls.tmp_path, "Test Runner")
print("setup_class called once for the class")
@classmethod
def teardown_class(cls):
print("teardown_class called once for the class")
def setup_method(self, method):
if os.path.exists(self.tmp_path):
shutil.rmtree(self.tmp_path)
os.mkdir(self.tmp_path)
print("setup_method called for every method")
def teardown_method(self, method):
# shutil.rmtree(self.tmp_path) # for debug
print("teardown_method called for every method")
def test_generate_runner(self, runner_configs):
self.coordinator.create_runner(runner_configs[1], 19999, False)
# ret = self.coordinator.run_local(path)
# assert ret.returncode == 0
@pytest.mark.timeout(15)
def test_push_runner_nb(self, runner_configs):
path = self.coordinator.create_runner(runner_configs[1], 19999, False)
# ret = self.coordinator.run_local(path)
# assert ret.returncode == 0
if os.getenv("CI") != "true":
ret = self.coordinator.push(path) # just push first
assert ret.returncode == 0
def test_push_runner_cmd(self, runner_configs):
subprocess.run(f"python -m kaggle_runner "
f"{runner_configs[1]['port']} dev", shell=True, check=True)
@pytest.mark.timeout(10)
@pytest.mark.skip("runner runs in computation server, no need test local")
def test_get_mq_back(self, runner_configs):
path = self.coordinator.create_runner(runner_configs[1], 20202)
ret = self.coordinator.push(path)
assert ret.returncode == 0
# just use a timeout, not within then return error
self.coordinator._get_result(timeout=100)
@pytest.mark.skip("runner runs in computation server, no need test local")
def test_create_runners(self, runner_configs):
"""Should just use unit test setup and teardown
"""
for c in runner_configs:
r = self.coordinator.create_runner(c) # we need to let it run
assert r.AMQPURL is not None
class TestMain:
def test_call_remote_mq(self):
call_params = [
"python",
"main.py",
"amqp://drdsfaew:QrBHPPxbsd8IuIxKrCnX3-RGoLKaFhYI@term<EMAIL>.<EMAIL>amqp.com/drdsfaew",
"384", # size 256+128
"123",
"intercept-resnet",
]
utils.logger.debug(" ".join(call_params))
ret = subprocess.run(call_params)
assert ret.returncode == 0
@pytest.mark.skip("test done")
def test_call_local(self):
call_params = [
"python",
"main.py",
"amqp://guest:[email protected]/",
"384", # size 256+128
"123",
"intercept-resnet",
]
utils.logger.debug(" ".join(call_params))
ret = subprocess.run(call_params)
assert ret.returncode == 0
|
StarcoderdataPython
|
72769
|
<filename>hypha/apply/flags/templatetags/flag_tags.py
from django import template
register = template.Library()
@register.filter
def flagged_by(submission, user):
return submission.flagged_by(user)
@register.filter
def flagged_staff(submission):
return submission.flagged_staff
|
StarcoderdataPython
|
9670478
|
"""
24. Write a program that reads 2 numbers and then asks the user which operation they want to perform.
The result of the operation must be accompanied by a sentence saying whether the number is:
even or odd;
positive or negative;
integer or decimal.
"""
numeros = list()
resultado = None
while True:
if len(numeros) < 2:
numero = float(input(f'Informe o {len(numeros) + 1}º número\n'))
numeros.append(numero)
if len(numeros) == 2:
operacao = input('Qual operação deseja realizar?\n[1] +\n[2] -\n[3] /\n[4] *\n')
if operacao == '1':
resultado = numeros[0] + numeros[1]
break
elif operacao == '2':
resultado = numeros[0] - numeros[1]
break
elif operacao == '3':
resultado = numeros[0] / numeros[1]
break
elif operacao == '4':
resultado = numeros[0] * numeros[1]
break
else:
pass
print(f'O resultado da operação foi {resultado}')
if resultado % 2 == 0:
print(f'O resultado {resultado} é par')
elif resultado % 2 == 1:
print(f'O resultado {resultado} é impar')
if resultado > 0:
print(f'O resultado {resultado} é positivo')
else:
print(f'O resultado {resultado} é negativo')
if (resultado * 10) % 10 != 0:
print(f'O resultado {resultado} é decimal')
else:
print(f'O resultado {resultado} é inteiro')
|
StarcoderdataPython
|
3318221
|
<filename>scripts/read_infoclimat_data.py
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 22 15:33:13 2021
@author: User
"""
import netCDF4
import numpy as np
for year in range(1980, 2022):
# filepath = 'C:/Users/jean-/Downloads/PREC_2020.nc'
filepath = f'D:/Data/GrilleInfoClimat2021/PREC_{year}.nc'
netcdf_dset = netCDF4.Dataset(filepath, 'r+')
time = np.array(netcdf_dset['time'])
prec = np.array(netcdf_dset['PREC'])
print(year, '-----------------------')
for i in range(365):
maxval = np.max(prec[i, :, :])
if maxval > 100:
print(i, '{:0.2f}'.format(maxval))
# netcdf_dset.close()
break
|
StarcoderdataPython
|
8082800
|
<reponame>jlehrer1/ConvNeXt-lightning
import comet_ml
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import CometLogger
from pl_bolts.datamodules import CIFAR10DataModule, ImagenetDataModule
from convnextpl import Convnext
import pathlib, os
here = pathlib.Path(__file__).parent.absolute()
key = [f.rstrip() for f in open(os.path.join(here, 'credentials'))][0]
cifar = CIFAR10DataModule()
num_classes = 10
model = Convnext(num_classes=num_classes)
cometlogger = CometLogger(
api_key = key,
project_name = 'convnext-test',
workspace = 'jlehrer1',
)
trainer = Trainer(
logger=cometlogger,
max_epochs=100000,
)
trainer.fit(model, cifar)
|
StarcoderdataPython
|
4934596
|
<gh_stars>0
import matrices as Matrices
from excel_file_extration import ExcelExtractor
def main():
file_name = "data.xlsx"
extractor = ExcelExtractor(file_name)
extracted_matrix = extractor.extract_matrix()
matrice = Matrices.Matrice(len(extracted_matrix), len(extracted_matrix[0]))
matrice.matrice = extracted_matrix
print("matrice:\n" + str(matrice))
mean = Matrices.mean(matrice)
print("mean:\n" + str(mean))
mean_matrice = Matrices.substract_mean(matrice)
print("mean matrice:\n" + str(mean_matrice))
transposee = mean_matrice.Transposee()
AAT = transposee * mean_matrice # the order is reversed because we want the matrix columns to be the categories (agr, min, man..)
covariances = AAT / (mean_matrice.colonnes - 1) # S = AAT/n-1
print("covariances (S):\n" + str(covariances))
eigenvalues, U = Matrices.Jacobi(covariances)
print("lambdas (eigenvalues):\n" + str(eigenvalues))
print("U (eigenvectors):\n" + str(U))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
115787
|
# -*- coding: utf-8 -*-
"""Utility functionality."""
|
StarcoderdataPython
|
3561310
|
<filename>release/stubs.min/System/Drawing/__init___parts/SystemIcons.py
class SystemIcons(object):
""" Each property of the System.Drawing.SystemIcons class is an System.Drawing.Icon object for Windows system-wide icons. This class cannot be inherited. """
Application=None
Asterisk=None
Error=None
Exclamation=None
Hand=None
Information=None
Question=None
Shield=None
Warning=None
WinLogo=None
|
StarcoderdataPython
|
11325231
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 20 07:20:05 2017
@author: <NAME> <<EMAIL>>
"""
import random
def partition(A, p, r):
"""
Partitions the array around the pivot A[r].
"""
x = A[r]
i = p - 1
for j in range(p, r): # from p to r-1
if A[j] <= x:
i += 1
A[i], A[j] = A[j], A[i] # swap values
A[i+1], A[r] = A[r], A[i+1] # swap values
return i + 1
def randomPartition(A, p, r):
"""
Randomized version of the partitioning.
"""
i = random.randint(p, r)
A[r], A[i] = A[i], A[r]
return partition(A, p, r)
def quickSort(A, p=0, r=None):
"""
Sorts the subarray of A from p to r.
"""
if r == None:
r = len(A) - 1
if p < r:
q = partition(A, p, r)
quickSort(A, p, q-1)
quickSort(A, q+1, r)
def randomQuickSort(A, p=0, r=None):
"""
    Randomized version of quicksort.
    """
    if r is None:
r = len(A) - 1
if p < r:
q = randomPartition(A, p, r)
randomQuickSort(A, p, q-1)
randomQuickSort(A, q+1, r)
"""
Test routines
"""
test = [2, 8, 7, 1, 3, 5, 6, 4]
#quickSort(test)
randomQuickSort(test)
print(test)
|
StarcoderdataPython
|
1702278
|
#!/usr/bin/env python
# -*-coding:utf-8 -*-
import rospy
import threading
import time
import numpy as np
from modbus.modbus_nex_api import ModbusNexApi
from modbus.msg import peripheralCmd
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtCore import QTimer, QThread, pyqtSignal
from main_ui import Ui_MainWindow
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
raise StopIteration
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
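# Illustrative use of the switch class above (mirrors arm_task_sub further below): iterate once to
# obtain the match callable, test candidate values, and use an empty call as the default branch.
#
#     for case in switch(2):
#         if case(1):
#             ...  # not taken
#             break
#         if case(2):
#             ...  # taken
#             break
#         if case():
#             ...  # default branch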
class nex_control:
def __init__(self):
self.task_cmd = 0 # init number
self.statusID = 0 # init number
self.nex_api = ModbusNexApi()
rospy.loginfo("API setting")
self.Stop_motion_flag = False
self.Start_motion_flag = False
self.pub_armstatus = rospy.Publisher("/reply_external_comm",peripheralCmd,queue_size=10)
self.sub_taskcmd = rospy.Subscriber("/write_external_comm",peripheralCmd,self.callback)
self.peripheralCmd = peripheralCmd()
def callback(self,data):
self.task_cmd = data.actionTypeID
self.statusID = data.statusID
rospy.loginfo("I heard actionTypeID is %s", data.actionTypeID)
rospy.loginfo("I heard statusID is %s", data.statusID)
def stop_arm_reset(self):
self.nex_api.stop_programs()
self.nex_api.disable_robot()
self.nex_api.send_reset(4096)
def start_arm_reset(self):
self.nex_api.send_reset_other_state(4096, 4) # reset and only reserve enable
self.nex_api.reload_all_programs()
self.nex_api.stop_programs() # reset before starting cmd
        # TODO: this is not tested yet
while not rospy.is_shutdown():
if self.nex_api.is_task_init() == True:
rospy.loginfo("reload_all_programs finished")
break #
def publish_status_running(self):
self.pub_armstatus.publish(99, 99)# hex 63, 63
def publish_status_exit(self):
self.pub_armstatus.publish(100, 100)# hex 64, 64
def holding_registers_control(self, register, value):
self.nex_api.modclient.setOutput(register,value,0)
def arm_task_program(self):
## Test all program run, can get task state
if self.nex_api.operation_mode_state() == "EXT":
self.start_arm_reset()
register = 1024
self.nex_api.modclient.setOutput(register,1,0)
start_status = self.nex_api.start_programs(0)
while not rospy.is_shutdown():
try:
# status 1003 AGV scripts close or shutdown
if self.task_cmd == 1003 and self.statusID == 99:
rospy.loginfo("Scripts break")
self.nex_api.stop_programs()
# self.stop_arm_reset()
break
else:
if start_status == True:
if self.Stop_motion_flag == False:
# stop program command
if self.task_cmd == 5:
self.task_cmd = 0
self.stop_arm_reset()
break
else:
# task is running
self.publish_status_running()
# TODO: add If arm error or stop
if self.nex_api.task_state(0) == "Task exit": #if Task exit
self.Stop_motion_flag = True
# task is exit
self.publish_status_exit()
else:
rospy.loginfo("Stop programs")
self.nex_api.stop_programs()
break
else:
break
except Exception, e:
self.Stop_motion_flag = False
rospy.logwarn("Could not running. %s", str(e))
raise e
else:
self.Stop_motion_flag = False
rospy.loginfo("Please switch to external control mode")
def rotation_task(self):
## Test all program run, can get task state
if self.nex_api.operation_mode_state() == "EXT":
self.start_arm_reset()
register = 1024
self.nex_api.modclient.setOutput(register,2,0)
start_status = self.nex_api.start_programs(0)
while not rospy.is_shutdown():
try:
# status 1003 AGV scripts close or shutdown
if self.task_cmd == 1003 and self.statusID == 99:
rospy.loginfo("Scripts break")
self.nex_api.stop_programs()
# self.stop_arm_reset()
break
else:
if start_status == True:
if self.Stop_motion_flag == False:
# stop program command
if self.task_cmd == 5:
self.task_cmd = 0
self.stop_arm_reset()
break
else:
# task is running
self.publish_status_running()
# TODO: add If arm error or stop
if self.nex_api.task_state(0) == "Task exit": #if Task exit
self.Stop_motion_flag = True
# task is exit
self.publish_status_exit()
else:
rospy.loginfo("Stop programs")
self.nex_api.stop_programs()
break
else:
break
except Exception, e:
self.Stop_motion_flag = False
rospy.logwarn("Could not running. %s", str(e))
raise e
else:
self.Stop_motion_flag = False
rospy.loginfo("Please switch to external control mode")
def pick_and_place_task(self):
## Test all program run, can get task state
if self.nex_api.operation_mode_state() == "EXT":
self.start_arm_reset()
register = 1024
self.nex_api.modclient.setOutput(register,3,0)
start_status = self.nex_api.start_programs(0)
while not rospy.is_shutdown():
try:
# status 1003 AGV scripts close or shutdown
if self.task_cmd == 1003 and self.statusID == 99:
rospy.loginfo("Scripts break")
self.nex_api.stop_programs()
# self.stop_arm_reset()
break
else:
if start_status == True:
if self.Stop_motion_flag == False:
# stop program command
if self.task_cmd == 5:
self.task_cmd = 0
self.stop_arm_reset()
break
else:
# task is running
self.publish_status_running()
# TODO: add If arm error or stop
if self.nex_api.task_state(0) == "Task exit": #if Task exit
self.Stop_motion_flag = True
# task is exit
self.publish_status_exit()
else:
rospy.loginfo("Stop programs")
self.nex_api.stop_programs()
break
else:
break
except Exception, e:
self.Stop_motion_flag = False
rospy.logwarn("Could not running. %s", str(e))
raise e
else:
self.Stop_motion_flag = False
rospy.loginfo("Please switch to external control mode")
def back_home_task(self):
## Test all program run, can get task state
if self.nex_api.operation_mode_state() == "EXT":
self.start_arm_reset()
register = 1024
self.nex_api.modclient.setOutput(register,4,0)
start_status = self.nex_api.start_programs(0)
while not rospy.is_shutdown():
try:
# status 1003 AGV scripts close or shutdown
if self.task_cmd == 1003 and self.statusID == 99:
rospy.loginfo("Scripts break")
self.nex_api.stop_programs()
# self.stop_arm_reset()
break
else:
if start_status == True:
if self.Stop_motion_flag == False:
# stop program command
if self.task_cmd == 5:
self.task_cmd = 0
self.stop_arm_reset()
break
else:
# task is running
self.publish_status_running()
# TODO: add If arm error or stop
if self.nex_api.task_state(0) == "Task exit": #if Task exit
self.Stop_motion_flag = True
# task is exit
self.publish_status_exit()
else:
rospy.loginfo("Stop programs")
self.nex_api.stop_programs()
break
else:
break
except Exception, e:
self.Stop_motion_flag = False
rospy.logwarn("Could not running. %s", str(e))
raise e
else:
self.Stop_motion_flag = False
rospy.loginfo("Please switch to external control mode")
def arm_task_sub(self):
# TODO: switch function test
for case in switch(self.task_cmd):
# start simple task_programs
if case(1):
self.task_cmd = 0
self.arm_task_program()
break
# start rotation_task
if case(2):
self.task_cmd = 0
self.rotation_task()
break
# start pick_and_place_task
if case(3):
self.task_cmd = 0
self.pick_and_place_task()
break
# back_home
if case(4):
self.task_cmd = 0
self.back_home_task()
break
# disable robot
if case(5):
self.task_cmd = 0
self.nex_api.disable_robot()
break
# reset register address 4096
if case(6):
self.task_cmd = 0
self.nex_api.send_reset(4096)
self.nex_api.reset_error_robot()
self.nex_api.send_reset(4096)
break
if case(): # default, could also just omit condition or 'if True'
# self.pub_armstatus.publish(100, 100)
if self.task_cmd == 1003 and self.statusID == 99:
rospy.loginfo("Scripts break")
self.nex_api.stop_programs()
# self.stop_arm_reset()
self.Stop_motion_flag = False
class MyThread(QThread):
    callback = pyqtSignal(int, int)  # custom signal; as noted in the Qt docs, it must be a class attribute
def __init__(self, label, delay, parent=None):
super(MyThread, self).__init__(parent)
self.runFlag = True
self.label=label
self.delay=delay
def __del__(self):
self.runFlag = False
self.wait()
def run(self):
index=0
while self.runFlag:
self.callback.emit(index, self.label)
# print(threading.currentThread().getName())
            # TODO: remove index
index+=1
self.msleep(self.delay)
class StrategyThread(QThread):
    callback = pyqtSignal(int, int)  # custom signal; as noted in the Qt docs, it must be a class attribute
def __init__(self, label, delay, parent=None):
super(StrategyThread, self).__init__(parent)
self.runFlag = True
self.label=label
self.delay=delay
def __del__(self):
self.runFlag = False
self.wait()
def run(self):
index=0
while self.runFlag:
self.callback.emit(index, self.label)
# print(threading.currentThread().getName())
            # TODO: remove index
index+=1
self.msleep(self.delay)
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.vel = 50
self.acc = 50
self.ui = Ui_MainWindow()
self.nex_api_ui = ModbusNexApi()
self.nex_control = nex_control()
self.mission_number = 0
self.ui.setupUi(self)
self._creat_menubar()
self.setWindowIcon(QtGui.QIcon('src/modbus/modbus/picture/teco_icon.png'))
self.ui.lineEdit_vel.setText(str(self.vel))
self.ui.lineEdit_acc.setText(str(self.acc))
self.ui.btn_reset.clicked.connect(self.reset_buttonClicked)
self.ui.btn_enable.clicked.connect(self.enable_buttonClicked)
self.ui.btn_disable.clicked.connect(self.disable_buttonClicked)
self.ui.btn_reload.clicked.connect(self.reload_buttonClicked)
self.ui.onBtn.clicked.connect(self.onBtn)
self.ui.btn_start_program.clicked.connect(self.start_buttonClicked)
self.ui.btn_vel_set.clicked.connect(self.vel_setClicked)
self.ui.btn_acc_set.clicked.connect(self.acc_setClicked)
self.ui.btn_ip_set.clicked.connect(self.ip_setClicked)
# TODO: edit thread for AGV & arm strategy
# self.thread1=StrategyThread(1, 100)
# self.thread1.callback.connect(self.drawUi)
# self.thread1.start()
# Vel. HorizontalSlider
self.ui.horizontalSlider_vel.valueChanged.connect(self.VelSliderValue)
# Acc. HorizontalSlider
self.ui.horizontalSlider_acc.valueChanged.connect(self.AccSliderValue)
# QTimer
self.timer = QTimer()
# QPushButton
self.ui.btn_start_time.clicked.connect(self.timeGo)
self.ui.btn_stop_time.clicked.connect(self.timeStop)
self.ui.btn_reset_time.clicked.connect(self.timeReset)
# Other
self.timer.timeout.connect(self.LCDEvent)
self.s = 0
# ComboBox
choices = ['None','All', 'Init', '3', 'Home','5','6']
self.ui.comboBox.addItems(choices)
self.ui.comboBox.currentIndexChanged.connect(self.display)
self.display()
self.initUi()
# Gif
self.movie = QtGui.QMovie("src/modbus/modbus/picture/earth.gif")
self.ui.label_arm_gif.setMovie(self.movie)
self.movie.start()
def initUi(self):
self.status = self.statusBar()
        self.status.showMessage('Update State', 0)  # message shown in the status bar; the second argument is how long it stays, in milliseconds, default 0 (0 = keep showing it until the next message)
self.status.setStyleSheet("font-size: 18px;background-color: #F5E8FF")
self.safetyNum = QtWidgets.QLabel("Safety:")
self.taskNum = QtWidgets.QLabel("Task:")
self.reloadNum = QtWidgets.QLabel("Reload:")
self.safetyNum.setFixedWidth(200)
# self.safetyNum.setStyleSheet("background-color:red;font-size: 18px;border-radius: 25px;border: 1px solid black;")
self.safetyNum.setStyleSheet("font-size: 18px;border-radius: 25px;border: 1px solid black;")
self.taskNum.setFixedWidth(200)
self.taskNum.setStyleSheet("font-size: 18px;border-radius: 25px;border: 1px solid black;")
self.reloadNum.setFixedWidth(200)
self.reloadNum.setStyleSheet("font-size: 18px;border-radius: 25px;border: 1px solid black;")
self.status.addPermanentWidget(self.safetyNum, stretch=0)
self.status.addPermanentWidget(self.taskNum, stretch=0)
self.status.addPermanentWidget(self.reloadNum, stretch=0)
def _creat_menubar(self):
self.menu=self.menuBar()
file=self.menu.addMenu('File')
# file.addAction('New')
# file.addAction('Open')
# file.addAction('Close Project')
tool=self.menu.addMenu('Tool')
# tool.addAction('Python')
# tool.addAction('C++')
# tool.addAction('C')
def display(self):
self.ui.label_mission_case_show.setText('Choose:%s' % self.ui.comboBox.currentText())
if self.ui.comboBox.currentText() == "None":
task_value = 0
elif self.ui.comboBox.currentText() == "All":
task_value = 1
elif self.ui.comboBox.currentText() == "Init":
task_value = 2
elif self.ui.comboBox.currentText() == "3":
task_value = 3
elif self.ui.comboBox.currentText() == "Home":
task_value = 4
elif self.ui.comboBox.currentText() == "5":
task_value = 5
elif self.ui.comboBox.currentText() == "6":
task_value = 6
self.mission_number = task_value
def timeGo(self):
self.timer.start(100)
def timeStop(self):
self.timer.stop()
def timeReset(self):
# self.timer.reset()
self.s = 0
second = self.s/10
m_second = self.s%10
showtest = str(second) + '.' + str(m_second)
self.ui.lcdNumber.display(showtest)
def LCDEvent(self):
self.s += 1
second = self.s/10
m_second = self.s%10
showtest = str(second) + '.' + str(m_second)
self.ui.lcdNumber.display(showtest)
def reset_buttonClicked(self):
self.nex_api_ui.send_reset(4096)
self.nex_api_ui.reset_error_robot()
self.nex_api_ui.send_reset(4096)
def enable_buttonClicked(self):
self.nex_api_ui.send_reset_other_state(4096, 4) # reset and only reserve enable
def disable_buttonClicked(self):
self.nex_api_ui.disable_robot()
def reload_buttonClicked(self):
# self.nex_control.start_arm_reset()
self.nex_api_ui.send_reset_other_state(4096, 4) # reset and only reserve enable
self.nex_api_ui.reload_all_programs()
self.nex_api_ui.stop_programs() # reset before starting cmd
while not rospy.is_shutdown():
if self.nex_api_ui.is_task_init() == True:
rospy.loginfo("reload_all_programs finished")
break #
def start_buttonClicked(self):
register = 1024
value = self.mission_number
self.nex_api_ui.modclient.setOutput(register,value,0)
start_status = self.nex_api_ui.start_programs(0)
def onBtn(self, event):
self.thread1=MyThread(1, 100)
self.thread1.callback.connect(self.drawUi)
self.thread1.start()
self.thread2=MyThread(2, 100)
self.thread2.callback.connect(self.drawUi)
self.thread2.start()
def ip_setClicked(self):
text = self.ui.lineEdit_ip.text()
self.ui.label_ip.setText("Set IP:"+text)
self.ui.lineEdit_ip.clear()
self.nex_api_ui.ip_set(text)
rospy.loginfo("Setup complete")
#@<EMAIL>.pyqtSlot(int, int)
def drawUi(self, index, label):
if label==1:
# self.ui.label_task_state.setText("task state:"+self.nex_api_ui.task_state(0))
# self.ui.label_reload_state.setText("reload state:"+self.nex_api_ui.is_task_init())
self.safetyNum.setText("Safety:"+self.safety_state())
self.taskNum.setText("Task:"+self.task_state(0))
self.reloadNum.setText("Reload:"+str(self.is_task_init()))
# (0)Disable, (1)Ready, (2)Error, (3)Enable, (4)Running
if self.nex_api_ui.safety_state() == "Error":
self.safetyNum.setStyleSheet("background-color:red;font-size: 18px;border-radius: 25px;border: 1px solid black;")
else :
# ACS_actual = self.nex_api_ui.read_ACS_actual_position()
# ACS_command = self.nex_api_ui.read_ACS_command_position()
# PCS_actual = self.nex_api_ui.read_PCS_actual_position()
# PCS_command = self.nex_api_ui.read_PCS_command_position()
# self.ui.label_acs_command_show.setText("A1:"+ ACS_command.axis1 +"A2:" + ACS_command.axis2 + "A3:" + ACS_command.axis3 + "A4:"+ ACS_command.axis4 + "A5:"+ ACS_command.axis5 + "A6:" + ACS_command.axis6 )
# self.ui.label_acs_actual_show.setText("A1:"+ ACS_actual.axis1 +"A2:" + ACS_actual.axis2 + "A3:" + ACS_actual.axis3 + "A4:"+ ACS_actual.axis4 + "A5:"+ ACS_actual.axis5 + "A6:" + ACS_actual.axis6 )
# self.ui.label_pcs_command_show.setText("X:"+ PCS_command.X +"Y:" + PCS_command.Y + "Z:" + PCS_command.Z + "A:"+ PCS_command.A + "B:"+ PCS_command.B + "C:" + PCS_command.C )
# self.ui.label_pcs_actual_show.setText("X:"+ PCS_actual.X +"Y:" + PCS_actual.Y + "Z:" + PCS_actual.Z + "A:"+ PCS_actual.A + "B:"+ PCS_actual.B + "C:" + PCS_actual.C )
pass
def vel_setClicked(self):
register = 1025
self.vel = int(self.ui.lineEdit_vel.text())
self.nex_api_ui.modclient.setOutput(register,self.vel,0)
def acc_setClicked(self):
register = 1026
self.acc = int(self.ui.lineEdit_acc.text())
self.nex_api_ui.modclient.setOutput(register,self.acc,0)
def VelSliderValue(self):
self.ui.lineEdit_vel.setText(str(self.ui.horizontalSlider_vel.value()))
def AccSliderValue(self):
self.ui.lineEdit_acc.setText(str(self.ui.horizontalSlider_acc.value()))
if __name__=="__main__":
rospy.init_node("control_strategy")
nex = nex_control()
app = QtWidgets.QApplication([])
window = MainWindow()
window.show()
sys.exit(app.exec_())
# thread_ui.join()
|
StarcoderdataPython
|
1689263
|
import os
from tinydb import TinyDB
from deduplify.hash_files import hashfile, restart_run
def test_hashfile():
path = os.path.join("tests", "assets", "test_infile.json")
md5_hash, outpath = hashfile(path)
assert md5_hash == "f3fb257d843b252bdc0442402552d840"
assert outpath == path
def test_restart_run():
test_db = TinyDB(os.path.join("tests", "assets", "test_db.json"))
expected_list = ["file.txt", "file.txt", "file.txt"]
files_to_be_skipped = restart_run(test_db)
assert files_to_be_skipped == expected_list
|
StarcoderdataPython
|
11252547
|
import datetime
import discord
from discord.ext import commands
from discord.ext.commands import Bot
from cogs.core.config.config_botchannel import botchannel_check
from cogs.core.config.config_embedcolour import get_embedcolour
from cogs.core.config.config_prefix import get_prefix_string
from cogs.core.defaults.defaults_embed import get_embed_footer
from cogs.core.functions.logging import log
class serverinfo(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def serverinfo(self, ctx):
time = datetime.datetime.now()
user = ctx.author.name
if await botchannel_check(ctx):
embed = discord.Embed(
title=f"**Serverinfo für {ctx.guild.name}**",
colour=await get_embedcolour(ctx.message),
)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.add_field(name="**Name:**", value=ctx.guild.name, inline=True)
embed.add_field(name="**ID:**", value=ctx.guild.id, inline=True)
embed.add_field(name="**Region:**", value=ctx.guild.region, inline=True)
embed.add_field(
name="**Erstellt am:**",
value=ctx.guild.created_at.strftime("%d.%m.%y um %H:%M"),
inline=True,
)
embed.add_field(
name="**Besitzer:**", value=ctx.guild.owner.mention, inline=True
)
embed.add_field(
name="**Nutzerzahlen:**",
value=f"Gesamt: `{ctx.guild.member_count}`\n"
"Nutzer: "
f"`{len(list(filter(lambda m: not m.bot, ctx.guild.members)))}`\n"
"Bots: "
f"`{len(list(filter(lambda m: m.bot, ctx.guild.members)))}`\n",
inline=True,
)
embed._footer = await get_embed_footer(ctx)
await ctx.send(embed=embed)
await log(
str(time)
+ ": Der Nutzer "
+ str(user)
+ " hat den Befehl "
+ await get_prefix_string(ctx.message)
+ "serverinfo benutzt!",
ctx.guild.id,
)
else:
Bot.dispatch(self.bot, "botchannelcheck_failure", ctx)
########################################################################################################################
def setup(bot):
bot.add_cog(serverinfo(bot))
|
StarcoderdataPython
|
8046407
|
import re
import csv
import unicodecsv
import xlrd
from bs4 import BeautifulSoup
from openelex.base.load import BaseLoader
from openelex.models import RawResult
from openelex.lib.text import ocd_type_id, slugify
from .datasource import Datasource
class LoadResults(object):
"""Entry point for data loading.
Determines appropriate loader for file and triggers load process.
"""
def run(self, mapping):
election_id = mapping['election']
if 'precincts' in election_id:
loader = OHPrecinctLoader()
elif '2000' in election_id and 'primary' in election_id:
loader = OHLoader2000Primary()
elif '2008' in election_id and 'special' in election_id:
loader = OHLoader2008Special()
else:
            loader = OHHTMLLoader()
loader.run(mapping)
class OHBaseLoader(BaseLoader):
datasource = Datasource()
target_offices = set([
'President - Vice Pres',
'President and Vice President of the United States',
'U.S. Senate',
'U.S. Representative',
'Representative in Congress',
'Governor/Lieutenant Governor',
'Attorney General',
'Auditor of State',
'Secretary of State',
'Treasurer of State',
'State Senate',
'State Representative',
])
district_offices = set([
'U.S. Congress',
'Representative in Congress',
'State Senator',
"House of Delegates",
])
def _skip_row(self, row):
"""
Should this row be skipped?
This should be implemented in subclasses.
"""
return False
class OH2012PrecinctLoader(OHBaseLoader):
"""
Parse Ohio election results for 2012 precinct-level results files.
"""
def load(self):
with self._file_handle as xlsfile:
results = []
            # xlrd needs raw bytes, not an open file object; column labels live in the second row
            workbook = xlrd.open_workbook(file_contents=xlsfile.read())
            worksheet = workbook.sheet_by_name('AllCounties')
            headers = [str(cell.value).strip() for cell in worksheet.row(1)]
            reader = (dict(zip(headers, (cell.value for cell in worksheet.row(i))))
                      for i in range(2, worksheet.nrows))
            for row in reader:
# Skip non-target offices
if self._skip_row(row):
continue
elif 'state_legislative' in self.source:
results.extend(self._prep_state_leg_results(row))
elif 'precinct' in self.source:
results.append(self._prep_precinct_result(row))
else:
results.append(self._prep_county_result(row))
RawResult.objects.insert(results)
class OH2010PrecinctLoader(OHBaseLoader):
"""
Parse Ohio election results for 2010 precinct-level results (general
and primary) contained in xlsx/xls files.
"""
def load(self):
with self._file_handle as xlsfile:
results = []
            # xlrd needs raw bytes, not an open file object; column labels live in the second row
            workbook = xlrd.open_workbook(file_contents=xlsfile.read())
            worksheet = workbook.sheet_by_name('AllCounties')
            headers = [str(cell.value).strip() for cell in worksheet.row(1)]
            reader = (dict(zip(headers, (cell.value for cell in worksheet.row(i))))
                      for i in range(2, worksheet.nrows))
            for row in reader:
# Skip non-target offices
if self._skip_row(row):
continue
elif 'state_legislative' in self.source:
results.extend(self._prep_state_leg_results(row))
elif 'precinct' in self.source:
results.append(self._prep_precinct_result(row))
else:
results.append(self._prep_county_result(row))
RawResult.objects.insert(results)
def _skip_row(self, row):
return row['Office Name'].strip() not in self.target_offices
def _build_contest_kwargs(self, row, primary_type):
kwargs = {
'office': row['Office Name'].strip(),
'district': row['Office District'].strip(),
}
# Add party if it's a primary
#TODO: QUESTION: Should semi-closed also have party?
if primary_type == 'closed':
kwargs['primary_party'] = row['Party'].strip()
return kwargs
def _build_candidate_kwargs(self, row):
try:
full_name = row['Candidate Name'].strip()
except KeyError:
# 2000 results use "Candidate" for the column name
full_name = row['Candidate'].strip()
slug = slugify(full_name, substitute='-')
kwargs = {
'full_name': full_name,
#TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
'name_slug': slug,
}
return kwargs
def _base_kwargs(self, row):
"Build base set of kwargs for RawResult"
# TODO: Can this just be called once?
kwargs = self._build_common_election_kwargs()
contest_kwargs = self._build_contest_kwargs(row, kwargs['primary_type'])
candidate_kwargs = self._build_candidate_kwargs(row)
kwargs.update(contest_kwargs)
kwargs.update(candidate_kwargs)
return kwargs
def _get_state_ocd_id(self):
# It looks like the OCD ID in this years mappings is
# ocd-division/country:us/state:oh/precinct:all
# We need to get rid of the "precinct:all" part to
# build valid OCD IDs for the individual jurisdictions.
return '/'.join(self.mapping['ocd_id'].split('/')[:-1])
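    # e.g. 'ocd-division/country:us/state:oh/precinct:all' -> 'ocd-division/country:us/state:oh'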
def _prep_state_leg_results(self, row):
kwargs = self._base_kwargs(row)
kwargs.update({
'reporting_level': 'state_legislative',
'winner': row['Winner'].strip(),
'write_in': self._writein(row),
'party': row['Party'].strip(),
})
try:
kwargs['write_in'] = row['Write-In?'].strip() # at the contest-level
except KeyError as e:
pass
results = []
for field, val in row.items():
clean_field = field.strip()
# Legislative fields prefixed with LEGS
if not clean_field.startswith('LEGS'):
continue
kwargs.update({
'jurisdiction': clean_field,
'ocd_id': "{}/sldl:{}".format(self._get_state_ocd_id(),
ocd_type_id(clean_field)),
'votes': self._votes(val),
})
results.append(RawResult(**kwargs))
return results
def _prep_county_result(self, row):
kwargs = self._base_kwargs(row)
vote_brkdown_fields = [
('election_night_total', 'Election Night Votes'),
('absentee_total', 'Absentees Votes'),
('provisional_total', 'Provisional Votes'),
('second_absentee_total', '2nd Absentees Votes'),
]
vote_breakdowns = {}
for field, key in vote_brkdown_fields:
try:
vote_breakdowns[field] = row[key].strip()
except KeyError:
pass
kwargs.update({
'reporting_level': 'county',
'jurisdiction': self.mapping['name'],
'ocd_id': "{}/county:{}".format(self._get_state_ocd_id(),
ocd_type_id(self.mapping['name'])),
'party': row['Party'].strip(),
'votes': self._votes(row['Total Votes']),
})
if (kwargs['office'] not in self.district_offices
and kwargs['district'] != ''):
kwargs['reporting_level'] = 'congressional_district_by_county'
kwargs['reporting_district'] = kwargs['district']
del kwargs['district']
return RawResult(**kwargs)
def _prep_precinct_result(self, row):
kwargs = self._base_kwargs(row)
vote_breakdowns = {
'election_night_total': self._votes(row['Election Night Votes'])
}
precinct = "%s-%s" % (row['Election District'], row['Election Precinct'].strip())
kwargs.update({
'reporting_level': 'precinct',
'jurisdiction': precinct,
'ocd_id': "{}/precinct:{}".format(self._get_state_ocd_id(),
ocd_type_id(precinct)),
'party': row['Party'].strip(),
'votes': self._votes(row['Election Night Votes']),
'winner': row['Winner'],
'write_in': self._writein(row),
'vote_breakdowns': vote_breakdowns,
})
return RawResult(**kwargs)
def _votes(self, val):
"""
Returns cleaned version of votes or 0 if it's a non-numeric value.
"""
if val.strip() == '':
return 0
try:
return int(float(val))
except ValueError:
            # Couldn't convert the value from a string
return 0
def _writein(self, row):
# sometimes write-in field not present
try:
write_in = row['Write-In?'].strip()
except KeyError:
write_in = None
return write_in
class MDLoader2002(MDBaseLoader):
"""
Loads Maryland results for 2002.
Format:
Maryland results for 2002 are in a delimited text file where the delimiter
is '|'.
Fields:
0: Office
1: Office District - '-' is used to denote null values
2: County
3: Last Name - "zz998" is used for write-in candidates
4: Middle Name - "\N" is used to denote null values
5: First Name - "Other Write-Ins" is used for write-in candidates
6: Party
7: Winner - Value is 0 or 1
8: UNKNOWN - Values are "(Vote for One)", "(Vote for No More Than Three)", etc.
9: Votes
10: UNKNOWN - Values are "\N" for every row
Sample row:
House of Delegates |32 |<NAME> County |Burton |W. |Robert |Republican | 0|(Vote for No More Than Three) | 1494|\N
Notes:
In the general election file, there are rows for judges and for
"Statewide Ballot Questions". The columns in these rows are shifted over,
but we can ignore these rows since we're not interested in these offices.
"""
def load(self):
headers = [
'office',
'district',
'jurisdiction',
'family_name',
'additional_name',
'given_name',
'party',
'winner',
'vote_type',
'votes',
'fill2'
]
self._common_kwargs = self._build_common_election_kwargs()
self._common_kwargs['reporting_level'] = 'county'
# Store result instances for bulk loading
results = []
with self._file_handle as csvfile:
reader = unicodecsv.DictReader(csvfile, fieldnames = headers, delimiter='|', encoding='latin-1')
for row in reader:
if self._skip_row(row):
continue
rr_kwargs = self._common_kwargs.copy()
if rr_kwargs['primary_type'] == 'closed':
rr_kwargs['primary_party'] = row['party'].strip()
rr_kwargs.update(self._build_contest_kwargs(row))
rr_kwargs.update(self._build_candidate_kwargs(row))
rr_kwargs.update({
'party': row['party'].strip(),
'jurisdiction': row['jurisdiction'].strip(),
'office': row['office'].strip(),
'district': row['district'].strip(),
'votes': int(row['votes'].strip()),
})
results.append(RawResult(**rr_kwargs))
RawResult.objects.insert(results)
def _skip_row(self, row):
return row['office'].strip() not in self.target_offices
def _build_contest_kwargs(self, row):
return {
'office': row['office'].strip(),
'district': row['district'].strip(),
}
def _build_candidate_kwargs(self, row):
return {
'family_name': row['family_name'].strip(),
'given_name': row['given_name'].strip(),
'additional_name': row['additional_name'].strip(),
}
class MDLoader2000Primary(MDBaseLoader):
office_choices = [
"President and Vice President of the United States",
"U.S. Senator",
"Representative in Congress",
"Judge of the Circuit Court",
"Female Delegates and Alternate to the Democratic National Convention",
"Female Delegates to the Democratic National Convention",
"Male Delegates to the Democratic National Convention",
"Male Delegates and Alternate to the Democratic National Convention",
"Delegates to the Republican National Convention",
]
def load(self):
candidates = {}
results = []
last_office = None
last_party = None
last_district = None
common_kwargs = self._build_common_election_kwargs()
with self._file_handle as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if not len(row):
continue # Skip blank lines
# determine if this is a row with an office
office, party, district = self._parse_header(row)
if office:
# It's a header row
if office in self.target_offices:
# It's an office we care about. Save the office and
# party for the next row
last_office = office
last_party = party
last_district = district
else:
last_office = None
last_party = None
last_district = None
elif last_office and row[0] == '':
# Candidate name row
candidates, winner_name = self._parse_candidates(row)
elif last_office: # has to be a county result
new_results = self._parse_results(row, last_office,
last_party, last_district,
candidates, winner_name, common_kwargs)
results.extend(new_results)
RawResult.objects.insert(results)
def _parse_header(self, row):
"""
Returns a tuple of office and party and congressional district
if the row is a header.
Returns (None, None, None) for a non-header row.
Note that the district doesn't represent the district of the office
"""
office = self._parse_office(row)
if office:
party = self._parse_party(row)
district = self._parse_district(row)
else:
party = None
district = None
return office, party, district
def _parse_office(self, row):
for o in self.office_choices:
if o in row[0]:
return o
return None
def _parse_party(self, row):
if 'Democratic' in row[0]:
return 'Democratic'
elif 'Republican' in row[0]:
return 'Republican'
else:
return None
def _parse_district(self, row):
if 'District' not in row[0]:
return None
return re.search(r'(\d+)', row[0]).groups(0)[0]
def _parse_candidates(self, row):
candidates = []
for col in row:
if col != '':
full_name = col.strip()
if 'Winner' in full_name:
# Trim winner from candidate name
full_name, remainder = full_name.split(' Winner')
winner = full_name
candidates.append(full_name)
return candidates, winner
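    # e.g. a header cell of 'John Smith Winner' is recorded as candidate 'John Smith' and marked as the winner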
# TODO: QUESTION: How to handle "Uncomitted to any ..." values
def _parse_results(self, row, office, party, district, candidates,
winner_name, common_kwargs):
results = []
cols = [x.strip() for x in row if x != '']
county = cols[0].strip()
cand_results = zip(candidates, cols[1:])
for cand, votes in cand_results:
result_kwargs = common_kwargs.copy()
result_kwargs.update({
'jurisdiction': county,
'office': office,
'party': party,
'full_name': cand,
'votes': int(votes),
})
if result_kwargs['primary_type'] == 'closed':
result_kwargs['primary_party'] = party
if office == "Representative in Congress":
# In the case of U.S. representatives, the district represents
# the office district. In all other cases, it just
# represents the level of result aggregation.
result_kwargs['district'] = district
if cand == winner_name:
result_kwargs['winner'] = 'Winner'
# Try to figure out if this is a case where results are
# provided by congressional district split by county and
# record this.
result_kwargs['reporting_level'] = self._get_reporting_level(district)
if result_kwargs['reporting_level'] == 'congressional_district_by_county':
result_kwargs['reporting_district'] = district
results.append(RawResult(**result_kwargs))
return results
def _get_reporting_level(self, district):
"""
Returns the reporting level based on the value of the results' district.
This deals with the way in which results for 2000 primaries are
returned broken down by both congressional district, split by county.
"""
if district:
return "congressional_district_by_county"
else:
return "county"
class OHHTMLLoader(BaseLoader):
"""
Loader for Ohio .aspx results files
"""
datasource = Datasource()
def load(self):
table = self._get_html_table()
rows = self._parse_html_table(table)
winner_name = self._parse_winner_name(rows[0])
candidate_attrs = self._parse_candidates_and_parties(rows[0],
winner_name)
results = self._parse_results(rows[1:88], candidate_attrs)
RawResult.objects.insert(results)
def _get_html_table(self):
soup = BeautifulSoup(self._file_handle)
return soup.find(text=re.compile("Denotes winner")).parent.parent.find_all('table')[0]
def _parse_html_table(self, table):
rows = []
for tr in table.find_all('tr'):
rows.append(self._parse_html_table_row(tr))
return rows
def _parse_html_table_row(self, tr):
row = []
cells = tr.find_all('th') + tr.find_all('td')
for cell in cells:
row.append(cell.text.strip())
return row
def _parse_winner_name(self, row):
# winner is prefaced by an *
name = [x for x in row if x[0] == '*'][0]
return self._parse_name(name[1:])
def _parse_candidates_and_parties(self, row, winner_name):
candidate_attrs = []
for cell in row[1:]:
# Skip the first cell. It's a header, "County"
attrs = {
'full_name': self._parse_name(cell)
}
if attrs['full_name'] == winner_name:
attrs['contest_winner'] = True
candidate_attrs.append(attrs)
return candidate_attrs
def _parse_name(self, s):
if s == "Other Write-Ins":
return s
# We know that all the candidate names are just first and last names
bits = re.split(r'\s', s)
return ' '.join(bits[:2])
def _parse_party(self, s):
if s == "Other Write-Ins":
return None
bits = re.split(r'\s', s)
return bits[2]
def _parse_write_in(self, s):
if s == "Other Write-Ins":
return s
elif "Write-In" in s:
return "Write-In"
else:
return ""
def _parse_results(self, rows, candidate_attrs):
# These raw result attributes will be the same for every result.
common_kwargs = self._build_common_election_kwargs()
common_kwargs.update({
'office': "Representative in Congress",
'district': '4',
'reporting_level': "county",
})
results = []
for row in rows:
county = row[0]
for i in range(1, len(row)):
kwargs = common_kwargs.copy()
kwargs.update(candidate_attrs[i-1])
kwargs['jurisdiction'] = county
kwargs['votes'] = self._parse_votes(row[i])
results.append(RawResult(**kwargs))
return results
def _parse_votes(self, s):
return int(s.split(' ')[0].replace(',', ''))
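    # Illustrative: a cell such as '1,494 (52.3%)' yields 1494 -- the first token with commas removed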
|
StarcoderdataPython
|
12852299
|
<gh_stars>1-10
from django.http import HttpResponse
def index(request):
return HttpResponse(request.get_full_path())
|
StarcoderdataPython
|
3480475
|
<gh_stars>1-10
class AuthenticateError(Exception):
    '''Raised when authentication fails.'''
pass
class InputParameterError(Exception):
"""Raised when the both input options are provided."""
pass
|
StarcoderdataPython
|
6416633
|
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool)
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': config('DB_HOST'),
'PORT': config('DB_PORT'),
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('<PASSWORD>'),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#############################################################
# STATIC FILES
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
#############################################################
# LOGIN / LOGOUT
LOGIN_URL = 'rest_framework:login'
LOGOUT_URL = 'rest_framework:logout'
LOGIN_REDIRECT_URL = '/api/produtos/'
LOGOUT_REDIRECT_URL = '/api-auth/login/'
SETTINGS = {
'USE_SESSION_AUTH': True,
'JSON_EDITOR': True,
'REFETCH_SCHEMA_ON_LOGOUT': True,
'SECURITY_DEFINITIONS': {
'basic': {
'type': 'basic'
}
},
}
#############################################################
# LOGGING
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '%(asctime)s %(name)s %(filename)s:%(lineno)s %(funcName)s %(levelname)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'file': {
'format': '%(asctime)s %(name)s %(filename)s:%(lineno)s %(funcName)s %(levelname)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'filters': ['require_debug_true'],
'formatter': 'console',
'level': 'INFO',
},
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': BASE_DIR+'/debug.log',
'formatter': 'file',
'maxBytes': 10 * 1024 * 1024, # 10MB
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'formatter': 'console',
'filters': ['require_debug_false'],
}
},
'loggers': {
'': {
'level': config('LOGLEVEL'),
'handlers': ['console', 'file'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
},
'django.db.backends': {
'level': 'DEBUG',
},
},
}
|
StarcoderdataPython
|
4948377
|
<gh_stars>0
# pylint: disable=line-too-long
"""This app is based on the [`FastGridTemplate`]\
(https://panel.holoviz.org/reference/templates/FastGridTemplate.html#templates-gallery-fastgridtemplate) and the *Fast Components* provided by the
<fast-anchor href="https://awesome-panel.readthedocs.io/en/latest/packages/awesome-panel-extensions/index.html#fast" appearance="hypertext" target="_blank">awesome-panel-extensions</fast-anchor>
package.
You can install the package via `pip install awesome-panel-extensions` and use the fast components
via `from awesome_panel_extensions.frameworks import fast`.
We are also using some Panel Components with Fast like CSS styling.
## <a href="https://fast.design" target="_blank"><img src="https://explore.fast.design/e1e15bd85334e4346744078af2f52308.svg" style="vertical-align: middle; height: 32px;"></a>
Fast is the adaptive interface system for modern web experiences.
Interfaces built with FAST adapt to your design system and can be used with any modern UI Framework by leveraging industry standard Web Components.
Checkout the <fast-anchor href="https://explore.fast.design/components/fast-accordion" appearance="hypertext" target="_blank">Component Gallery</fast-anchor>.
"""
# pylint: enable=line-too-long
import holoviews as hv
import numpy as np
import panel as pn
from awesome_panel_extensions.frameworks.fast import (
FastButton,
FastCheckbox,
FastLiteralInput,
FastSwitch,
FastTextInput,
)
from holoviews import opts
from panel.template import FastGridTemplate
from awesome_panel import config
from awesome_panel.apps.echarts import EchartsApp
app = config.extension(url="fast_grid_template", template=None, intro_section=False)
ACCENT = config.ACCENT
SIDEBAR_FOOTER = config.menu_fast_html(app_html=config.app_menu_fast_html, accent=ACCENT)
HEADER = [config.get_header()]
INTRO_SECTION = app.intro_section()
ellipse_opts = opts.Ellipse(line_width=3, color=ACCENT)
def _create_hvplot():
# Generate some data
cl1 = np.random.normal(loc=2, scale=0.2, size=(200, 200))
cl2x = np.random.normal(loc=-2, scale=0.6, size=200)
cl2y = np.random.normal(loc=-2, scale=0.1, size=200)
cl3 = np.random.normal(loc=0, scale=1.5, size=(400, 400))
# Create an overlay of points and ellipses
clusters = (
hv.Points(cl1).opts(color="blue")
* hv.Points((cl2x, cl2y)).opts(color="green")
* hv.Points(cl3).opts(color="#FDDC22")
)
plot = (
clusters
* hv.Ellipse(2, 2, 2).opts(ellipse_opts)
* hv.Ellipse(-2, -2, (4, 2)).opts(ellipse_opts)
)
plot.opts(responsive=True)
return pn.Column(
pn.pane.Markdown("## HoloViews Plot"),
pn.pane.HoloViews(plot, sizing_mode="stretch_both"),
sizing_mode="stretch_both",
)
def _create_fast_button_card():
component = FastButton(name="Click me", appearance="accent")
parameters = [
"clicks",
"name",
"appearance",
"button_type",
]
widgets = {
"clicks": {"disabled": True},
}
return _create_card(component=component, parameters=parameters, widgets=widgets)
def _create_fast_checkbox_card():
component = FastCheckbox(name="Check me", value=False)
parameters = [
"value",
"name",
"readonly",
]
widgets = {
"value": FastCheckbox,
"readonly": FastCheckbox,
}
return _create_card(component=component, parameters=parameters, widgets=widgets)
def _create_fast_literal_input_card():
component = FastLiteralInput(
name="Literal Input",
type=(type, dict),
value={"a": 1, "b": "Hello Fast"},
placeholder="Enter a dict",
)
parameters = [
"value",
"name",
# "type",
"placeholder",
"appearance",
"serializer",
"readonly",
]
widgets = {
"value": FastLiteralInput,
"type": {"type": FastLiteralInput, "disabled": True},
"placeholder": FastTextInput,
"readonly": FastCheckbox,
}
return _create_card(component=component, parameters=parameters, widgets=widgets)
def _create_fast_switch_card():
component = FastSwitch(
name="Notify by Email",
value=False,
checked_message="On",
unchecked_message="Off",
)
parameters = [
"value",
"name",
"checked_message",
"unchecked_message",
"readonly",
]
widgets = {
"value": FastCheckbox,
"checked_message": FastTextInput,
"unchecked_message": FastTextInput,
"readonly": FastCheckbox,
}
return _create_card(component=component, parameters=parameters, widgets=widgets)
def _create_card(component, parameters, widgets):
component.sizing_mode = "stretch_width"
parameters = [*parameters, "disabled", "width", "height", "sizing_mode"]
widgets["name"] = FastTextInput
widgets["disabled"] = FastCheckbox
for index, name in enumerate(parameters):
component.param[name].precedence = index
component.width = 200
settings = pn.Param(
component,
parameters=parameters,
widgets=widgets,
show_name=False,
sizing_mode="stretch_width",
)
return pn.Column(
pn.pane.HTML(f"<h2>{component.__class__.name}</h2>"),
pn.Row(component, height=60),
pn.pane.HTML("<h3>Parameters</h3>"),
settings,
sizing_mode="stretch_both",
)
template = FastGridTemplate(
title="FastGridTemplate",
row_height=55,
prevent_collision=True,
save_layout=True,
accent_base_color=ACCENT,
header_background=ACCENT,
sidebar_footer=SIDEBAR_FOOTER,
header=HEADER,
)
template.main[0:9, 0:6] = INTRO_SECTION
template.main[0:9, 6:12] = _create_hvplot()
template.main[9:16, 0:12] = EchartsApp(accent=ACCENT)
template.main[16:30, 0:3] = _create_fast_button_card()
template.main[16:30, 3:6] = _create_fast_checkbox_card()
template.main[16:30, 6:9] = _create_fast_literal_input_card()
template.main[16:30, 9:12] = _create_fast_switch_card()
template.servable()
|
StarcoderdataPython
|
4919407
|
#!/usr/bin/python2.7
from flask import Flask, render_template, request, json, jsonify
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import or_
import subprocess
app = Flask (__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from models import *
@app.route ('/')
def homepage():
return render_template('index.html', title='Downing Jones')
@app.route('/index')
def index():
return render_template('index.html', title='Downing Jones')
@app.route('/about')
def about():
return render_template('about.html', title='About')
@app.route('/companies')
def companies():
companies = Company.query.distinct(Company.symbol)
return render_template('companies.html',
title='Companies',
companies=companies)
@app.route('/companies/<id>')
def company(id):
company = Company.query.get(id)
return render_template('company.html',
title=company.name,
company=company)
@app.route('/currencies')
def currencies():
currencies = Currency.query.distinct(Currency.currency)
return render_template('currencies.html',
title='Currencies',
currencies=currencies)
@app.route ('/currencies/<id>')
def currency(id):
currency = Currency.query.get(id)
return render_template('currency.html',
title=currency.name,
currency=currency)
@app.route('/locations')
def locations():
locations = Location.query.distinct(Location.name)
return render_template('locations.html',
title='Locations',
locations=locations)
@app.route('/locations/<id>')
def location(id):
location = Location.query.get(id)
return render_template('location.html',
title=location.name,
location=location)
@app.route ('/stockmarkets')
def stockmarkets():
markets = Exchange.query.distinct(Exchange.name)
return render_template('stockmarkets.html',
title='Exchanges',
markets=markets)
@app.route ('/stockmarkets/<id>')
def market(id):
market = Exchange.query.get(id)
return render_template('stockmarket.html',
title=market.name,
market=market)
@app.route('/api/run_tests')
def tests():
try:
process = subprocess.Popen(['python3', '/var/www/jones/salehjones.com/downing/tests.py'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
return str(out) + str(err)
except Exception as exc:
return str(exc)
@app.route('/api/exchange/<int:id>', methods=['GET'])
def get_exchange(id):
market = Exchange.query.get(id)
return jsonify(id = id,
name = market.name,
name_code = market.exchange,
market_cap = market.market_cap_exchange,
location = market.location,
currency = market.currency)
@app.route('/api/location/<int:id>', methods=['GET'])
def get_location(id):
location = Location.query.get(id)
return jsonify(id = id,
name = location.name,
iso = location.iso,
capital = location.capital,
GDP = location.gdp,
location = location.location_exchange,
currency = location.currency)
@app.route('/api/currency/<int:id>', methods=['GET'])
def get_currency(id):
currency = Currency.query.get(id)
return jsonify(id = id,
name = currency.name,
exchange_rate = currency.exchange_rate,
exchanges = currency.exchanges,
locations = currency.locations,
currency = currency.currency)
@app.route('/api/company/<int:id>', methods=['GET'])
def get_company(id):
company = Company.query.get(id)
return jsonify(id = id,
name = company.name,
symbol = company.symbol,
exchange = company.exchange,
location = company.location,
open_price = company.open_price,
previous_price = company.previous_price,
percent_change = company.percent_change,
year_high = company.year_high,
ask_price = company.ask_price,
eps = company.eps,
peg = company.peg,
days_range = company.days_range,
percent_change_fifty = company.percent_change_fifty,
percent_change_twohundred = company.percent_change_twohundred,
volume = company.volume,
avg_volume = company.avg_volume,
market_cap = company.market_cap)
@app.route('/api', methods=['GET'])
def get_entry_points():
return jsonify(exchange_url = "/exchange",
company_url = "/company",
location_url = "/location",
currency_url = "/currency")
"""
Minor routing changes for POST request
"""
@app.route('/search', methods=['GET', 'POST'])
def search():
hackString = ""
url = request.form['url']
thisString = url.split('=')
    # strip whitespace from each piece; empty pieces become None
thisString = [stri.strip() if len(stri) > 0 else None for stri in thisString]
andqueries = thisString
search_queryand1 = Location.query.filter(or_(Location.name.contains(andqueries[0]),Location.iso.contains(andqueries[0]), Location.capital.contains(andqueries[0]),Location.gdp.contains(andqueries[0]),Location.currency.contains(andqueries[0]), Location.location_exchange.contains(andqueries[0])))
search_queryand2 = Exchange.query.filter(or_(Exchange.exchange.contains(andqueries[0]),Exchange.name.contains(andqueries[0]),Exchange.market_cap_exchange.contains(andqueries[0]),Exchange.currency.contains(andqueries[0]),Exchange.location.contains(andqueries[0])))
search_queryand3 = Currency.query.filter(or_(Currency.name.contains(andqueries[0]),Currency.currency.contains(andqueries[0]),Currency.locations.contains(andqueries[0]),Currency.exchanges.contains(andqueries[0]), Currency.exchange_rate.contains(andqueries[0])))
search_queryand4 = Company.query.filter(or_(Company.symbol.contains(andqueries[0]),Company.name.contains(andqueries[0]),Company.exchange.contains(andqueries[0]),Company.currency.contains(andqueries[0]),Company.location.contains(andqueries[0]),Company.open_price.contains(andqueries[0]),Company.previous_price.contains(andqueries[0]),Company.percent_change.contains(andqueries[0]),Company.year_high.contains(andqueries[0]),Company.ask_price.contains(andqueries[0]),Company.eps.contains(andqueries[0]),Company.peg.contains(andqueries[0]),Company.percent_change_fifty.contains(andqueries[0]),Company.volume.contains(andqueries[0]),Company.avg_volume.contains(andqueries[0]),Company.market_cap.contains(andqueries[0])))
index = 0
for x in thisString:
hackString += thisString[index]
index = index + 1
hackString = hackString.split(" ")
queries = hackString
index2 = 0
search_queryor1 = []
search_queryor2 = []
search_queryor3 = []
search_queryor4 = []
for i in queries:
search_queryor1 += Location.query.filter(or_(Location.name.contains(queries[index2]),Location.iso.contains(queries[index2]), Location.capital.contains(queries[index2]),Location.gdp.contains(queries[index2]),Location.currency.contains(queries[index2]), Location.location_exchange.contains(queries[index2])))
search_queryor2 += Exchange.query.filter(or_(Exchange.exchange.contains(queries[index2]),Exchange.name.contains(queries[index2]),Exchange.market_cap_exchange.contains(queries[index2]),Exchange.currency.contains(queries[index2]),Exchange.location.contains(queries[index2])))
search_queryor3 += Currency.query.filter(or_(Currency.name.contains(queries[index2]),Currency.currency.contains(queries[index2]),Currency.locations.contains(queries[index2]),Currency.exchanges.contains(queries[index2]), Currency.exchange_rate.contains(queries[index2])))
search_queryor4 += Company.query.filter(or_(Company.symbol.contains(queries[index2]),Company.name.contains(queries[index2]),Company.exchange.contains(queries[index2]),Company.currency.contains(queries[index2]),Company.location.contains(queries[index2]),Company.open_price.contains(queries[index2]),Company.previous_price.contains(queries[index2]),Company.percent_change.contains(queries[index2]),Company.year_high.contains(queries[index2]),Company.ask_price.contains(queries[index2]),Company.eps.contains(queries[index2]),Company.peg.contains(queries[index2]),Company.percent_change_fifty.contains(queries[index2]),Company.volume.contains(queries[index2]),Company.avg_volume.contains(queries[index2]),Company.market_cap.contains(queries[index2])))
index2 = index2 + 1
return render_template('search.html', queries = queries, queriesand1 = search_queryand1, queriesand2 = search_queryand2, queriesand3 = search_queryand3, queriesand4 = search_queryand4, queriesor1 = search_queryor1, queriesor2= search_queryor2, queriesor3 =search_queryor3, queriesor4 = search_queryor4 ,title="Search")
|
StarcoderdataPython
|
3315358
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Functions to create expect scripts
"""
from __future__ import print_function, absolute_import, with_statement
import re
import os
import subprocess
import pexpect
import struct
import fcntl
import termios
import signal
import sys
import requests
import hammercloud
from hammercloud.templates import env
def virtual():
return True
# pylint: disable=no-method-argument
class DevNull(object):
'''
don't print out log stuff from pexpect
'''
def write(*args):
'''
fake write
'''
pass
def flush(*args):
'''
fake flush
'''
pass
class Shell(object):
def __init__(self, constants):
self.constants = constants
def host(self, logininfo):
'''
login to hypervisor
'''
        raise NotImplementedError('Function not implemented: {0}'.format('host'))
def ssh(self, logininfo):
'''
ssh into managed cloud server using /usr/bin/env expect
'''
template = env.get_template('ssh.jinja')
script_path = logininfo.script_path
with open(script_path, 'w') as expectfile:
print(template.render(login=logininfo, consts=self.constants), file=expectfile)
os.chmod(script_path, 448)
return logininfo.hostname
def sftp(self, logininfo, transfer, src, dest, quiet=False, executable=False):
'''
ssh into managed cloud server using /usr/bin/env expect
'''
if not logininfo.admin_password:
raise Exception('Unmanaged Cloud Server: no rack password')
child = pexpect.spawn(
(u'sftp -o PubkeyAuthentication=no -o RSAAuthentication=no '
u'-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null '
u'-o GSSAPIAuthentication=no -P {login.port} {login.ssh_args} '
u'{login.ssh_user}@{login.ip}').format(login=logininfo),
timeout=logininfo.timeout,
logfile=DevNull() if quiet is True else sys.stdout,
echo=False,
encoding='utf-8'
)
child.expect([u'.*[P|p]assword: '])
child.sendline(logininfo.admin_password)
child.expect(u"sftp>")
child.sendline(u"{0} {1} {2}".format(transfer, src, dest))
if executable is True:
child.expect(u"sftp>")
child.sendline(u"chmod 755 {0}".format(dest))
child.expect(u"sftp>")
child.sendeof()
def cmd(self, logininfo, returnresp=False):
'''
run a command on a server
'''
if not logininfo.admin_password:
raise Exception('Unmanaged Cloud Server: no rack password')
thescript = []
thescript.append(u'ssh -ttt -l {login.ssh_user} -p '
u'{login.port} -o NumberOfPasswordPrompts=1 '
u'-o StrictHostKeyChecking=no -o '
u'UserKnownHostsFile=/dev/null -o '
u'GSSAPIAuthentication=no {login.ssh_args} '
u'{login.ip} -- ')
if logininfo.skip_root:
thescript.append(u'"{0}"'.format(logininfo.command))
else:
thescript.append(u'"sudo -k {0}"'.format(logininfo.command))
result = pexpect.run(
u''.join(thescript).format(login=logininfo),
timeout=-1,
withexitstatus=True,
events={
u'{login.ssh_user}@{login.ip}\'s password:'.format(login=logininfo):
u'{login.admin_password}\n'.format(login=logininfo),
u'Password:': u'{login.admin_password}\n'.format(login=logininfo),
u'password for {login.ssh_user}:'.format(login=logininfo):
u'{login.admin_password}\n'.format(login=logininfo)
}
)
response = re.sub(u'.*Warning: Permanently.*', '', result[0].decode('utf-8'))
response = re.sub(
u'.*{login.ssh_user}@{login.ip}\'s password:'.format(login=logininfo),
'',
response
)
response = re.sub(
u'.*password for {login.ssh_user}:'.format(login=logininfo),
'',
response
)
response = re.sub(u'.*Password:.*', '', response)
response = re.sub(u'Connection to .* closed.', '', response)
if returnresp is True:
return response.strip()
print('### {0} ###'.format(logininfo.hostname))
print('\n{0}\n'.format(response.strip()))
def script(self, logininfo, filepath):
'''
run script on managed cloud server using /usr/bin/env expect
'''
if not logininfo.admin_password:
raise Exception('Unmanaged Cloud Server: no rack password')
if '/' in filepath:
logininfo.script = filepath.split('/')[-1]
else:
logininfo.script = filepath
if filepath.startswith('https://'):
newpath = os.path.expanduser(
'~/.cache/hammercloud/{login.script}'.format(login=logininfo)
)
if not os.path.exists(newpath):
with open(newpath, 'w') as newfile:
resp = requests.get(filepath)
                    print(resp.text, file=newfile)
filepath = newpath
        self.sftp(
logininfo, 'put', filepath, logininfo.script,
quiet=True, executable=True
)
command = '/home/{login.ssh_user}/{login.script} {login.extraargs}; '
if not logininfo.no_clean:
command += 'rm /home/{login.ssh_user}/{login.script}'
logininfo.command = command
        self.cmd(logininfo)
def setup_dc(self, datacenter, bastion_key, bastion_user):
'''
ssh-copy-id ssh keys to bastions
'''
output = []
output.append('#!/usr/bin/env expect\n')
output.append('exec rm -f $argv0\n')
output.append('set timeout {timeout}\n'.format(timeout=30))
output.append('log_user 0\n')
output.append('match_max 100000\n')
for cbast in self.constants.CBASTS[datacenter]:
output.append((
'spawn -noecho ssh-copy-id -i {bastion_key} '
'-o RSAAuthentication=no -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null -o ProxyCommand=none '
'-o GSSAPIAuthentication=no {username}@{datacenter}\n'
).format(bastion_key=bastion_key, username=bastion_user, datacenter=cbast))
output.append('match_max 100000\n')
output.append('interact {\n')
output.append('\t\\034 exit\n')
output.append('}\n')
confdir = '{0}/.cache/hammercloud/'.format(os.path.expanduser('~'))
script_path = '{0}/{1}1.sh'.format(confdir, datacenter)
with open(script_path, 'w') as fh_:
fh_.write("".join(output))
os.chmod(script_path, 448)
subprocess.call(script_path)
def bastion(self, datacenter, bastion_key, bastion_user):
'''
ssh to the bastion in the datacenter
'''
output = []
output.append('#!/usr/bin/env expect\n')
output.append('exec rm -f $argv0\n')
output.append('set timeout {timeout}\n'.format(timeout=30))
output.append('log_user 0\n')
output.append('match_max 100000\n')
output.append((
'spawn -noecho ssh -i {bastion_key} '
'-o RSAAuthentication=no -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null -o ProxyCommand=none '
'-o GSSAPIAuthentication=no {username}@{datacenter}\n'
).format(bastion_key=bastion_key, username=bastion_user, datacenter=self.constants.DCS[datacenter]))
output.append('match_max 100000\n')
output.append('interact {\n')
output.append('\t\\034 exit\n')
output.append('}\n')
confdir = '{0}/.cache/hammercloud/'.format(os.path.expanduser('~'))
script_path = '{0}/{1}1.sh'.format(confdir, datacenter)
with open(script_path, 'w') as fh_:
fh_.write("".join(output))
os.chmod(script_path, 448)
subprocess.call(script_path)
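

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# The login object below is a stand-in namedtuple exposing just the attributes
# that Shell.cmd() reads (hostname, ip, port, ssh_user, ssh_args,
# admin_password, command, skip_root); the real hammercloud login object also
# carries more fields (timeout, script_path, extraargs, ...).
if __name__ == '__main__':
    from collections import namedtuple

    LoginInfo = namedtuple(
        'LoginInfo',
        ['hostname', 'ip', 'port', 'ssh_user', 'ssh_args',
         'admin_password', 'command', 'skip_root']
    )
    login = LoginInfo(
        hostname='example-server', ip='198.51.100.10', port=22,
        ssh_user='rack', ssh_args='', admin_password='secret',
        command='uptime', skip_root=False
    )
    shell = Shell(constants=None)  # constants is only used by ssh()/setup_dc()/bastion()
    # shell.cmd(login) would ssh to login.ip and run `sudo -k uptime`, printing the output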
|
StarcoderdataPython
|
6653043
|
<gh_stars>0
"""
Credits:
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import concurrent.futures
import datetime as dt
import pytest
import numpy as np
from eolearn.core import (
EOPatch,
EOTask,
EONode,
EOWorkflow,
OutputTask,
WorkflowResults,
FeatureType,
InitializeFeatureTask,
RemoveFeatureTask,
CreateEOPatchTask,
)
from eolearn.core.eoworkflow import NodeStats
class CustomException(ValueError):
pass
class InputTask(EOTask):
def execute(self, *, val=None):
return val
class DivideTask(EOTask):
def execute(self, x, y, *, z=0):
return x / y + z
class IncTask(EOTask):
def execute(self, x, *, d=1):
return x + d
class ExceptionTask(EOTask):
def execute(self, *_, **__):
raise CustomException
def test_workflow_arguments():
input_node1 = EONode(InputTask())
input_node2 = EONode(InputTask(), name="some name")
divide_node = EONode(DivideTask(), inputs=(input_node1, input_node2), name="some name")
output_node = EONode(OutputTask(name="output"), inputs=[divide_node])
workflow = EOWorkflow([input_node1, input_node2, divide_node, output_node])
with concurrent.futures.ProcessPoolExecutor(max_workers=5) as executor:
k2future = {
k: executor.submit(workflow.execute, {input_node1: {"val": k**3}, input_node2: {"val": k**2}})
for k in range(2, 100)
}
executor.shutdown()
for k in range(2, 100):
assert k2future[k].result().outputs["output"] == k
result1 = workflow.execute({input_node1: {"val": 15}, input_node2: {"val": 3}})
assert result1.outputs["output"] == 5
result2 = workflow.execute({input_node1: {"val": 6}, input_node2: {"val": 3}})
assert result2.outputs["output"] == 2
result3 = workflow.execute({input_node1: {"val": 6}, input_node2: {"val": 3}, divide_node: {"z": 1}})
assert result3.outputs[output_node.task.name] == 3
def test_get_nodes():
in_node = EONode(InputTask())
inc_node0 = EONode(IncTask(), inputs=[in_node])
inc_node1 = EONode(IncTask(), inputs=[inc_node0])
inc_node2 = EONode(IncTask(), inputs=[inc_node1])
output_node = EONode(OutputTask(name="out"), inputs=[inc_node2])
eow = EOWorkflow([in_node, inc_node0, inc_node1, inc_node2, output_node])
returned_nodes = eow.get_nodes()
assert [
in_node,
inc_node0,
inc_node1,
inc_node2,
output_node,
] == returned_nodes, "Returned nodes differ from original nodes"
arguments_dict = {in_node: {"val": 2}, inc_node0: {"d": 2}}
workflow_res = eow.execute(arguments_dict)
manual_res = []
for _, node in enumerate(returned_nodes):
manual_res = [node.task.execute(*manual_res, **arguments_dict.get(node, {}))]
assert workflow_res.outputs["out"] == manual_res[0], "Manually running returned nodes produces different results."
def test_get_node_with_uid():
in_node = EONode(InputTask())
inc_node = EONode(IncTask(), inputs=[in_node])
output_node = EONode(OutputTask(name="out"), inputs=[inc_node])
eow = EOWorkflow([in_node, inc_node, output_node])
assert all(node == eow.get_node_with_uid(node.uid) for node in (in_node, inc_node, output_node))
    assert eow.get_node_with_uid("nonexistent") is None
with pytest.raises(KeyError):
        eow.get_node_with_uid("nonexistent", fail_if_missing=True)
@pytest.mark.parametrize(
"faulty_parameters",
[
[InputTask(), IncTask(), IncTask()],
EONode(InputTask()),
[EONode(IncTask()), IncTask()],
[EONode(IncTask()), (EONode(IncTask()), "name")],
[EONode(IncTask()), (EONode(IncTask(), inputs=[EONode(IncTask())]))],
[EONode(IncTask()), (EONode(IncTask()), IncTask())],
],
)
def test_input_exceptions(faulty_parameters):
with pytest.raises(ValueError):
EOWorkflow(faulty_parameters)
def test_bad_structure_exceptions():
in_node = EONode(InputTask())
inc_node0 = EONode(IncTask(), inputs=[in_node])
inc_node1 = EONode(IncTask(), inputs=[inc_node0])
inc_node2 = EONode(IncTask(), inputs=[inc_node1])
output_node = EONode(OutputTask(name="out"), inputs=[inc_node2])
# This one must work
EOWorkflow([in_node, inc_node0, inc_node1, inc_node2, output_node])
# Duplicated node
with pytest.raises(ValueError):
EOWorkflow([in_node, inc_node0, inc_node0, inc_node1, inc_node2, output_node])
# Missing node
with pytest.raises(ValueError):
EOWorkflow([in_node, inc_node0, inc_node2, output_node])
# Create circle (much more difficult now)
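    # EONode instances do not allow direct attribute assignment, so the base-class
    # __setattr__ is invoked directly to rewire inc_node0's inputs to inc_node1,
    # creating a cycle that EOWorkflow construction must reject with a ValueError.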
super(EONode, inc_node0).__setattr__("inputs", (inc_node1,))
with pytest.raises(ValueError):
EOWorkflow([in_node, inc_node0, inc_node1, inc_node2, output_node])
def test_multiedge_workflow():
in_node = EONode(InputTask())
inc_node = EONode(IncTask(), inputs=[in_node])
div_node = EONode(DivideTask(), inputs=[inc_node, inc_node])
output_node = EONode(OutputTask(name="out"), inputs=[div_node])
workflow = EOWorkflow([in_node, output_node, inc_node, div_node])
arguments_dict = {in_node: {"val": 2}}
workflow_res = workflow.execute(arguments_dict)
assert workflow_res.outputs["out"] == 1
def test_workflow_copying_eopatches():
feature1 = FeatureType.DATA, "data1"
feature2 = FeatureType.DATA, "data2"
create_node = EONode(CreateEOPatchTask())
init_node = EONode(
InitializeFeatureTask([feature1, feature2], shape=(2, 4, 4, 3), init_value=1),
inputs=[create_node],
)
remove_node1 = EONode(RemoveFeatureTask([feature1]), inputs=[init_node])
remove_node2 = EONode(RemoveFeatureTask([feature2]), inputs=[init_node])
output_node1 = EONode(OutputTask(name="out1"), inputs=[remove_node1])
output_node2 = EONode(OutputTask(name="out2"), inputs=[remove_node2])
workflow = EOWorkflow([create_node, init_node, remove_node1, remove_node2, output_node1, output_node2])
results = workflow.execute()
eop1 = results.outputs["out1"]
eop2 = results.outputs["out2"]
assert eop1 == EOPatch(data={"data2": np.ones((2, 4, 4, 3), dtype=np.uint8)})
assert eop2 == EOPatch(data={"data1": np.ones((2, 4, 4, 3), dtype=np.uint8)})
def test_workflows_reusing_nodes():
in_node = EONode(InputTask())
node1 = EONode(IncTask(), inputs=[in_node])
node2 = EONode(IncTask(), inputs=[node1])
out_node = EONode(OutputTask(name="out"), inputs=[node2])
input_args = {in_node: {"val": 2}, node2: {"d": 2}}
original = EOWorkflow([in_node, node1, node2, out_node])
node_reuse = EOWorkflow([in_node, node1, node2, out_node])
assert original.execute(input_args).outputs["out"] == node_reuse.execute(input_args).outputs["out"]
def test_workflow_results():
input_node = EONode(InputTask())
output_node = EONode(OutputTask(name="out"), inputs=[input_node])
workflow = EOWorkflow([input_node, output_node])
results = workflow.execute({input_node: {"val": 10}})
assert isinstance(results, WorkflowResults)
assert results.outputs == {"out": 10}
results_without_outputs = results.drop_outputs()
assert results_without_outputs.outputs == {}
assert id(results_without_outputs) != id(results)
assert isinstance(results.start_time, dt.datetime)
assert isinstance(results.end_time, dt.datetime)
assert results.start_time < results.end_time < dt.datetime.now()
assert isinstance(results.stats, dict)
assert len(results.stats) == 2
for node in [input_node, output_node]:
stats_uid = node.uid
assert isinstance(results.stats.get(stats_uid), NodeStats)
def test_workflow_from_endnodes():
input_node1 = EONode(InputTask())
input_node2 = EONode(InputTask(), name="<NAME>")
divide_node = EONode(DivideTask(), inputs=(input_node1, input_node2), name="<NAME>")
output_node = EONode(OutputTask(name="out"), inputs=[divide_node])
regular_workflow = EOWorkflow([input_node1, input_node2, divide_node, output_node])
endnode_workflow = EOWorkflow.from_endnodes(output_node)
assert isinstance(endnode_workflow, EOWorkflow)
assert set(endnode_workflow.get_nodes()) == set(regular_workflow.get_nodes()), "Nodes are different"
with concurrent.futures.ProcessPoolExecutor(max_workers=5) as executor:
regular_results = [
executor.submit(regular_workflow.execute, {input_node1: {"val": k**3}, input_node2: {"val": k**2}})
for k in range(2, 100)
]
endnode_results = [
executor.submit(endnode_workflow.execute, {input_node1: {"val": k**3}, input_node2: {"val": k**2}})
for k in range(2, 100)
]
executor.shutdown()
assert all(
x.result().outputs["out"] == y.result().outputs["out"] for x, y in zip(regular_results, endnode_results)
)
endnode_duplicates = EOWorkflow.from_endnodes(output_node, output_node, divide_node)
assert set(endnode_duplicates.get_nodes()) == set(regular_workflow.get_nodes()), "Fails if endnodes are repeated"
def test_exception_handling():
input_node = EONode(InputTask(), name="xyz")
exception_node = EONode(ExceptionTask(), inputs=[input_node])
increase_node = EONode(IncTask(), inputs=[exception_node])
workflow = EOWorkflow([input_node, exception_node, increase_node])
with pytest.raises(CustomException):
workflow.execute()
results = workflow.execute(raise_errors=False)
assert results.outputs == {}
assert results.error_node_uid == exception_node.uid
assert len(results.stats) == 2
for node in [input_node, exception_node]:
node_stats = results.stats[node.uid]
assert node_stats.node_uid == node.uid
assert node_stats.node_name == node.name
if node is exception_node:
assert isinstance(node_stats.exception, CustomException)
assert node_stats.exception_traceback.startswith("Traceback")
else:
assert node_stats.exception is None
assert node_stats.exception_traceback is None
|
StarcoderdataPython
|
8015293
|
<gh_stars>100-1000
import os
import FWCore.ParameterSet.Config as cms
from Alignment.APEEstimation.ApeEstimatorSummary_cfi import *
ApeEstimatorSummaryBaseline = ApeEstimatorSummary.clone(
setBaseline = True,
apeWeight = "entriesOverSigmaX2",
#sigmaFactorFit = 2.5,
)
ApeEstimatorSummaryIter = ApeEstimatorSummary.clone(
#setBaseline = False,
apeWeight = "entriesOverSigmaX2",
#sigmaFactorFit = 2.5,
correctionScaling = 0.6,
)
|
StarcoderdataPython
|
3462910
|
# Generated by Django 2.2.7 on 2020-03-27 07:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forum', '0072_communityinvitation'),
]
operations = [
migrations.AddField(
model_name='community',
name='nice_name',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
StarcoderdataPython
|
1977509
|
<reponame>subhadarship/nlp4if-2021
import logging
from typing import List
from data_utils import LabelField
logger = logging.getLogger(__name__)
def postprocess_labels(labels: List[List[int]], label_fields: List[LabelField]) -> List[List[str]]:
"""Postprocess labels. First convert ints to corresponding strings.
Then apply sanity postprocessing for q2, q3, q4, q5"""
out = []
for li in labels:
out.append([label_fields[idx].itos[li[idx]] for idx in range(7)])
out = sanity_postprocess(out)
return out
def sanity_postprocess(inp_list: List[List[str]]) -> List[List[str]]:
"""Change q2, q3, q4, q5 predictions based on q1's prediction"""
postprocessed = []
num_total, num_changed = 0, 0
for row in inp_list:
post = []
q1_pred = row[0]
for col_idx, item in enumerate(row):
if col_idx in [1, 2, 3, 4]:
num_total += 1
if q1_pred == 'no':
post.append('nan')
if item != 'nan':
num_changed += 1
else:
post.append(item)
else:
post.append(item)
postprocessed.append(post)
logger.info(
f'{num_changed} ({100 * num_changed / num_total:0.2f} %) out of {num_total} q2, q3, q4, q5 predictions are changed during postprocessing')
return postprocessed
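

if __name__ == '__main__':
    # Tiny illustrative check (not part of the original module): when q1 is 'no',
    # the q2-q5 slots (columns 1-4) are forced to 'nan'; other columns are untouched.
    logging.basicConfig(level=logging.INFO)
    demo = [['no', 'yes', 'nan', 'yes', 'no', 'q6_label', 'q7_label']]
    print(sanity_postprocess(demo))
    # expected: [['no', 'nan', 'nan', 'nan', 'nan', 'q6_label', 'q7_label']]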
|
StarcoderdataPython
|
3227576
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Noop driver for manually executing OOB tasks."""
import time
import logging
from oslo_config import cfg
import drydock_provisioner.error as errors
import drydock_provisioner.objects.fields as hd_fields
import drydock_provisioner.drivers.oob.driver as oob
class ManualDriver(oob.OobDriver):
oob_types_supported = ['manual']
def __init__(self, **kwargs):
super(ManualDriver, self).__init__(**kwargs)
self.driver_name = "manual_driver"
self.driver_key = "manual_driver"
self.driver_desc = "Manual (Noop) OOB Driver"
self.logger = logging.getLogger(cfg.CONF.logging.oobdriver_logger_name)
def execute_task(self, task_id):
task = self.state_manager.get_task(task_id)
if task is None:
self.logger.error("Invalid task %s" % (task_id))
raise errors.DriverError("Invalid task %s" % (task_id))
if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" % (self.driver_desc,
task.action))
design_ref = task.design_ref
if design_ref is None:
raise errors.DriverError("No design ID specified in task %s" %
(task_id))
self.orchestrator.task_field_update(
task.get_id(), status=hd_fields.TaskStatus.Running)
self.logger.info("Sleeping 60s to allow time for manual OOB %s action"
% task.action)
time.sleep(60)
task.set_status(hd_fields.TaskStatus.Complete)
task.success()
task.save()
return
|
StarcoderdataPython
|
5125264
|
<filename>AnimationPlot.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import animatplot as amp
# Draw the animated plot
def plot_animation(ref_df):
    # Get the X-axis and Y-axis data
X_data = 0
Y_data = 0
    # Draw the reference (ref) trajectory
time_data_np = np.array(ref_df["time"])
x_np = np.array(ref_df["x"])
y_np = np.array(ref_df["y"])
sensor_1_np = np.array(ref_df["sensor1"])
sensor_2_np = np.array(ref_df["sensor2"])
sensor_3_np = np.array(ref_df["sensor3"])
    Xs_log = np.asarray([x_np[t:t+10] for t in range(len(time_data_np)-10)]) # windows of X-axis data along the time axis
    Ys_log = [y_np[t:t+10] for t in range(len(time_data_np)-10)] # windows of Y-axis data along the time axis
    sensor_1_log = [sensor_1_np[t:t+10] for t in range(len(time_data_np)-10)]
    sensor_2_log = [sensor_2_np[t:t+10] for t in range(len(time_data_np)-10)]
    sensor_3_log = [sensor_3_np[t:t+10] for t in range(len(time_data_np)-10)]
    Time_log = np.asarray([time_data_np[t:t+10] for t in range(len(time_data_np)-10)])
    # Draw the subplots (show the X-Y information in a 3-row layout)
ax1 = plt.subplot2grid((3,2), (0,0), rowspan=3)
ax2 = plt.subplot2grid((3,2), (0,1))
ax3 = plt.subplot2grid((3,2), (1,1))
ax4 = plt.subplot2grid((3,2), (2,1))
    ax1.set_xlim([x_np.min(), x_np.max()]) # set the X plot range
    ax1.set_ylim([y_np.min(), y_np.max()]) # set the Y plot range
block = amp.blocks.Scatter(Xs_log, Ys_log,label="X_Y",ax=ax1)
block2 = amp.blocks.Line(Time_log, sensor_1_log, label="sensor1",ax=ax2)
block3 = amp.blocks.Line(Time_log, sensor_2_log, label="sensor2",ax=ax3)
block4 = amp.blocks.Line(Time_log, sensor_3_log, label="sensor3",ax=ax4)
    ax2.set_xlim([time_data_np.min(), time_data_np.max()]) # set the plot range
    ax2.set_ylim([sensor_1_np.min(), sensor_1_np.max()]) # set the plot range
    ax3.set_xlim([time_data_np.min(), time_data_np.max()]) # set the plot range
    ax3.set_ylim([sensor_1_np.min(), sensor_1_np.max()]) # set the plot range
    ax4.set_xlim([time_data_np.min(), time_data_np.max()]) # set the plot range
    ax4.set_ylim([sensor_1_np.min(), sensor_1_np.max()]) # set the plot range
ax1.legend()
ax2.legend()
ax3.legend()
ax4.legend()
plt.subplots_adjust(wspace=0.4, hspace=0.6)
anim = amp.Animation([block,block2,block3,block4])
anim.controls()
anim.save_gif("result")
plt.show()
if __name__ == '__main__':
csv_file_path = "data\\plotdata.csv"
    # Read the CSV
    ref_df = pd.read_csv(csv_file_path, encoding="utf-8-sig") # assumes the data may contain Japanese (Shift-JIS) text
plot_animation(ref_df)
print("finished!")
|
StarcoderdataPython
|
4870095
|
<reponame>bjascob/SmartLMVocabs
#!/usr/bin/python3
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
from fnmatch import fnmatch
from subprocess import Popen, PIPE
from tflmlib import DataContainer
from tflmlib import ProgressBar
from configs import config
# Billion Word Corpus
bw_pkl_dir = os.path.join(config.bw_corpus, 'BWParsed_FirstPass')
out_txt_dir = os.path.join(config.bw_corpus, 'BWTokenized')
out_pkl_dir = os.path.join(config.bw_corpus, 'BWParsed')
bw_fn_pat = 'bw_*'
if __name__ == '__main__':
print('*' * 80)
print()
test = False
# Create directories if needed
if not os.path.exists(out_txt_dir):
os.mkdir(out_txt_dir)
if not os.path.exists(out_pkl_dir):
os.mkdir(out_pkl_dir)
# Loop through all the files
print('Loading the raw corpus')
fns = sorted([os.path.join(bw_pkl_dir, fn) for fn in
os.listdir(bw_pkl_dir) if fnmatch(fn, bw_fn_pat)])
if test: fns = fns[:1]
bw_set = set()
duplicates = 0
for i, fn in enumerate(fns):
# Read the data
print(' %d/%d : %s' % (i + 1, len(fns), fn))
dcout = DataContainer()
dcout.sents = []
txt_sents = []
dcin = DataContainer.load(fn)
pb = ProgressBar(len(dcin.sents))
for i, sent in enumerate(dcin.sents):
text = ' '.join(sent['words'])
if text not in bw_set:
bw_set.add(text)
dcout.sents.append(sent)
txt_sents.append(text)
else:
duplicates += 1
if 0 == i % 100: pb.update(i)
pb.clear()
# Save the data
fnbase, _ = os.path.splitext(os.path.basename(fn))
out_pkl_fn = os.path.join(out_pkl_dir, fnbase + '.pkl')
out_txt_fn = os.path.join(out_txt_dir, fnbase + '.txt')
prn_pkl_fn = os.sep.join(out_pkl_fn.split(os.sep)[-3:])
prn_txt_fn = os.sep.join(out_txt_fn.split(os.sep)[-3:])
print(' Saving data to %s and %s' % (prn_pkl_fn, prn_txt_fn))
dcout.save(out_pkl_fn)
with open(out_txt_fn, 'w') as f:
for text in txt_sents:
f.write('%s\n' % text)
print()
print('%d duplicates removed from %d files' % (duplicates, len(fns)))
print()
|
StarcoderdataPython
|
6532377
|
from aries_cloudagent.messaging.base_handler import BaseHandler, BaseResponder, RequestContext
from ..messages.read_all_data_agreement_template_response import ReadAllDataAgreementTemplateResponseMessage
import json
class ReadAllDataAgreementTemplateResponseHandler(BaseHandler):
"""Handle for data-agreements/1.0/read-all-template-response message"""
async def handle(self, context: RequestContext, responder: BaseResponder):
"""
Message handler logic for data-agreements/1.0/read-all-template-response message.
"""
# Assert if received message is of type ReadAllDataAgreementTemplateResponseMessage
assert isinstance(context.message, ReadAllDataAgreementTemplateResponseMessage)
self._logger.info(
"Received data-agreements/1.0/read-all-template-response message: \n%s",
json.dumps(context.message.serialize(), indent=4)
)
|
StarcoderdataPython
|
1734997
|
import requests
import json
# response = requests.get("http://pokeapi.co/api/v2/pokemon/charizard")
pokemon_name = raw_input("What Pokemon do you want info about? ")
response = requests.get("https://api.pokemontcg.io/v1/cards?name={}&pageSize=2".format(pokemon_name))
#TODO: if response.text["cards"] is empty; return error message
# text = response.json()
text = response.text
parsed = json.loads(text)
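# One possible handling of the TODO above (assumes the API returns a "cards" list;
# an empty or missing list means no matching Pokemon card was found):
if not parsed.get("cards"):
    print "No cards found for '{}'".format(pokemon_name)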
print json.dumps(parsed, indent=4, sort_keys=True)
# print "text type:", type(text)
# print "text.forms:", text["forms"]
# print "forms type:", type(text["forms"])
# print "response text:", response.text
|
StarcoderdataPython
|
1788159
|
#######################################################################
#
# InfoBar Tuner State for Enigma-2
# Coded by betonme (c) 2011 <glaserfrank(at)gmail.com>
# Support: http://www.i-have-a-dreambox.com/wbb2/thread.php?threadid=162629
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#######################################################################
from enigma import eTimer
# Config
from Components.config import *
# Plugin internal
from ExtensionHandler import addExtension, removeExtension
# Globals
InfoBarShow = None
InfoBarHide = None
InfoBarToggle = None
#######################################################
# InfoBarShowHide for MoviePlayer integration
def overwriteInfoBar():
from Screens.InfoBarGenerics import InfoBarShowHide
global InfoBarShow, InfoBarHide, InfoBarToggle
if config.infobartunerstate.show_infobar.value:
if InfoBarShow is None:
# Backup original function
InfoBarShow = InfoBarShowHide._InfoBarShowHide__onShow
# Overwrite function
InfoBarShowHide._InfoBarShowHide__onShow = InfoBarShowTunerState
if InfoBarHide is None:
# Backup original function
InfoBarHide = InfoBarShowHide._InfoBarShowHide__onHide
# Overwrite function
InfoBarShowHide._InfoBarShowHide__onHide = InfoBarHideTunerState
if config.infobartunerstate.show_ontoggle.value:
if InfoBarToggle is None:
# Backup original function
InfoBarToggle = InfoBarShowHide.toggleShow
# Overwrite function
InfoBarShowHide.toggleShow = InfoBarToggleTunerState
# InfoBar Events
def recoverInfoBar():
from Screens.InfoBarGenerics import InfoBarShowHide
global InfoBarShow, InfoBarHide, InfoBarToggle
if InfoBarShow:
InfoBarShowHide._InfoBarShowHide__onShow = InfoBarShow
InfoBarShow = None
if InfoBarHide:
InfoBarShowHide._InfoBarShowHide__onHide = InfoBarHide
InfoBarHide = None
if InfoBarToggle:
InfoBarShowHide.toggleShow = InfoBarToggle
InfoBarToggle = None
def InfoBarShowTunerState(self):
from Plugins.Extensions.InfoBarTunerState.plugin import gInfoBarTunerState
global gInfoBarTunerState
global InfoBarShow
if InfoBarShow:
InfoBarShow(self)
if gInfoBarTunerState:
gInfoBarTunerState.show()
def InfoBarHideTunerState(self):
from Plugins.Extensions.InfoBarTunerState.plugin import gInfoBarTunerState
global gInfoBarTunerState
global InfoBarHide
if InfoBarHide:
InfoBarHide(self)
if gInfoBarTunerState:
gInfoBarTunerState.hide()
def InfoBarToggleTunerState(self):
from Plugins.Extensions.InfoBarTunerState.plugin import gInfoBarTunerState
global gInfoBarTunerState
global InfoBarToggle
if InfoBarToggle:
InfoBarToggle(self)
if gInfoBarTunerState:
gInfoBarTunerState.toggle()
class InfoBarHandler(object):
def __init__(self):
self.infobar = None
self.forceBindInfoBarTimer = eTimer()
try:
self.forceBindInfoBarTimer_conn = self.forceBindInfoBarTimer.timeout.connect(self.bindInfoBar)
except:
self.forceBindInfoBarTimer.callback.append(self.bindInfoBar)
# Bind InfoBarEvents
#self.bindInfoBar()
#self.onLayoutFinish.append(self.bindInfoBar)
# Workaround
# The Plugin starts before the InfoBar is instantiated
# Check every second if the InfoBar instance exists and try to bind our functions
# Is there an alternative solution?
if config.infobartunerstate.show_infobar.value:
self.forceBindInfoBarTimer.start(1000, False)
if config.infobartunerstate.show_overwrite.value:
overwriteInfoBar()
# Handle extension menu integration
if config.infobartunerstate.extensions_menu_show.value or config.infobartunerstate.extensions_menu_setup.value:
# Add to extension menu
addExtension()
else:
# Remove from extension menu
removeExtension()
def bindInfoBar(self):
# Reimport InfoBar to force update of the class instance variable
# Rebind only if it isn't done already
from Screens.InfoBar import InfoBar
if InfoBar.instance:
self.infobar = InfoBar.instance
bindShow = False
bindHide = False
if hasattr(InfoBar.instance, "onShow"):
if self.__onInfoBarEventShow not in InfoBar.instance.onShow:
InfoBar.instance.onShow.append(self.__onInfoBarEventShow)
bindShow = True
if hasattr(InfoBar.instance, "onHide"):
if self.__onInfoBarEventHide not in InfoBar.instance.onHide:
InfoBar.instance.onHide.append(self.__onInfoBarEventHide)
bindHide = True
if bindShow and bindHide:
# Bind was successful
self.forceBindInfoBarTimer.stop()
def unbindInfoBar(self):
if self.infobar:
if hasattr(self.infobar, "onShow"):
if self.__onInfoBarEventShow in self.infobar.onShow:
self.infobar.onShow.remove(self.__onInfoBarEventShow)
if hasattr(self.infobar, "onHide"):
if self.__onInfoBarEventHide in self.infobar.onHide:
self.infobar.onHide.remove(self.__onInfoBarEventHide)
def __onInfoBarEventShow(self):
self.show()
def __onInfoBarEventHide(self):
self.hide()
|
StarcoderdataPython
|
1770097
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Dataset available at: https://www.kaggle.com/c/house-prices-advanced-regression-techniques
def get_cat_num_columns(data):
categorical_columns = list()
numerical_columns = list()
for column in data.columns:
if data[column].dtype == 'int64' or data[column].dtype == 'float64':
numerical_columns.append(column)
else:
categorical_columns.append(column)
return categorical_columns, numerical_columns
def pre_processing(data):
data.drop('Id', inplace=True, axis=1)
categorical_columns, numerical_columns = get_cat_num_columns(data)
# Finding the top 10 columns by the count of missing data (NaN), if more than 85%
# of the values are non-nulls, we accept the column. Else delete it.
# top 10 columns by count of missing data
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['num_nulls', 'percentage'])
# If more than 85% of the values of a particular column should not be null, else drop it
threshold = len(data) * .85
data.dropna(thresh = threshold, axis = 1, inplace = True)
print(missing_data.head(10))
# TotRmsAbvGrd and GrLivArea seem to be highly correlated. This is because the number of rooms
# depends directly on the amount of living area which is represented by GrLivArea. So, we can
# remove one of these two columns. We will remove TotalRmsAbvGrd.
data.drop('TotRmsAbvGrd', axis=1, inplace=True)
# There are two sets of features with very high correlation, namely: ('TotalBsmtSF', '1stFlrSF') and
# ('GarageCars','GarageArea'). Usually the basement has the same floor area as the first floor, hence
# the correlation makes sense. Also, the number of cars that can fit into a garage is proportional to
# the area of the garage, which makes sense too. We can have one value from each set and drop the other.
# We shall drop GarageCars and TotalBsmtSF.
# dropping columns
data.drop(['TotalBsmtSF', 'GarageCars'], axis=1, inplace=True)
# top 10 columns by count of missing data
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['num_nulls', 'percentage'])
null_cols = missing_data[missing_data['percentage'] > 0].index.values
categorical_columns.append("MSSubClass")
numerical_columns.remove("MSSubClass")
for col in null_cols:
if col in numerical_columns:
data[col].fillna(data[col].mean(), inplace = True)
return data
training = pd.read_csv("/Users/manojkarthick/Documents/Fall-17/Machine-Learning/Project/train.csv")
test = pd.read_csv("/Users/manojkarthick/Documents/Fall-17/Machine-Learning/Project/test.csv")
whole_data = training.append(test, ignore_index=True)
whole_data.drop('SalePrice', axis=1, inplace=True)
whole_data = pre_processing(whole_data)
categorical_columns, numerical_columns = get_cat_num_columns(whole_data)
categorical_columns.append("MSSubClass")
numerical_columns.remove("MSSubClass")
whole_data = pd.get_dummies(whole_data, columns=categorical_columns)
# print(categorical_columns)
print(numerical_columns)
training_data = whole_data.iloc[:1460,:]
test_data = whole_data.iloc[1460:,:]
# Standardizing the numerical features present in the data for better efficiency of SVM/SVR.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
training_data[numerical_columns] = scaler.fit_transform(training_data[numerical_columns])
test_data[numerical_columns] = scaler.fit_transform(test_data[numerical_columns])
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
np.random.seed(0)
# Separating the training data into data matrix and target matrix
target = training['SalePrice']
independents = training_data
regressor = SVR(C=100, epsilon=2, kernel='linear')
predictions = regressor.fit(independents, target).predict(test_data)
# Writing to file for Kaggle submission
count = 1461
ss = open('sample_submission.csv', 'w')
for prediction in predictions:
to_write = "{},{}".format(count, prediction)
ss.write(to_write)
ss.write('\n')
count += 1
# ****************************************
# The Code below performs cross-validation
# ****************************************
# X_train, X_test, y_train, y_test = train_test_split(independents, target, test_size=0.25, random_state=0)
# mse = mean_squared_error(y_train, predictions)
# Cs = [1, 2, 5, 10]
# epsilons = [0, 0.1, 0.5, 1.0, 2.0, 5.0]
# kernels = ('linear', 'rbf')
# parameters = {'kernel': kernels, 'C': Cs, 'epsilon': epsilons, 'cache_size': [500]}
# X_train, X_test, y_train, y_test = train_test_split(independents, target, test_size=0.25, random_state=0)
# from sklearn.model_selection import GridSearchCV
# svr = SVR()
# regressor = GridSearchCV(svr, parameters, cv=10)
# regressor.fit(independents, target)
# print(regressor.best_params_)
# means = regressor.cv_results_['mean_test_score']
# stds = regressor.cv_results_['std_test_score']
# for mean, std, params in zip(means, stds, regressor.cv_results_['params']):
# print("%0.3f (+/-%0.03f) for %r"
# % (mean, std * 2, params))
# print()
|
StarcoderdataPython
|
8128928
|
<filename>tree_model_files/tree_model.py
import joblib
import pandas as pd
from sklearn.calibration import CalibratedClassifierCV
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.model_selection import train_test_split
import game
GBM_MODEL = joblib.load('gbm_classifier.joblib')
def get_predictions(model, X_train_orig, y_train_orig, X_test_orig):
"""
Obtain probability and class predictions for a given dataset and model
Parameters
----------
    model : object with fit, predict, and predict_proba methods
Machine learning model from which to produce predictions
X_test_orig : pandas DataFrame
Dataframe with features from which to produce a prediction
y_train_orig : list or pandas series
Target variables to train model on
    X_train_orig : pandas DataFrame
        Training features used to fit the model
Returns
-------
Tuple
Two lists: the class (1 or 0) predictions and the probability predictions
"""
model.fit(X_train_orig, y_train_orig)
y_pred_class = model.predict(X_test_orig)
y_pred_prob = model.predict_proba(X_test_orig)
y_pred_prob = [elem[1] for elem in y_pred_prob]
return y_pred_class, y_pred_prob
def analyze_accuracy(y_test_acc, y_pred_prob_acc, y_pred_class_acc):
"""
Show AUC curve and confusion matrix for model predictions
Parameters
----------
y_test_acc : list or pandas series
True values that are attempting to be predicted
    y_pred_class_acc : list
        Class predictions (1 or 0) for each row
    y_pred_prob_acc : list
        Probability predictions for each row
"""
naive_prediction = [0.5] * len(y_pred_class_acc)
naive_auc = roc_auc_score(y_test_acc, naive_prediction)
md_auc = roc_auc_score(y_test_acc, y_pred_prob_acc)
print('No Skill: ROC AUC=%.3f' % naive_auc)
print('Model : ROC AUC=%.3f' % md_auc)
ns_fpr, ns_tpr, _ = roc_curve(y_test_acc, naive_prediction)
lr_fpr, lr_tpr, _ = roc_curve(y_test_acc, y_pred_prob_acc)
if __name__ == '__main__':
# Setup Data for Prediction Modeling
df = pd.read_csv('game_list.csv')
df = df.loc[df.iloc[:, 1] <= (20 / 60), :]
y = df.iloc[:, 0]
X = df.iloc[:, 1:]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Set up GBM Model
gbm = GradientBoostingClassifier(loss='deviance', random_state=0, n_estimators=100,
max_depth=3, subsample=1, min_impurity_decrease=0)
gbm_sigmoid = CalibratedClassifierCV(gbm, method='sigmoid', cv=3)
gbm_sigmoid.fit(X_train, y_train)
y_class, y_prob = get_predictions(gbm_sigmoid, X_train, y_train, X_test)
gbm.fit(X_train, y_train)
joblib.dump(gbm, 'gbm_classifier.joblib')
clf = joblib.load('gbm_classifier.joblib')
# <editor-fold desc="Create Test Game">
test_game = game.Game()
test_game.board[1][1]['occupant'] = 'W'
test_game.board[2][1]['occupant'] = 'W'
test_game.board[3][1]['occupant'] = 'G'
test_game.board[3][3]['occupant'] = 'G'
test_game.board[1][2]['level'] = 2
test_game.board[0][2]['level'] = 1
test_game.board[4][3]['level'] = 1
test_game.board[4][4]['level'] = 1
test_game.board[4][0]['level'] = 3
test_game.board[2][2]['occupant'] = 'X'
test_game.board[2][2]['level'] = 4
# </editor-fold>
|
StarcoderdataPython
|
5052609
|
<gh_stars>0
from django.shortcuts import render, HttpResponseRedirect
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, PasswordChangeForm
from django.contrib.auth import login, authenticate, logout
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from App_Login.forms import SignUpForm, UserProfileChange, ProfilePic
# Create your views here.
def sign_up(request):
    form = SignUpForm()
    registered = False
    if request.method == 'POST':
        form = SignUpForm(data=request.POST)
        if form.is_valid():
            form.save()
            registered = True
    context = {'form': form, 'registered': registered}
    return render(request, 'App_Login/signup.html', context=context)
def login_page(request):
form = AuthenticationForm()
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse('index'))
return render(request, 'App_Login/login.html', context={'form':form})
@login_required
def logout_user(request):
logout(request)
return HttpResponseRedirect(reverse('App_Login:signin'))
@login_required
def profile(request):
return render(request, 'App_Login/profile.html', context={})
@login_required
def user_change(request):
current_user = request.user
form = UserProfileChange(instance=current_user)
if request.method == 'POST':
form = UserProfileChange(request.POST, instance=current_user)
if form.is_valid():
form.save()
form = UserProfileChange(instance=current_user)
return render(request, 'App_Login/change_profile.html', context={'form':form})
@login_required
def pass_change(request):
current_user = request.user
changed = False
form = PasswordChangeForm(current_user)
if request.method == 'POST':
form = PasswordChangeForm(current_user, data=request.POST)
if form.is_valid():
form.save()
changed = True
return render(request, 'App_Login/pass_change.html', context={'form':form, 'changed':changed})
@login_required
def add_pro_pic(request):
form = ProfilePic()
if request.method == 'POST':
form = ProfilePic(request.POST, request.FILES)
if form.is_valid():
user_obj = form.save(commit=False)
user_obj.user = request.user
user_obj.save()
return HttpResponseRedirect(reverse('App_Login:profile'))
return render(request, 'App_Login/pro_pic_add.html', context={'form':form})
@login_required
def change_pro_pic(request):
form = ProfilePic(instance=request.user.user_profile)
if request.method == 'POST':
form = ProfilePic(request.POST, request.FILES, instance=request.user.user_profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('App_Login:profile'))
return render(request, 'App_Login/pro_pic_add.html', context={'form':form})
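

# ---------------------------------------------------------------------------
# Illustrative URLconf sketch (an assumption, not part of the original file):
# the reverse() calls above rely on an 'App_Login' namespace with URL names such
# as 'signin' and 'profile'. A matching App_Login/urls.py could look roughly like:
#
#   from django.urls import path
#   from App_Login import views
#
#   app_name = 'App_Login'
#   urlpatterns = [
#       path('signup/', views.sign_up, name='signup'),
#       path('signin/', views.login_page, name='signin'),
#       path('logout/', views.logout_user, name='logout'),
#       path('profile/', views.profile, name='profile'),
#       path('profile/change/', views.user_change, name='user_change'),
#       path('password/', views.pass_change, name='pass_change'),
#       path('profile/picture/add/', views.add_pro_pic, name='add_pro_pic'),
#       path('profile/picture/change/', views.change_pro_pic, name='change_pro_pic'),
#   ]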
|
StarcoderdataPython
|
9654229
|
<reponame>FrancoBenner/archon-dex<filename>mom/mom_public.py
"""
tokenmom API client
API docs https://docs.tokenmom.com
"""
import requests
base_url = "https://api.tokenmom.com/"
def markets():
r = requests.get(base_url + "market/get_markets")
j = r.json()
markets = j["markets"]
for m in markets[:]:
base = m["base_token"]
#print (m.keys())
tokens = m["tokens"]
print (base,":",len(tokens))
#for t in tokens[:1]:
# print (t)
def list_market():
pair = "TM-WETH"
endpoint = "market/get_markets?market_id=" + pair
r = requests.get(base_url + endpoint)
j = r.json()
print (j)
def list_tickers():
endpoint = "market/get_tickers"
r = requests.get(base_url + endpoint)
j = r.json()
tickers = j["tickers"]
for t in tickers:
v = float(t['volume'])
if v > 0:
p = float(t["price"])
print (p,v)
ev = v*p
print (t["market_id"],":",p,v)
def get_tickers():
pair = "TM-WETH"
endpoint = "market/get_tickers?market_id=" + pair
r = requests.get(base_url + endpoint)
j = r.json()
print (j)
def list_book(pair):
endpoint = "order/get_orderbook?market_id="+pair
r = requests.get(base_url + endpoint)
j = r.json()
if j["status"]=='success':
data = j["orderbook"]
bids = data["bids"]
asks = data["asks"]
key = lambda item: item["price"]
bids = sorted(bids, key=key)
asks = sorted(asks, key=key)
bids.reverse()
return [bids,asks]
def show_book():
pair = "TM-WETH"
[bids,asks] = list_book(pair)
print ('** asks **')
for a in asks:
p,am = a["price"],a["amount"]
print (p,am)
print ('** bids **')
for b in bids:
p,am = b["price"],b["amount"]
print (p,am)
def show_spread():
pair = "TM-WETH"
[bids,asks] = list_book(pair)
bp,ap = float(bids[0]["price"]),float(asks[0]["price"])
spread = (ap-bp)/ap
print (pair,":",spread)
def post_order():
#POST order/build_order
pass
def trades(pair, page=0):
"""get trades per page """
endpoint = "market/get_trades?market_id="+pair+"&page="+str(page)
r = requests.get(base_url + endpoint)
trades = r.json()["trades"]
return trades
def trades_all(pair):
""" get all trades """
trade_list = list()
maxpage = 20
#TODO fetch until recent day
for page in range(0,maxpage):
t = trades(pair, page)
trade_list += t
return trade_list
def trades_write():
pair = "TM-WETH"
trades = trades_all(pair)
with open('trades_' + pair + '.csv','w') as f:
for t in trades:
a = t["amount"]
p = t["price"]
u = t['updated_at']
print (u,":",p," ",a)
f.write(str(t) + '\n')
#markets()
list_tickers()
#list_book()
#show_book()
#show_spread()
#trades_all()
#trades_write()
|
StarcoderdataPython
|
1976737
|
# Copyright 2015 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for templating in PrettyTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy
from numpy import testing
import tensorflow as tf
import prettytensor
from prettytensor import pretty_tensor_class
from prettytensor import pretty_tensor_testing
KEY = 'random_key'
TOLERANCE = 0.000001
@prettytensor.Register(assign_defaults='value')
def ValidateMethod(input_tensor, test_class, value):
test_class.assertEqual(KEY, value)
return input_tensor
class TemplatedPrettyTensorTest(pretty_tensor_testing.PtTestCase):
def setUp(self):
super(self.__class__, self).setUp()
# Input is 2x3x5, which isn't a natural size for any op.
self.input_data = numpy.array(
[[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[10, 12, 13, 14, 15],], [[-1, 2, -3, 4, -5], [6, -7, 8, -9, 10],
[-10, 12, -13, 14, -15]]],
dtype=numpy.float)
self.input = tf.constant(self.input_data, dtype=tf.float32)
def Template(self, key):
return prettytensor.template(key, self.bookkeeper)
def testSimpleTemplate(self):
template = self.Template(KEY)
x = template.construct(random_key=self.input)
out = self.RunTensor(x)
testing.assert_allclose(self.input_data, out, rtol=TOLERANCE)
def testSingleMethod(self):
template = self.Template(KEY).flatten()
x = template.construct(random_key=self.input)
out = self.RunTensor(x)
testing.assert_allclose(
self.input_data.reshape([2, 15]),
out,
rtol=TOLERANCE)
def testSequential(self):
seq = self.Template(KEY).sequential()
seq.flatten()
seq.fully_connected(100)
out = self.RunTensor(seq.as_layer().construct(random_key=self.input))
self.assertSequenceEqual([2, 100], out.shape)
def testAttach(self):
input_pt = self.Wrap(self.input)
template = self.Template('input').flatten().fully_connected(100)
out = self.RunTensor(input_pt.attach_template(template, 'input'))
self.assertSequenceEqual([2, 100], out.shape)
def testUnboundVariableForParameter(self):
input_pt = self.Wrap(self.input)
template = input_pt.flatten().fully_connected(prettytensor.UnboundVariable(
'width'))
self.assertTrue(isinstance(template, pretty_tensor_class._DeferredLayer))
out = self.RunTensor(template.construct(width=200))
self.assertSequenceEqual([2, 200], out.shape)
def testMissingUnboundVariable(self):
input_pt = self.Wrap(self.input)
template = input_pt.flatten().fully_connected(prettytensor.UnboundVariable(
'width'))
with self.assertRaises(ValueError):
template.construct()
def testUnboundVariableReused(self):
"""The same unbound_var can be used multiple times in a graph."""
input_pt = self.Wrap(self.input)
unbound_var = prettytensor.UnboundVariable('width')
template = (input_pt.flatten().fully_connected(unbound_var)
.fully_connected(unbound_var))
out = self.RunTensor(template.construct(width=200))
self.assertSequenceEqual([2, 200], out.shape)
def testAttachToTemplate(self):
input_pt = self.Wrap(self.input)
template1 = self.Template('input').flatten()
template2 = self.Template('input').fully_connected(100)
joined = template1.attach_template(template2, 'input')
out = self.RunTensor(input_pt.attach_template(joined, 'input'))
self.assertSequenceEqual([2, 100], out.shape)
def testUnboundVariableAsDefault(self):
"""The same unbound_var can be used multiple times in a graph."""
input_pt = self.Wrap(self.input)
with prettytensor.defaults_scope(
value=prettytensor.UnboundVariable('key')):
x = input_pt.ValidateMethod(self)
self.assertTrue(isinstance(x, pretty_tensor_class._DeferredLayer))
x.construct(key=KEY)
def testConflictingUnboundVariables(self):
"""Two unbound_vars with the same name are considered conflicting."""
input_pt = self.Wrap(self.input)
with self.assertRaises(ValueError):
(input_pt.flatten()
.fully_connected(prettytensor.UnboundVariable('width'))
.fully_connected(prettytensor.UnboundVariable('width')))
def testMultipleUnboundVariables(self):
input_pt = self.Wrap(self.input)
template = (input_pt.flatten()
.fully_connected(prettytensor.UnboundVariable('width'))
.fully_connected(prettytensor.UnboundVariable('width2')))
out = self.RunTensor(template.construct(width=200, width2=100))
self.assertSequenceEqual([2, 100], out.shape)
def testExtraValues(self):
input_pt = self.Wrap(self.input)
template = (input_pt.flatten()
.fully_connected(prettytensor.UnboundVariable('width')))
with self.assertRaises(ValueError):
template.construct(width=200, width2=100)
def testIncompatibleUnboundVariableValues(self):
"""Ensures that an error is thrown if a var is given incompatible values.
Since the primary use case of templates is parameter sharing, it is
important that substitutions don't conflict.
"""
input_pt = self.Wrap(self.input)
full = input_pt.flatten().fully_connected(prettytensor.UnboundVariable(
'width'))
full.construct(width=100)
with self.assertRaises(ValueError):
full.construct(width=200)
def BuildLargishGraph(self, input_pt):
seq = input_pt.sequential()
seq.reshape('___1')
seq.conv2d(1, 10)
with seq.subdivide(2) as [a, b]:
a.with_name('a').conv2d(1, 5)
b.with_name('b').conv2d(1, 15)
seq.with_name('wow')
seq.flatten()
seq.fully_connected(100, name='a_funny_name')
return seq.as_layer()
def testGraphMatchesImmediate(self):
"""Ensures that the vars line up between the two modes."""
with tf.Graph().as_default():
input_pt = prettytensor.wrap(
tf.constant(self.input_data, dtype=tf.float32))
self.BuildLargishGraph(input_pt)
normal_names = sorted([v.name for v in tf.global_variables()])
with tf.Graph().as_default():
template = prettytensor.template('input')
self.BuildLargishGraph(template).construct(input=prettytensor.wrap(
tf.constant(self.input_data, dtype=tf.float32)))
template_names = sorted([v.name for v in tf.global_variables()])
self.assertSequenceEqual(normal_names, template_names)
def testVariablesAreShared(self):
"""Ensures that adding the graph twice shares variables."""
input_pt = self.Wrap(self.input)
template = self.Template('input').flatten().fully_connected(10)
l1 = template.construct(input=input_pt)
l2 = template.construct(input=input_pt)
self.assertNotEqual(l1.tensor, l2.tensor)
v1 = self.RunTensor(l1, init=True)
v2 = self.RunTensor(l2, init=False)
testing.assert_allclose(v1, v2, rtol=TOLERANCE)
def testBind(self):
input_pt = self.Wrap(self.input)
template = self.Template('input').flatten().fully_connected(10)
l1 = template.bind(input=input_pt).construct()
l2 = template.construct(input=input_pt)
v1 = self.RunTensor(l1, init=True)
v2 = self.RunTensor(l2, init=False)
testing.assert_allclose(v1, v2, rtol=TOLERANCE)
def testBindTuple(self):
labels = numpy.array([[0., 1.], [1., 0.]], dtype=numpy.float32)
template = self.Template('input').flatten().softmax_classifier(2, labels)
bound = template.bind(input=self.input)
tuple1 = bound.construct()
tuple2 = template.construct(input=self.input)
self.assertNotEqual(tuple1.softmax.tensor, tuple2.softmax.tensor)
softmax1 = self.RunTensor(tuple1.softmax, init=True)
loss1 = self.RunTensor(tuple1.loss, init=False)
softmax2 = self.RunTensor(tuple2.softmax, init=False)
loss2 = self.RunTensor(tuple2.loss, init=False)
testing.assert_allclose(softmax1, softmax2, rtol=TOLERANCE)
testing.assert_allclose(loss1, loss2, rtol=TOLERANCE)
def testConstructAllWithConflictingValues(self):
labels = numpy.array([[0., 1.], [1., 0.]], dtype=numpy.float32)
template = self.Template('input').flatten().softmax_classifier(2, labels)
softmax = template.softmax.bind(input=self.input)
loss = template.loss.bind(input=labels)
with self.assertRaises(ValueError):
prettytensor.construct_all([softmax, loss])
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4975217
|
<filename>src/ggrc/automapper/rules.py
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import itertools
from collections import namedtuple
from logging import getLogger
from ggrc import models
Attr = namedtuple('Attr', ['name'])
type_ordering = [['Audit'], ['Program'],
['Regulation', 'Policy', 'Standard', 'Contract'],
['Section', 'Clause'], ['Objective'], ['Control'],
['Assessment']]
# pylint: disable=invalid-name
logger = getLogger(__name__)
def get_type_indices():
indices = dict()
for i, layer in enumerate(type_ordering):
for type_ in layer:
indices[type_] = i
return indices
class Rule(object):
def __init__(self, name, top, mid, bottom):
def wrap(o):
return o if isinstance(o, set) else {o}
self.name = name
self.top = wrap(top)
self.mid = wrap(mid)
self.bottom = wrap(bottom)
class RuleSet(object):
Entry = namedtuple('RuleSetEntry', ['explicit', 'implicit'])
entry_empty = Entry(frozenset(set()), frozenset(set()))
_type_indices = get_type_indices()
  @classmethod
  def _check_type_order(cls, type1, type2):
    i1 = cls._type_indices.get(type1, None)
    if i1 is None:
      return "Unknown level for %s" % type1
    i2 = cls._type_indices.get(type2, None)
    if i2 is None:
      return "Unknown level for %s" % type2
    if not i1 <= i2:
      return "Type %s does not occur higher than type %s" % (type1, type2)
@classmethod
def _explode_rules(cls, rule_list):
for rule in rule_list:
for top, mid, bottom in itertools.product(rule.top, rule.mid,
rule.bottom):
if Attr in map(type, [top, mid, bottom]):
# if this is a direct mapping
# there is only one way to form the triangle
# TODO rule sanity check
yield (mid, bottom, top, rule)
else:
err1 = cls._check_type_order(top, mid)
err2 = cls._check_type_order(mid, bottom)
if err1 is not None or err2 is not None:
logger.warning("Automapping rule ordering violation")
if err1 is not None:
logger.warning(err1)
if err2 is not None:
logger.warning(err2)
logger.warning("Skipping bad rule (%s, %s, %s)", top, mid, bottom)
continue
yield (mid, bottom, top, rule)
yield (mid, top, bottom, rule)
def __init__(self, count_limit, rule_list):
self.count_limit = count_limit
self._rule_list = rule_list
self._rules = dict()
self._rule_source = dict()
def available(m, l):
return hasattr(getattr(models, m), l + '_id')
for src, dst, mapping, source in self._explode_rules(rule_list):
key = (src, dst)
entry = self._rules.get(key, RuleSet.Entry(set(), set()))
if isinstance(mapping, Attr):
entry = RuleSet.Entry(entry.explicit, entry.implicit | {mapping})
else:
entry = RuleSet.Entry(entry.explicit | {mapping}, entry.implicit)
self._rules[key] = entry
sources = self._rule_source.get((src, dst, mapping), set())
sources.add(source)
self._rule_source[src, dst, mapping] = sources
self._freeze()
def _freeze(self):
for key in self._rules:
explicit, implicit = self._rules[key]
self._rules[key] = RuleSet.Entry(frozenset(explicit),
frozenset(implicit))
def __getitem__(self, key):
if key in self._rules:
return self._rules[key]
else:
return RuleSet.entry_empty
def __repr__(self):
return 'Rules(%s)' % repr(self._rule_list)
def __str__(self):
rules = []
for key in self._rules:
src, dst = key
for mapping in self._rules[key].explicit | self._rules[key].implicit:
source = ','.join(r.name for r in self._rule_source[src, dst, mapping])
rule = (' -> %s <--> %s <--> %s <- )' % (dst, src, mapping))
rule += ' ' * (70 - len(rule))
rule += source
rules.append(rule)
rules.sort()
return 'RulesSet\n' + '\n'.join(rules)
class Types(object):
all = {'Program', 'Regulation', 'Policy', 'Standard', 'Contract',
'Section', 'Clause', 'Objective', 'Control', 'Assessment'}
directives = {'Regulation', 'Policy', 'Standard', 'Contract'}
assets_business = {'System', 'Process', 'DataAsset', 'Product', 'Project',
'Facility', 'Market'}
people_groups = {'AccessGroup', 'Person', 'OrgGroup', 'Vendor'}
rules = RuleSet(count_limit=10000, rule_list=[
Rule(
'mapping directive to a program',
'Program',
Types.directives,
Types.all - {'Program'} - Types.directives,
),
Rule(
'mapping to sections and clauses',
Types.directives,
{'Section', 'Clause'},
{'Objective', 'Control'},
),
Rule(
'mapping to objective',
{'Section'},
{'Objective'},
{'Objective', 'Control'},
),
Rule(
'mapping nested controls',
{'Objective'},
{'Control'},
{'Control'},
),
Rule(
'mapping request to audit',
{Attr('program')},
{'Audit'},
{'Request'},
),
Rule(
'mapping program objects to audit',
{Attr('audits'), 'Audit'},
{'Program'},
{'Regulation', 'Policy', 'Standard', 'Contract',
'Section', 'Clause', 'Objective', 'Control'}
),
])
|
StarcoderdataPython
|
4880266
|
from mock import MagicMock
import mock
from django.test import override_settings
from tests.utilities.utils import SafeTestCase
from tests.utilities.ldap import get_ldap_user_defaults
from accounts.models import (
User,
AccountRequest,
Intent
)
from projects.models import Project
from projects.receivers import check_general_eligibility
organization_info = {
'ucb': {
'long_name': 'University of Colorado Boulder',
'suffix': None,
'general_project_id': 'ucb-general'
},
'csu': {
'long_name': 'Colorado State University',
'suffix': 'colostate.edu',
'general_project_id': 'csu-general'
}
}
@override_settings(ORGANIZATION_INFO=organization_info)
class GeneralEligibilityReceiverTestCase(SafeTestCase):
def test_check_general_eligibility(self):
user_defaults = get_ldap_user_defaults()
auth_user_defaults = dict(
username=user_defaults['username'],
first_name=user_defaults['first_name'],
last_name=user_defaults['last_name'],
email=user_defaults['email']
)
auth_user = User.objects.create(**auth_user_defaults)
account_request_defaults = dict(
username=auth_user.username,
first_name=auth_user.first_name,
last_name=auth_user.last_name,
email=auth_user.email,
organization='ucb'
)
account_request = AccountRequest.objects.create(**account_request_defaults)
intent = Intent.objects.create(
account_request=account_request,
reason_summit=True
)
project_defaults = dict(
pi_emails=['<EMAIL>'],
description='test project',
organization='ucb',
title='test project',
project_id='ucb-general'
)
project = Project.objects.create(**project_defaults)
check_general_eligibility(account_request.__class__,account_request=account_request)
project = Project.objects.get()
self.assertIn(auth_user,project.collaborators.all())
# No Summit intention declared, now add to 'general' account anyway
project.collaborators.clear()
intent.reason_summit = False
intent.save()
check_general_eligibility(account_request.__class__,account_request=account_request)
project = Project.objects.get()
self.assertIn(auth_user,project.collaborators.all())
def test_check_general_eligibility_suffixed(self):
user_defaults = get_ldap_user_defaults()
        effective_uid = '<EMAIL>'.format(user_defaults['username'])
auth_user_defaults = dict(
username=effective_uid,
first_name=user_defaults['first_name'],
last_name=user_defaults['last_name'],
email=user_defaults['email']
)
auth_user = User.objects.create(**auth_user_defaults)
account_request_defaults = dict(
username=user_defaults['username'],
first_name=auth_user.first_name,
last_name=auth_user.last_name,
email=auth_user.email,
organization='csu'
)
account_request = AccountRequest.objects.create(**account_request_defaults)
intent = Intent.objects.create(
account_request=account_request,
reason_summit=True
)
project_defaults = dict(
pi_emails=['<EMAIL>'],
description='test project',
organization='csu',
title='test project',
project_id='csu-general'
)
project = Project.objects.create(**project_defaults)
        check_general_eligibility(account_request.__class__, account_request=account_request)
        project = Project.objects.get()
        self.assertIn(auth_user, project.collaborators.all())
def test_check_general_eligibility_no_intent(self):
user_defaults = get_ldap_user_defaults()
auth_user_defaults = dict(
username=user_defaults['username'],
first_name=user_defaults['first_name'],
last_name=user_defaults['last_name'],
email=user_defaults['email']
)
auth_user = User.objects.create(**auth_user_defaults)
account_request_defaults = dict(
username=auth_user.username,
first_name=auth_user.first_name,
last_name=auth_user.last_name,
email=auth_user.email,
organization='ucb'
)
account_request = AccountRequest.objects.create(**account_request_defaults)
        check_general_eligibility(account_request.__class__, account_request=account_request)
|
StarcoderdataPython
|
9645765
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from mlportopt.util.helperfuncs import gen_real_data, train_test, merge_clusters, get_full_weights
from mlportopt.preprocessing.preprocessing import preprocess
from mlportopt.flatcluster.flatcluster import DPGMM, TFSOM, GPcluster
from mlportopt.hiercluster.hiercluster import scipy_linkage, BHC
from mlportopt.portfolio.portfolio import Allocate, Evaluation, quasidiagonalise
from mlportopt.dependence.dependence import Dependence
from mlportopt.riskmetrics.riskmetrics import RiskMetrics
class Optimise:
def __init__(self, data,
train_test_v = 0.5,
frequency = 'W',
residuals = None,
whiten = True,
reduce_dims = 2,
dep_measure = 'MI',
dist_measure = None,
dep_copula = 'deheuvels',
dep_denoise = None,
flat_cluster = {'DPGMM':{'clusters':5,
'iters': 500,
'step': 1,
'alpha': 1,
'ftol': 1e-6,
'gtol': 1e-6,
'kap': 1e-6,
'var_p': 1e-3,
                                              'trunc': 10}},
hier_cluster = {'Bayesian':{'alpha': 0.001,
'beta': 0.001,
'gamma': 1,
'rmt_denoise': None}},
intra_method = 'var',
inter_method = 'var',
plots = True):
'''
Parameters
----------
train_test_v: float
Ratio of split between training and test data (Default is 0.5)
frequency: str
Frequency of data [Options: 'D', 'W', 'M'] (Default is 'W')
residuals: ndarray
Beta-adjusted returns (Regressed on the market). If not None, clustering is performed on these residuals. (Default is None)
whiten: Bool
Boolean indicator for demeaning and standardising (whitening) (Default is True)
reduce_dims: int or None
If not None, target data will be reduced via PCA to a lower dimension of size reduce_dims (Default is 2)
dep_measure: str
Chosen dependence measure [Options: 'MI', 'VI','CE','CD','corr','Waserstein'] (Default is 'MI')
dist_measure: str or None
If not None, the method for transforming a similarity matrix into a distance matrix [Options: 'angular', 'corr_dist', 'acute_angular', 'abs_corr', 'other'] (Default is None)
dep_copula: str
Chosen dependence copula [Options: 'deheuvels', 'gaussian','student','clayton','gumbel'] (Default is None)
dep_denoise: str or None
If not None, the Random Matrix Theoretical approach to denoising Hermitian matrices [Options 'fixed','shrinkage','targeted_shrinkage'] (Default is None)
flat_cluster: None or Nested Dictionary (see below for parameter descriptions)
Parameter Dictionary for flat clustering of form {'Method':{Parameters}}
            [Options: {'DPGMM': {clusters, iters, step, alpha, ftol, gtol, kap, var_p, trunc, verb}}
{'GP' : {iters, step, s2, l, alpha, gamma, cov_lim, p_Num, latent, verbose}}]
hier_cluster: Nested Dictionary
Parameter Dictionary for hierarchical clustering of form {'Method':{Parameters}}
[Options: {'Bayesian': {alpha, beta, gamma, rmt_denoise}}
{'single': {dep_measure, hier_cluster, dist_measure, dep_copula, dep_denoise}}
{'average': {Same as single}},
{'complete': {Same as single}},
{'ward': {Same as single}}]
intra_method: str
Method for (inversely) weighting at the cluster level - see Risk Options below (Default is 'var')
inter_method: str
Method for (inversely) weighting at the tree level - see Risk Options below (Default is 'var')
Risk Options
-------
- uniform
- prob_sharpe
- ann_sharpe
- sharpe
- var
- vol (std)
- ann_vol
- VaR - normal (VaR under normality assumption)
- VaR - student (VaR under student t assumption)
- VaR - gmm (VaR from fitted GMM samples)
- CVaR - normal (CVaR under normality assumption)
- CVaR - student (CVaR under student t assumption)
- CVaR - gmm (CVaR from fitted GMM samples)
flatcluster - GP
----------
iters: int
            Number of iterations in the gradient descent method (Default is 10)
step: float
Step size in gradient descent method (Default is 0.1)
s2: float
Initial value for the variance (Default is 1)
l: float
Initial value for the lengthscale (Default is 1)
alpha: float
Initial value for alpha (Default is 1)
gamma: float [0,1]
            Controls the proportion of the maximum variance used to determine cluster boundaries (Default is 1)
cov_lim: float
Limits the maximum covariance (Default is 0.99)
p_Num:
Determines the number of samples to be generated when fitting (Default is 20)
latent: int
Determines the size of the latent space if dimensionality reduction is required (Default is 3)
verbose: Bool
Boolean indicator for descriptive printing (Default is False)
flatcluster - DPGMM
----------
X: ndarray
clusters: int
Initial number of clusters - updated by the DP prior
iters: int
Number of iterations in gradient descent (Default is 500)
step: float
Step size (Default is 1)
alpha: float
Initial value for the Dirichlet hyper-parameter (Default is 1)
ftol: float
Tolerance for function value convergence (Default is 1e-6)
gtol: float
Tolerance for gradient value convergence (Default is 1e-6)
kap: float
Hyperparameter for prior mean (Default is 1e-6)
var_p: float
Prior value for the variance (Default is 1e-3)
trunc: float
            Initial value for the truncation parameter (Default is 10)
verb: Bool
Boolean indicator for explanatory prints (Default is False)
'''
self.data = data
self.train, self.test = train_test(data, train_test_v)
self.frequency = frequency
self.whiten = whiten
######## Miscellaneous ###########
self.residuals = False
self.merge_weights = None
self.plots = plots
self.reduce_dims = reduce_dims
self.linkage = None
self.hier_cluster_dict = hier_cluster
######## Preprocessing ###########
if residuals is not None:
self.residuals = True # Set the target data to the residuals if beta-adjustment is desired
self.X, _ = train_test(residuals, train_test_v)
else: self.X = self.train.copy()
# Whiten and reduce
tform = self.X.copy() + 1e-7
self.X = preprocess(tform, axis = 1, white = self.whiten , reduce = False, n = 0)
self.reduced = preprocess(tform, axis = 1, white = self.whiten , reduce = (reduce_dims > 0), n = reduce_dims)
######## Dependence ############
self.dep_measure = dep_measure
self.dist_measure = dist_measure
self.dep_copula = dep_copula
self.dep_denoise = dep_denoise
###### Cluster Risk Metrics ######
self.intra_method = intra_method
self.inter_method = inter_method
######## Flat Cluster ############
if flat_cluster is not None:
self.flat_cluster = list(flat_cluster.keys())[0]
else:
self.flat_cluster = flat_cluster
if self.flat_cluster == 'DPGMM':
param_dict = flat_cluster['DPGMM']
clusters = param_dict['clusters']
iters = param_dict['iters']
step = param_dict['step']
alpha = param_dict['alpha']
ftol = param_dict['ftol']
gtol = param_dict['gtol']
kap = param_dict['kap']
var_p = param_dict['var_p']
trunc = param_dict['trunc']
verb = False
self.fclust = DPGMM(self.reduced, clusters, iters, step, alpha, ftol, gtol, kap, var_p, trunc, verb)
elif self.flat_cluster == 'GP':
param_dict = flat_cluster['GP']
iters = param_dict['iters']
step = param_dict['step']
s2 = param_dict['s2']
l = param_dict['l']
alpha = param_dict['alpha']
gamma = param_dict['gamma']
cov_lim = param_dict['cov_lim']
p_Num = param_dict['p_Num']
latent = param_dict['latent']
verbose = param_dict['verbose']
self.fclust = GPcluster(self.reduced,iters, step, s2, l, alpha, gamma, cov_lim, p_Num, latent, verbose)
if hier_cluster is not None:
self.hier_cluster = list(hier_cluster.keys())[0]
else:
self.hier_cluster = hier_cluster
return
def param_hclust(self):
######### Hier Cluster ##########
self.hier_cluster = list(self.hier_cluster_dict.keys())[0]
param_dict = self.hier_cluster_dict[self.hier_cluster]
if self.hier_cluster == 'Bayesian':
if self.reduce_dims < 2:
print('Please reduce the dimensionality before attempting Bayesian Hierarchical Clustering')
alpha = param_dict['alpha']
beta = param_dict['beta']
gamma = param_dict['gamma']
rmt_denoise = param_dict['rmt_denoise']
self.hclust = BHC(self.reduced, alpha, beta, gamma, rmt_denoise)
else:
self.hclust = scipy_linkage(self.X, self.dep_measure, self.hier_cluster, self.dist_measure, self.dep_copula, self.dep_denoise)
return
def f_cluster(self):
### Model Settings ###
if self.flat_cluster == 'DPGMM':
self.fclust.fit()
# if self.plots: self.fclust.plot()
# self.fclust.split_all()
# if self.plots: self.fclust.plot()
elif self.flat_cluster == 'GP':
self.fclust.fit()
### Assign Clusters ###
self.assigned_clusters = self.fclust.assigned_clusters
self.unique_flat_clusts = np.unique(self.assigned_clusters).shape[0]
### Merge the clusters weighted by chosen metric to create new data for hierarchical clustering ###
if self.residuals:
_, self.X, self.merge_weights = merge_clusters(data = self.train,
clusters = self.assigned_clusters,
resids = self.X,
freq = self.frequency,
method = self.intra_method)
self.X = preprocess(self.X, axis = 1, white = self.whiten , reduce = False, n = 0)
else:
self.X, _, self.merge_weights = merge_clusters(data = self.train,
clusters = self.assigned_clusters,
resids = None,
freq = self.frequency,
method = self.intra_method)
self.X = preprocess(self.X, axis = 1, white = self.whiten , reduce = False, n = 0)
if self.merge_weights is None:
self.merge_weights = {i:[1] for i in range(self.train.shape[0])}
return
def h_cluster(self):
self.param_hclust()
if self.hier_cluster == 'Bayesian': self.hclust.fit()
if self.plots: self.hclust.plot_dendrogram()
self.linkage = np.array(self.hclust.linkage)
return
def allocate(self):
self.allocation = Allocate(self.train, self.linkage, self.frequency)
if self.hier_cluster is not None:
self.weights = self.allocation.recursively_partition(inter_cluster_metric = self.inter_method,
intra_cluster_metric = self.intra_method)
if self.merge_weights is not None:
self.weights = get_full_weights(self.weights, self.merge_weights)
else:
inter = np.empty(self.train.shape[0])
for i in range(inter.shape[0]):
inter_rm = RiskMetrics()
inter_rm.fit(self.train[i,:], freq = self.frequency)
inter[i] = inter_rm(self.inter_method)
            inter /= inter.sum()
full_weights = []
ind = 0
            for v, value in enumerate(self.merge_weights.values()):
                for item in value:
                    full_weights.append(item)  # intra-cluster weight of each original asset
                    ind += 1
            self.weights = np.array(full_weights) * inter
            self.weights /= self.weights.sum()
return
def evaluate(self, plots = False):
self.evaluation = Evaluation(self.train, self.test, self.weights, self.frequency)
self.evaluation(plots)
return
def __call__(self):
if self.flat_cluster is not None:
self.f_cluster()
if self.hier_cluster is not None:
self.h_cluster()
self.allocate()
self.evaluate(self.plots)
self.summary_df = self.evaluation.summary_df
return
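# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): shows how the
# Optimise pipeline above is driven end-to-end on synthetic weekly returns.
# The data orientation (rows = assets, columns = observations), asset count
# and random returns are illustrative assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    np.random.seed(0)
    synthetic_returns = np.random.normal(0, 0.01, size=(20, 260))  # 20 assets, 260 weekly observations (assumed)
    opt = Optimise(synthetic_returns,
                   train_test_v=0.5,
                   frequency='W',
                   dep_measure='MI',
                   intra_method='var',
                   inter_method='var',
                   plots=False)
    opt()                      # flat cluster -> hierarchical cluster -> allocate -> evaluate
    print(opt.weights)         # full portfolio weights
    print(opt.summary_df)      # evaluation summary produced by Evaluation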
|
StarcoderdataPython
|
9724032
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import abc
import sys
import copy
import time
import datetime
import importlib
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed
import fire
import requests
import numpy as np
import pandas as pd
from tqdm import tqdm
from loguru import logger
from yahooquery import Ticker
from dateutil.tz import tzlocal
CUR_DIR = Path(__file__).resolve().parent
sys.path.append(str(CUR_DIR.parent.parent))
from data_collector.utils import get_calendar_list, get_hs_stock_symbols, get_us_stock_symbols
INDEX_BENCH_URL = "http://push2his.eastmoney.com/api/qt/stock/kline/get?secid=1.{index_code}&fields1=f1%2Cf2%2Cf3%2Cf4%2Cf5&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58&klt=101&fqt=0&beg={begin}&end={end}"
REGION_CN = "CN"
REGION_US = "US"
class YahooCollector:
START_DATETIME = pd.Timestamp("2000-01-01")
HIGH_FREQ_START_DATETIME = pd.Timestamp(datetime.datetime.now() - pd.Timedelta(days=5 * 5))
END_DATETIME = pd.Timestamp(datetime.datetime.now() + pd.Timedelta(days=1))
def __init__(
self,
save_dir: [str, Path],
start=None,
end=None,
interval="1d",
max_workers=4,
max_collector_count=5,
delay=0,
check_data_length: bool = False,
limit_nums: int = None,
show_1m_logging: bool = False,
):
"""
Parameters
----------
save_dir: str
stock save dir
max_workers: int
workers, default 4
max_collector_count: int
default 5
delay: float
time.sleep(delay), default 0
interval: str
            freq, value from [1m, 1d], default 1d
start: str
start datetime, default None
end: str
end datetime, default None
check_data_length: bool
check data length, by default False
limit_nums: int
            used for debugging, by default None
show_1m_logging: bool
show 1m logging, by default False; if True, there may be many warning logs
"""
self.save_dir = Path(save_dir).expanduser().resolve()
self.save_dir.mkdir(parents=True, exist_ok=True)
self._delay = delay
self._show_1m_logging = show_1m_logging
self.stock_list = sorted(set(self.get_stock_list()))
if limit_nums is not None:
try:
self.stock_list = self.stock_list[: int(limit_nums)]
except Exception as e:
logger.warning(f"Cannot use limit_nums={limit_nums}, the parameter will be ignored")
self.max_workers = max_workers
self._max_collector_count = max_collector_count
self._mini_symbol_map = {}
self._interval = interval
self._check_small_data = check_data_length
self._start_datetime = pd.Timestamp(str(start)) if start else self.START_DATETIME
self._end_datetime = min(pd.Timestamp(str(end)) if end else self.END_DATETIME, self.END_DATETIME)
if self._interval == "1m":
self._start_datetime = max(self._start_datetime, self.HIGH_FREQ_START_DATETIME)
elif self._interval == "1d":
self._start_datetime = max(self._start_datetime, self.START_DATETIME)
else:
raise ValueError(f"interval error: {self._interval}")
# using for 1m
self._next_datetime = self.convert_datetime(self._start_datetime.date() + pd.Timedelta(days=1))
self._latest_datetime = self.convert_datetime(self._end_datetime.date())
self._start_datetime = self.convert_datetime(self._start_datetime)
self._end_datetime = self.convert_datetime(self._end_datetime)
@property
@abc.abstractmethod
def min_numbers_trading(self):
# daily, one year: 252 / 4
# us 1min, a week: 6.5 * 60 * 5
# cn 1min, a week: 4 * 60 * 5
raise NotImplementedError("rewrite min_numbers_trading")
@abc.abstractmethod
def get_stock_list(self):
raise NotImplementedError("rewrite get_stock_list")
@property
@abc.abstractmethod
def _timezone(self):
raise NotImplementedError("rewrite get_timezone")
def convert_datetime(self, dt: [pd.Timestamp, datetime.date, str]):
try:
dt = pd.Timestamp(dt, tz=self._timezone).timestamp()
dt = pd.Timestamp(dt, tz=tzlocal(), unit="s")
except ValueError as e:
pass
return dt
def _sleep(self):
time.sleep(self._delay)
def save_stock(self, symbol, df: pd.DataFrame):
"""save stock data to file
Parameters
----------
symbol: str
stock code
df : pd.DataFrame
df.columns must contain "symbol" and "datetime"
"""
if df.empty:
raise ValueError("df is empty")
symbol = self.normalize_symbol(symbol)
stock_path = self.save_dir.joinpath(f"{symbol}.csv")
df["symbol"] = symbol
if stock_path.exists():
with stock_path.open("a") as fp:
df.to_csv(fp, index=False, header=False)
else:
with stock_path.open("w") as fp:
df.to_csv(fp, index=False)
def _save_small_data(self, symbol, df):
if len(df) <= self.min_numbers_trading:
logger.warning(f"the number of trading days of {symbol} is less than {self.min_numbers_trading}!")
_temp = self._mini_symbol_map.setdefault(symbol, [])
_temp.append(df.copy())
return None
else:
if symbol in self._mini_symbol_map:
self._mini_symbol_map.pop(symbol)
return symbol
def _get_from_remote(self, symbol):
def _get_simple(start_, end_):
self._sleep()
error_msg = f"{symbol}-{self._interval}-{start_}-{end_}"
def _show_logging_func():
if self._interval == "1m" and self._show_1m_logging:
logger.warning(f"{error_msg}:{_resp}")
try:
_resp = Ticker(symbol, asynchronous=False).history(interval=self._interval, start=start_, end=end_)
if isinstance(_resp, pd.DataFrame):
return _resp.reset_index()
elif isinstance(_resp, dict):
_temp_data = _resp.get(symbol, {})
if isinstance(_temp_data, str) or (
isinstance(_resp, dict) and _temp_data.get("indicators", {}).get("quote", None) is None
):
_show_logging_func()
else:
_show_logging_func()
except Exception as e:
logger.warning(f"{error_msg}:{e}")
_result = None
if self._interval == "1d":
_result = _get_simple(self._start_datetime, self._end_datetime)
elif self._interval == "1m":
if self._next_datetime >= self._latest_datetime:
_result = _get_simple(self._start_datetime, self._end_datetime)
else:
_res = []
def _get_multi(start_, end_):
_resp = _get_simple(start_, end_)
if _resp is not None and not _resp.empty:
_res.append(_resp)
for _s, _e in (
(self._start_datetime, self._next_datetime),
(self._latest_datetime, self._end_datetime),
):
_get_multi(_s, _e)
for _start in pd.date_range(self._next_datetime, self._latest_datetime, closed="left"):
_end = _start + pd.Timedelta(days=1)
self._sleep()
_get_multi(_start, _end)
if _res:
_result = pd.concat(_res, sort=False).sort_values(["symbol", "date"])
else:
raise ValueError(f"cannot support {self._interval}")
return _result
def _get_data(self, symbol):
_result = None
df = self._get_from_remote(symbol)
if isinstance(df, pd.DataFrame):
if not df.empty:
if self._check_small_data:
if self._save_small_data(symbol, df) is not None:
_result = symbol
self.save_stock(symbol, df)
else:
_result = symbol
self.save_stock(symbol, df)
return _result
def _collector(self, stock_list):
error_symbol = []
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
with tqdm(total=len(stock_list)) as p_bar:
for _symbol, _result in zip(stock_list, executor.map(self._get_data, stock_list)):
if _result is None:
error_symbol.append(_symbol)
p_bar.update()
print(error_symbol)
logger.info(f"error symbol nums: {len(error_symbol)}")
logger.info(f"current get symbol nums: {len(stock_list)}")
error_symbol.extend(self._mini_symbol_map.keys())
return sorted(set(error_symbol))
def collector_data(self):
"""collector data"""
logger.info("start collector yahoo data......")
stock_list = self.stock_list
for i in range(self._max_collector_count):
if not stock_list:
break
logger.info(f"getting data: {i+1}")
stock_list = self._collector(stock_list)
logger.info(f"{i+1} finish.")
for _symbol, _df_list in self._mini_symbol_map.items():
self.save_stock(_symbol, pd.concat(_df_list, sort=False).drop_duplicates(["date"]).sort_values(["date"]))
if self._mini_symbol_map:
logger.warning(f"less than {self.min_numbers_trading} stock list: {list(self._mini_symbol_map.keys())}")
logger.info(f"total {len(self.stock_list)}, error: {len(set(stock_list))}")
self.download_index_data()
@abc.abstractmethod
def download_index_data(self):
"""download index data"""
raise NotImplementedError("rewrite download_index_data")
@abc.abstractmethod
def normalize_symbol(self, symbol: str):
"""normalize symbol"""
raise NotImplementedError("rewrite normalize_symbol")
class YahooCollectorCN(YahooCollector):
@property
def min_numbers_trading(self):
if self._interval == "1m":
return 60 * 4 * 5
elif self._interval == "1d":
return 252 / 4
def get_stock_list(self):
logger.info("get HS stock symbos......")
symbols = get_hs_stock_symbols()
logger.info(f"get {len(symbols)} symbols.")
return symbols
def download_index_data(self):
# TODO: from MSN
# FIXME: 1m
if self._interval == "1d":
_format = "%Y%m%d"
_begin = self._start_datetime.strftime(_format)
_end = (self._end_datetime + pd.Timedelta(days=-1)).strftime(_format)
for _index_name, _index_code in {"csi300": "000300", "csi100": "000903"}.items():
logger.info(f"get bench data: {_index_name}({_index_code})......")
try:
df = pd.DataFrame(
map(
lambda x: x.split(","),
requests.get(INDEX_BENCH_URL.format(index_code=_index_code, begin=_begin, end=_end)).json()[
"data"
]["klines"],
)
)
except Exception as e:
logger.warning(f"get {_index_name} error: {e}")
continue
df.columns = ["date", "open", "close", "high", "low", "volume", "money", "change"]
df["date"] = pd.to_datetime(df["date"])
df = df.astype(float, errors="ignore")
df["adjclose"] = df["close"]
df.to_csv(self.save_dir.joinpath(f"sh{_index_code}.csv"), index=False)
else:
logger.warning(f"{self.__class__.__name__} {self._interval} does not support: downlaod_index_data")
def normalize_symbol(self, symbol):
symbol_s = symbol.split(".")
symbol = f"sh{symbol_s[0]}" if symbol_s[-1] == "ss" else f"sz{symbol_s[0]}"
return symbol
@property
def _timezone(self):
return "Asia/Shanghai"
class YahooCollectorUS(YahooCollector):
@property
def min_numbers_trading(self):
if self._interval == "1m":
return 60 * 6.5 * 5
elif self._interval == "1d":
return 252 / 4
def get_stock_list(self):
logger.info("get US stock symbols......")
symbols = get_us_stock_symbols() + [
"^GSPC",
"^NDX",
"^DJI",
]
logger.info(f"get {len(symbols)} symbols.")
return symbols
def download_index_data(self):
pass
def normalize_symbol(self, symbol):
return symbol.upper()
@property
def _timezone(self):
return "America/New_York"
class YahooNormalize:
COLUMNS = ["open", "close", "high", "low", "volume"]
def __init__(self, source_dir: [str, Path], target_dir: [str, Path], max_workers: int = 16):
"""
Parameters
----------
source_dir: str or Path
The directory where the raw data collected from the Internet is saved
target_dir: str or Path
Directory for normalize data
max_workers: int
Concurrent number, default is 16
"""
if not (source_dir and target_dir):
raise ValueError("source_dir and target_dir cannot be None")
self._source_dir = Path(source_dir).expanduser()
self._target_dir = Path(target_dir).expanduser()
self._max_workers = max_workers
self._calendar_list = self._get_calendar_list()
def normalize_data(self):
logger.info("normalize data......")
def _normalize(source_path: Path):
columns = copy.deepcopy(self.COLUMNS)
df = pd.read_csv(source_path)
df.set_index("date", inplace=True)
df.index = pd.to_datetime(df.index)
df = df[~df.index.duplicated(keep="first")]
if self._calendar_list is not None:
df = df.reindex(pd.DataFrame(index=self._calendar_list).loc[df.index.min() : df.index.max()].index)
df.sort_index(inplace=True)
df.loc[(df["volume"] <= 0) | np.isnan(df["volume"]), set(df.columns) - {"symbol"}] = np.nan
df["factor"] = df["adjclose"] / df["close"]
for _col in columns:
if _col == "volume":
df[_col] = df[_col] / df["factor"]
else:
df[_col] = df[_col] * df["factor"]
_tmp_series = df["close"].fillna(method="ffill")
df["change"] = _tmp_series / _tmp_series.shift(1) - 1
columns += ["change", "factor"]
df.loc[(df["volume"] <= 0) | np.isnan(df["volume"]), columns] = np.nan
df.index.names = ["date"]
df.loc[:, columns].to_csv(self._target_dir.joinpath(source_path.name))
with ThreadPoolExecutor(max_workers=self._max_workers) as worker:
file_list = list(self._source_dir.glob("*.csv"))
with tqdm(total=len(file_list)) as p_bar:
for _ in worker.map(_normalize, file_list):
p_bar.update()
def manual_adj_data(self):
"""adjust data"""
logger.info("manual adjust data......")
def _adj(file_path: Path):
df = pd.read_csv(file_path)
df = df.loc[:, ["open", "close", "high", "low", "volume", "change", "factor", "date"]]
df.sort_values("date", inplace=True)
df = df.set_index("date")
df = df.loc[df.first_valid_index() :]
_close = df["close"].iloc[0]
for _col in df.columns:
if _col == "volume":
df[_col] = df[_col] * _close
elif _col != "change":
df[_col] = df[_col] / _close
else:
pass
df.reset_index().to_csv(self._target_dir.joinpath(file_path.name), index=False)
with ThreadPoolExecutor(max_workers=self._max_workers) as worker:
file_list = list(self._target_dir.glob("*.csv"))
with tqdm(total=len(file_list)) as p_bar:
for _ in worker.map(_adj, file_list):
p_bar.update()
def normalize(self):
self.normalize_data()
self.manual_adj_data()
@abc.abstractmethod
def _get_calendar_list(self):
"""Get benchmark calendar"""
raise NotImplementedError("")
class YahooNormalizeUS(YahooNormalize):
def _get_calendar_list(self):
# TODO: from MSN
return get_calendar_list("US_ALL")
class YahooNormalizeCN(YahooNormalize):
def _get_calendar_list(self):
# TODO: from MSN
return get_calendar_list("ALL")
class Run:
def __init__(self, source_dir=None, normalize_dir=None, max_workers=4, region=REGION_CN):
"""
Parameters
----------
source_dir: str
The directory where the raw data collected from the Internet is saved, default "Path(__file__).parent/source"
normalize_dir: str
Directory for normalize data, default "Path(__file__).parent/normalize"
max_workers: int
Concurrent number, default is 4
region: str
region, value from ["CN", "US"], default "CN"
"""
if source_dir is None:
source_dir = CUR_DIR.joinpath("source")
self.source_dir = Path(source_dir).expanduser().resolve()
self.source_dir.mkdir(parents=True, exist_ok=True)
if normalize_dir is None:
normalize_dir = CUR_DIR.joinpath("normalize")
self.normalize_dir = Path(normalize_dir).expanduser().resolve()
self.normalize_dir.mkdir(parents=True, exist_ok=True)
self._cur_module = importlib.import_module("collector")
self.max_workers = max_workers
self.region = region
def download_data(
self,
max_collector_count=5,
delay=0,
start=None,
end=None,
interval="1d",
check_data_length=False,
limit_nums=None,
show_1m_logging=False,
):
"""download data from Internet
Parameters
----------
max_collector_count: int
default 5
delay: float
time.sleep(delay), default 0
interval: str
            freq, value from [1m, 1d], default 1d
start: str
start datetime, default "2000-01-01"
end: str
end datetime, default ``pd.Timestamp(datetime.datetime.now() + pd.Timedelta(days=1))``
check_data_length: bool
check data length, by default False
limit_nums: int
            used for debugging, by default None
show_1m_logging: bool
show 1m logging, by default False; if True, there may be many warning logs
Examples
---------
# get daily data
$ python collector.py download_data --source_dir ~/.qlib1/stock_data/source --region CN --start 2020-11-01 --end 2020-11-10 --delay 0.1 --interval 1d
# get 1m data
$ python collector.py download_data --source_dir ~/.qlib1/stock_data/source --region CN --start 2020-11-01 --end 2020-11-10 --delay 0.1 --interval 1m
"""
_class = getattr(self._cur_module, f"YahooCollector{self.region.upper()}")
_class(
self.source_dir,
max_workers=self.max_workers,
max_collector_count=max_collector_count,
delay=delay,
start=start,
end=end,
interval=interval,
check_data_length=check_data_length,
limit_nums=limit_nums,
show_1m_logging=show_1m_logging,
).collector_data()
def normalize_data(self):
"""normalize data
Examples
---------
$ python collector.py normalize_data --source_dir ~/.qlib1/stock_data/source --normalize_dir ~/.qlib1/stock_data/normalize --region CN
"""
_class = getattr(self._cur_module, f"YahooNormalize{self.region.upper()}")
_class(self.source_dir, self.normalize_dir, self.max_workers).normalize()
def collector_data(
self,
max_collector_count=5,
delay=0,
start=None,
end=None,
interval="1d",
check_data_length=False,
limit_nums=None,
show_1m_logging=False,
):
"""download -> normalize
Parameters
----------
max_collector_count: int
default 5
delay: float
time.sleep(delay), default 0
interval: str
            freq, value from [1m, 1d], default 1d
start: str
start datetime, default "2000-01-01"
end: str
end datetime, default ``pd.Timestamp(datetime.datetime.now() + pd.Timedelta(days=1))``
check_data_length: bool
check data length, by default False
limit_nums: int
            used for debugging, by default None
show_1m_logging: bool
show 1m logging, by default False; if True, there may be many warning logs
Examples
-------
python collector.py collector_data --source_dir ~/.qlib1/stock_data/source --normalize_dir ~/.qlib1/stock_data/normalize --region CN --start 2020-11-01 --end 2020-11-10 --delay 0.1 --interval 1d
"""
self.download_data(
max_collector_count=max_collector_count,
delay=delay,
start=start,
end=end,
interval=interval,
check_data_length=check_data_length,
limit_nums=limit_nums,
show_1m_logging=show_1m_logging,
)
self.normalize_data()
if __name__ == "__main__":
fire.Fire(Run)
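# ---------------------------------------------------------------------------
# Illustrative programmatic equivalent of the CLI calls documented above (an
# assumption-based sketch; the supported entry point is the fire CLI):
#
#   runner = Run(source_dir="~/.qlib1/stock_data/source",
#                normalize_dir="~/.qlib1/stock_data/normalize",
#                max_workers=4, region=REGION_US)
#   runner.download_data(start="2020-11-01", end="2020-11-10", interval="1d", delay=0.1)
#   runner.normalize_data()
# ---------------------------------------------------------------------------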
|
StarcoderdataPython
|
238478
|
<reponame>PrzemyslawSalek/9eats
from .models import Eats
from rest_framework import serializers
class EatsSerializers(serializers.ModelSerializer):
class Meta:
model = Eats
fields = ['id', 'name', 'price', 'ingredients', 'timestamp']
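# ---------------------------------------------------------------------------
# Hypothetical wiring sketch (not part of the original file): a standard DRF
# ModelViewSet exposing Eats through the serializer above. The viewset name
# and any router registration are assumptions.
#
#   from rest_framework import viewsets
#
#   class EatsViewSet(viewsets.ModelViewSet):
#       queryset = Eats.objects.all()
#       serializer_class = EatsSerializers
# ---------------------------------------------------------------------------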
|
StarcoderdataPython
|
11227166
|
from contributions import Contribution
from elections import (
Election,
Candidate,
Office,
Proposition,
PropositionFiler
)
from expenditures import Expenditure
from filers import Filer, Committee
from filings import Filing, Cycle, Summary
__all__ = (
'Contribution',
'Election',
'Candidate',
'Office',
'Proposition',
'PropositionFiler',
'Expenditure',
'Committee',
'Filer',
'Filing',
'Cycle',
'Summary'
)
|
StarcoderdataPython
|
4832897
|
import copy
import logging
import functools
from ptsemseg.loss.loss import cross_entropy2d
from ptsemseg.loss.loss import cross_entropy1d
from ptsemseg.loss.loss import bootstrapped_cross_entropy2d
from ptsemseg.loss.loss import multi_scale_cross_entropy2d
from ptsemseg.loss.loss import mse
from ptsemseg.loss.loss import masked_mse
logger = logging.getLogger('ptsemseg')
key2loss = {'cross_entropy_2d': cross_entropy2d,
'cross_entropy_1d': cross_entropy1d,
'masked_mse': masked_mse,
'bootstrapped_cross_entropy': bootstrapped_cross_entropy2d,
'multi_scale_cross_entropy': multi_scale_cross_entropy2d,
'mse': mse}
def get_loss_function(cfg, task=None):
if task is None:
logger.error("No task was specified when setting up loss function.")
raise ValueError("No task was specified when setting up loss function.")
if cfg['training']['loss'][task] is None:
logger.info("Using default cross entropy loss")
return cross_entropy2d
else:
loss_dict = cfg['training']['loss'][task]
loss_name = loss_dict['name']
loss_params = {k:v for k,v in loss_dict.items() if k != 'name'}
if loss_name not in key2loss:
raise NotImplementedError('Loss {} not implemented'.format(loss_name))
logger.info('Using {} with {} params'.format(loss_name,
loss_params))
return functools.partial(key2loss[loss_name], **loss_params)
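# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not from the original repo):
# shows the nested cfg layout that get_loss_function expects. The task key
# 'seg' and the chosen loss name are assumptions; valid names are the keys of
# key2loss above, and any extra keys are forwarded as loss parameters.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    cfg = {
        'training': {
            'loss': {
                'seg': {'name': 'cross_entropy_2d'},  # extra keys would become loss params
            }
        }
    }
    loss_fn = get_loss_function(cfg, task='seg')  # functools.partial around key2loss['cross_entropy_2d']
    print(loss_fn)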
|
StarcoderdataPython
|
6585896
|
<gh_stars>1-10
import numpy as np
# B-field strength for different depths of the Hall probe in the electromagnet
B = np.array([0, 0, 0, 1, 1, 1, 2, 4, 7, 13, 23, 43, 84, 166, 272, 351, 393, 413, 416, 423, 421, 426, 421, 422, 414, 411, 389, 343, 249, 136, 67, 35, 19, 10, 6, 3, 2, 1, 1, 0])
# Depth of the Hall probe in the electromagnet
s = np.array([145, 143, 141, 139, 137, 135, 133, 131, 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 110, 109, 108, 107, 106, 105, 104, 103, 101, 99, 97, 95, 93, 91, 89, 87, 85, 83, 81, 79, 77, 75])
# Transmission wavelength of the monochromator / interference filter
_lambda = np.array([1000, 1100, 1200, 1300, 1400, 1500, 1550, 1600])
# Angles measured for the high-purity GaAs sample
hr_d = 5.11
hr_theta_1_grad = np.array([137, 139, 140, 142, 144, 144, 156, 155])
hr_theta_2_grad = np.array([164, 160, 159, 159, 158, 156, 162, 162])
hr_theta_1_min = np.array([34, 0, 10, 15, 55, 0, 26, 8])
hr_theta_2_min = np.array([35, 32, 44, 24, 40, 26, 26, 25])
# Angles measured for the doped GaAs sample with N = 1.2e18
N_1 = 1.2e18
d_1 = 1.36
theta_1_grad_1= np.array([144, 144, 146, 146, 147, 147, 154, 154])
theta_2_grad_1 = np.array([152, 153, 153, 153, 154, 159, 162, 161])
theta_1_min_1 = np.array([17, 0, 39, 0, 11, 40, 26, 34])
theta_2_min_1 = np.array([23, 37, 20, 32, 12, 0, 19, 55])
# Angles measured for the doped GaAs sample with N = 2.8e18
N_2 = 2.8e18
d_2 = 1.296
theta_1_grad_2= np.array([147, 146, 147, 148, 146, 146, 153, 153])
theta_2_grad_2 = np.array([160, 156, 156, 154, 156, 156, 164, 162])
theta_1_min_2 = np.array([7, 51, 6, 22, 47, 15, 27, 40])
theta_2_min_2 = np.array([25, 11, 9, 40, 8, 50, 51, 0])
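# ---------------------------------------------------------------------------
# Illustrative post-processing sketch (not part of the original measurement
# file): converts the degree/arc-minute analyser readings above to decimal
# degrees and estimates the Faraday rotation per sample thickness as half the
# difference of the two analyser positions. The halving and the thickness
# normalisation are assumptions about the evaluation procedure.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    hr_theta_1 = hr_theta_1_grad + hr_theta_1_min / 60.0   # decimal degrees
    hr_theta_2 = hr_theta_2_grad + hr_theta_2_min / 60.0
    hr_rotation_per_mm = 0.5 * (hr_theta_2 - hr_theta_1) / hr_d
    print(hr_rotation_per_mm)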
|
StarcoderdataPython
|
4929975
|
<gh_stars>1-10
from anime2021.anime import AShape, RollingPolygon, AImage
import IPython
sinamon="https://pics.prcm.jp/647a40a3a449f/85207406/png/85207406.png"
shape = AImage(100,100,image=sinamon)
IPython.display.Image(test_shape(shape))
class GuruGurusinamon(AShape):
def __init__(self,width=50, height=None, cx=None, cy=None, N=8):
AShape.__init__(self, width, height, cx, cy)
self.poly = RollingPolygon(width, height, N=N)
self.sinamon=AImage(width, height, image=sinamon)
def render(self, canvas, tick):
self.poly.cx=self.cx
self.poly.cy=self.cy
self.sinamon.cx=self.cx
self.sinamon.cy=self.cy
self.poly.render(canvas, tick)
self.sinamon.render(canvas, tick)
|
StarcoderdataPython
|
1825586
|
<reponame>co2meal/-bnpy-dev
"""
The :mod:`viz` module provides visualization capability
"""
import BarsViz
import BernViz
import GaussViz
import SequenceViz
import PlotTrace
import PlotELBO
import PlotK
import PlotHeldoutLik
import PlotParamComparison
import PlotComps
import JobFilter
import TaskRanker
__all__ = ['GaussViz', 'BernViz', 'BarsViz', 'SequenceViz',
'PlotTrace', 'PlotELBO', 'PlotK',
'PlotComps', 'PlotParamComparison',
'PlotHeldoutLik', 'JobFilter', 'TaskRanker']
|
StarcoderdataPython
|
327827
|
## Python imports
import boto3
from botocore.exceptions import EndpointConnectionError, ClientError
import botocore
import collections
import csv
import json
import smtplib
import os, hmac, hashlib, sys
import pprint
import logging
from sys import exit
import time
import res.utils as utils
import config
# Consul imports
from consul_kv import Connection
conn = Connection(endpoint='http://consul.foo.bar:8500/v1/')
# AWS Services imports
import res.glob as glob
import res.compute as compute
import res.storage as storage
import res.db as db
import res.dev as dev
import res.iam as iam
import res.network as net
import res.fact as fact
import res.security as security
import res.analytics as analytics
import res.management as mgn
import res.business as bus
import res.integration as integ
import res.awsthread as awsthread
# --- AWS basic information
ownerId = utils.get_ownerID()
config.logger.info('OWNER ID:'+ownerId)
# --- AWS Regions
with open('aws_regions.json') as json_file:
aws_regions = json.load(json_file)
regions = aws_regions.get('Regions',[] )
# --- Inventory initialization
inventory = {}
# --- Argumentation. See function check_arguments.
#
# If we find log level parameter, we adjust log level.
# If we find no service name, we inventory all services.
# Else we only inventory services passed in cmd line.
arguments = utils.check_arguments(sys.argv[1:])
nb_arg = len(arguments)
# if no arguments, we try all AWS services
if (nb_arg == 0):
arguments = config.SUPPORTED_COMMANDS
    arguments.remove('ce') # Because it's not free, Cost Explorer is removed from the default inventory. You need to call it explicitly.
# --- Displaying execution parameters
print('-'*100)
print ('Number of services :', len(arguments))
print ('Services List :', str(arguments))
print('-'*100)
print()
# --- Progression counter initialization
config.nb_units_done = 0
for svc in arguments:
config.nb_units_todo += (config.nb_regions * config.SUPPORTED_INVENTORIES[svc])
#
# Let's rock'n roll
#
thread_list = []
# Execution time, for information
t0 = time.time()
#################################################################
# COMPUTE #
#################################################################
#
# ----------------- EC2
#
if ('ec2' in arguments):
thread_list.append(awsthread.AWSThread("ec2", compute.get_ec2_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ec2-network-interfaces", compute.get_interfaces_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ec2-vpcs", compute.get_vpc_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ec2-ebs", compute.get_ebs_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ec2-security-groups", compute.get_sg_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ec2-internet-gateways", compute.get_igw_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ec2-nat-gateways", compute.get_ngw_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ec2-subnets", compute.get_subnet_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ec2-eips", compute.get_eips_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ec2-egpus", compute.get_egpus_inventory, ownerId))
#
# ----------------- Lambda functions
#
if ('lambda' in arguments):
thread_list.append(awsthread.AWSThread("lambda", compute.get_lambda_inventory, ownerId))
#
# ----------------- Elastic beanstalk
#
if ('elasticbeanstalk' in arguments):
thread_list.append(awsthread.AWSThread("elasticbeanstalk-environments", compute.get_elasticbeanstalk_environments_inventory, ownerId))
thread_list.append(awsthread.AWSThread("elasticbeanstalk-applications", compute.get_elasticbeanstalk_applications_inventory, ownerId))
#
# ----------------- ECS
#
if ('ecs' in arguments):
thread_list.append(awsthread.AWSThread("ecs-clusters", compute.get_ecs_inventory, ownerId))
thread_list.append(awsthread.AWSThread("ecs-tasks", compute.get_ecs_tasks_inventory, ownerId))
#
# ----------------- Lighstail instances
#
if ('lightsail' in arguments):
thread_list.append(awsthread.AWSThread('lightsail', compute.get_lightsail_inventory, ownerId))
#
# ----------------- Autoscaling
#
if ('autoscaling' in arguments):
thread_list.append(awsthread.AWSThread('autoscaling', compute.get_autoscaling_inventory, ownerId))
#
# ----------------- EKS inventory
#
if ('eks' in arguments):
thread_list.append(awsthread.AWSThread('eks',compute.get_eks_inventory, ownerId))
#
# ----------------- Batch jobs inventory
#
if ('batch' in arguments):
thread_list.append(awsthread.AWSThread('batch', compute.get_batch_inventory, ownerId))
#################################################################
# STORAGE #
#################################################################
#
# ----------------- EFS inventory
#
if ('efs' in arguments):
thread_list.append(awsthread.AWSThread('efs', storage.get_efs_inventory, ownerId))
#
# ----------------- Glacier inventory
#
if ('glacier' in arguments):
thread_list.append(awsthread.AWSThread('glacier', storage.get_glacier_inventory, ownerId))
#
# ----------------- Storage gateway inventory
#
if ('storagegateway' in arguments):
thread_list.append(awsthread.AWSThread('storagegateway', storage.get_storagegateway_inventory, ownerId))
#################################################################
# DATABASES #
#################################################################
#
# ----------------- RDS inventory
#
if ('rds' in arguments):
thread_list.append(awsthread.AWSThread('rds', db.get_rds_inventory, ownerId))
#
# ----------------- dynamodb inventory
#
if ('dynamodb' in arguments):
thread_list.append(awsthread.AWSThread('dynamodb', db.get_dynamodb_inventory, ownerId))
#
# ----------------- Neptune inventory
#
if ('neptune' in arguments):
thread_list.append(awsthread.AWSThread('neptune', db.get_neptune_inventory, ownerId))
#
# ----------------- Redshift inventory
#
if ('redshift' in arguments):
thread_list.append(awsthread.AWSThread('redshift', db.get_redshift_inventory, ownerId))
#
# ----------------- Elasticache inventory
#
if ('elasticache' in arguments):
thread_list.append(awsthread.AWSThread('elasticache', db.get_elasticache_inventory, ownerId))
#################################################################
# SECURITY & IAM #
#################################################################
#
# ----------------- KMS inventory
#
if ('kms' in arguments):
thread_list.append(awsthread.AWSThread('kms', iam.get_kms_inventory, ownerId))
#
# ----------------- Cloud directory
#
if ('clouddirectory' in arguments):
thread_list.append(awsthread.AWSThread('clouddirectory', security.get_clouddirectory_inventory, ownerId))
#
# ----------------- ACM (Certificates) inventory
#
if ('acm' in arguments):
thread_list.append(awsthread.AWSThread('acm', security.get_acm_inventory, ownerId))
#
# ----------------- ACMPCA (Certificates) inventory Private Certificate Authority
#
if ('acm-pca' in arguments):
thread_list.append(awsthread.AWSThread('acm-pca', security.get_acm_inventory, ownerId))
#
# ----------------- Secrets Manager
#
if ('secrets' in arguments):
thread_list.append(awsthread.AWSThread('secrets', security.get_secrets_inventory, ownerId))
#
# ----------------- Cloud HSM
#
if ('hsm' in arguments):
thread_list.append(awsthread.AWSThread('hsm', security.get_hsm_inventory, ownerId))
#################################################################
# DEVELOPER TOOLS #
#################################################################
#
# ----------------- CodeStar inventory
#
if ('codestar' in arguments):
thread_list.append(awsthread.AWSThread('codestar', dev.get_codestar_inventory, ownerId))
#################################################################
# INTEGRATION #
#################################################################
#
# ----------------- Simple Queue Service inventory
#
if ('sqs' in arguments):
thread_list.append(awsthread.AWSThread('sqs', integ.get_sqs_inventory, ownerId))
#
# ----------------- Amazon MQ inventory
#
if ('mq' in arguments):
thread_list.append(awsthread.AWSThread('mq', integ.get_mq_inventory, ownerId))
#
# ----------------- Simple Notification Service inventory
#
if ('sns' in arguments):
thread_list.append(awsthread.AWSThread('sns', integ.get_sns_inventory, ownerId))
#################################################################
# ANALYTICS #
#################################################################
#
# ----------------- ElasticSearch
#
if ('es' in arguments):
thread_list.append(awsthread.AWSThread('es', analytics.get_es_inventory, ownerId))
#
# ----------------- Cloudsearch
#
if ('cloudsearch' in arguments):
thread_list.append(awsthread.AWSThread('cloudsearch', analytics.get_cloudsearch_inventory, ownerId))
#
# ----------------- Data Pipeline
#
if ('datapipeline' in arguments):
thread_list.append(awsthread.AWSThread('datapipeline', analytics.get_datapipeline_inventory, ownerId))
#
# ----------------- Elastic MapReduce
#
if ('emr' in arguments):
thread_list.append(awsthread.AWSThread('emr', analytics.get_emr_inventory, ownerId))
#################################################################
# MANAGEMENT #
#################################################################
#
# ----------------- CloudFormation
#
if ('cloudformation' in arguments):
thread_list.append(awsthread.AWSThread('cloudformation', mgn.get_cloudformation_inventory, ownerId))
#
# ----------------- CloudTrail
#
if ('cloudtrail' in arguments):
thread_list.append(awsthread.AWSThread('cloudtrail', mgn.get_cloudtrail_inventory, ownerId))
#
# ----------------- CloudWatch
#
if ('cloudwatch' in arguments):
thread_list.append(awsthread.AWSThread('cloudwatch', mgn.get_cloudwatch_inventory, ownerId))
#################################################################
# NETWORK #
#################################################################
#
# ----------------- API Gateway inventory
#
if ('apigateway' in arguments):
thread_list.append(awsthread.AWSThread('apigateway', net.get_apigateway_inventory, ownerId))
#
# ----------------- Route 53 inventory
#
if ('route53' in arguments):
thread_list.append(awsthread.AWSThread('route53', net.get_route53_inventory, ownerId))
#
# ----------------- CloudFront inventory
#
if ('cloudfront' in arguments):
thread_list.append(awsthread.AWSThread('cloudfront', net.get_cloudfront_inventory, ownerId))
#
# ----------------- Load Balancer inventory
#
if ('elb' in arguments):
thread_list.append(awsthread.AWSThread('elb', net.get_elb_inventory, ownerId))
#
# ----------------- Load Balancer v2 inventory
#
if ('elbv2' in arguments):
thread_list.append(awsthread.AWSThread('elbv2', net.get_elbv2_inventory, ownerId))
#################################################################
# BUSINESS PRODUCTIVITY #
#################################################################
#
# ----------------- Alexa for Business
#
if ('alexa' in arguments):
thread_list.append(awsthread.AWSThread('alexa', bus.get_alexa_inventory, ownerId))
#
# ----------------- WorkDocs (not implemented)
#
if ('workdocs' in arguments):
thread_list.append(awsthread.AWSThread('workdocs', bus.get_workdocs_inventory, ownerId))
#
# ----------------- Workmail (not well tested, some rights issues)
#
if ('workmail' in arguments):
thread_list.append(awsthread.AWSThread('workmail', bus.get_workmail_inventory, ownerId))
#
# ----------------- Cost Explorer (experimental)
#
if ('ce' in arguments):
ce_inventory = []
utils.display(ownerId, 'global', "cost explorer inventory", "")
list_ce = fact.get_ce_inventory(ownerId, None).get('ResultsByTime')
for item in list_ce:
ce_inventory.append(json.loads(utils.json_datetime_converter(item)))
inventory['cost-explorer'] = ce_inventory
#################################################################
# International Resources (no region) #
#################################################################
region_name = 'global'
#
# ----------------- S3 quick inventory
#
if ('s3' in arguments):
thread_list.append(awsthread.AWSThread('s3', storage.get_s3_inventory, ownerId))
# -------------------------------------------------------------------
#
# Thread management
#
# -------------------------------------------------------------------
for th in thread_list:
th.start()
for th in thread_list:
th.join()
#
# ----------------- Gathering all inventories
#
for svc in arguments:
# Some particular cases
if (svc == "ec2"):
inventory["ec2"] = config.global_inventory["ec2"]
inventory["ec2-network-interfaces"] = config.global_inventory["ec2-network-interfaces"]
inventory["ec2-ebs"] = config.global_inventory["ec2-ebs"]
inventory["ec2-vpcs"] = config.global_inventory["ec2-vpcs"]
inventory["ec2-security-groups"] = config.global_inventory["ec2-security-groups"]
inventory["ec2-internet-gateways"] = config.global_inventory["ec2-internet-gateways"]
inventory["ec2-nat-gateways"] = config.global_inventory["ec2-nat-gateways"]
inventory["ec2-subnets"] = config.global_inventory["ec2-subnets"]
inventory["ec2-eips"] = config.global_inventory["ec2-eips"]
inventory["ec2-egpu"] = config.global_inventory["ec2-egpus"]
elif (svc == "ecs"):
inventory["ecs"] = {
"ecs-clusters": config.global_inventory["ecs-clusters"],
"ecs-tasks": config.global_inventory["ecs-tasks"]
}
elif (svc == "elasticbeanstalk"):
inventory["elasticbeanstalk"] = {
"elasticbeanstalk-environments": config.global_inventory["elasticbeanstalk-environments"],
"elasticbeanstalk-applications": config.global_inventory["elasticbeanstalk-applications"]
}
else:
# General case
inventory[svc] = config.global_inventory[svc]
execution_time = time.time() - t0
print("\n\nAll inventories are done. Duration: {:2f} seconds\n".format(execution_time))
#
# ----------------- Final inventory
#
filename_json = 'AWS_{}_{}.json'.format(ownerId, config.timestamp)
try:
json_file = open(config.filepath+filename_json,'w+')
except IOError as e:
config.logger.error("I/O error({0}): {1}".format(e.errno, e.strerror))
json_file.write(json.JSONEncoder().encode(inventory))
## Put JSON on Consul
conn.put('aws', json.JSONEncoder().encode(inventory))
#
# EOF
#
json_file.close()
#
# --- JSON-to-CSV converter
#
# This is the end
#
print("End of processing.\n")
|
StarcoderdataPython
|
11338467
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import unittest
import os
from pyiron.base.project.generic import Project
class TestGenericJob(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.file_location = os.path.dirname(os.path.abspath(__file__))
cls.project = Project(os.path.join(cls.file_location, "jobs_testing"))
@classmethod
def tearDownClass(cls):
file_location = os.path.dirname(os.path.abspath(__file__))
project = Project(os.path.join(file_location, "jobs_testing"))
project.remove(enforce=True)
# def test_generic_jobs(self):
# ham = self.project.create_job("ExampleJob", "job_single")
# job_ser = self.project.create_job("GenericMaster", "job_list")
# job_ser.append(ham)
# job_ser.to_hdf()
# job_ser_reload = self.project.create_job("GenericMaster", "job_list")
# job_ser_reload.from_hdf()
# self.assertTrue(job_ser_reload['job_single/input/input_inp'])
# job_ser.remove()
# ham.remove()
#
# def test_generic_jobs_ex(self):
# ham = self.project.create_job("ExampleJob", "job_single_ex")
# ham.to_hdf()
# job_ser = self.project.create_job("GenericMaster", "job_list_ex")
# job_ser.append(ham)
# job_ser.to_hdf()
# self.assertTrue(job_ser['job_single_ex/input/input_inp'])
# job_ser_reload = self.project.create_job("GenericMaster", "job_list_ex")
# job_ser_reload.from_hdf()
# self.assertTrue(job_ser_reload['job_single_ex/input/input_inp'])
# job_ser.remove()
# ham.remove()
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
5107983
|
import os
import os.path as osp
import argparse
import glob
import lmdb
import pickle
import random
import cv2
import numpy as np
from tqdm import tqdm
import sys
sys.path.append(os.getcwd())
from codes.utils.base_utils import recompute_hw
def create_lmdb(dataset, raw_dir, lmdb_dir, filter_file='', downscale_factor=-1, compress=False):
# assert dataset in ('VimeoTecoGAN', 'VimeoTecoGAN-sub', 'Actors'), \
# 'Unknown Dataset: {}'.format(dataset)
print('Creating lmdb dataset: {}'.format(dataset))
# retrieve sequences
if filter_file: # dump selective data into lmdb
with open(filter_file, 'r') as f:
seq_dir_lst = sorted([line.strip() for line in f])
else:
seq_dir_lst = [sorted(os.listdir(rd)) for rd in raw_dir]
counter = 0
for i, _ in enumerate(raw_dir):
counter += len(seq_dir_lst[i])
print('Number of sequences: {}'.format(counter))
# compute space to allocate
print(len(seq_dir_lst))
print('Calculating space needed for LMDB generation ... ', end='')
nbytes = 0
extension = 'jpg' if dataset.startswith('Actors') else 'png'
for i, rd in enumerate(raw_dir):
for seq_dir in seq_dir_lst[i]:
if dataset.startswith('Actors'):
seq_dir = seq_dir + '/frames'
frm_path_lst = sorted(glob.glob(osp.join(rd, seq_dir, ('*.' + extension))))
img = cv2.imread(frm_path_lst[0], cv2.IMREAD_UNCHANGED)
if downscale_factor > 0:
h, w = img.shape[:2]
h, w = recompute_hw(h, w, factor=downscale_factor)
img = cv2.resize(img, dsize=(w, h))
nbytes_per_frm = img.nbytes
nbytes += len(frm_path_lst)*nbytes_per_frm
alloc_size = round(1.2*nbytes)
print('{:.2f} GB'.format(alloc_size / (1 << 30)))
# create lmdb environment
env = lmdb.open(lmdb_dir, map_size=alloc_size)
# write data to lmdb
commit_freq = 300
keys = []
txn = env.begin(write=True)
count = 0
for i, rd in enumerate(raw_dir):
for b, seq_dir in enumerate(seq_dir_lst[i]):
if dataset.startswith('Actors'):
seq_dir = seq_dir + '/frames'
# log
print('Processing {} ({}/{})\r'.format(
seq_dir, b, len(seq_dir_lst)), end='')
# setup
frm_path_lst = sorted(glob.glob(osp.join(rd, seq_dir, ('*.' + extension))))
n_frm = len(frm_path_lst)
# read frames
for i in tqdm(range(n_frm)):
count += 1
frm = cv2.imread(frm_path_lst[i], cv2.IMREAD_UNCHANGED)
if downscale_factor > 0:
h, w = frm.shape[:2]
h, w = recompute_hw(h, w, factor=downscale_factor)
frm = cv2.resize(frm, dsize=(w, h))
frm = np.ascontiguousarray(frm[..., ::-1]) # hwc|rgb|uint8
h, w, c = frm.shape
key = '{}_{}x{}x{}_{:04d}'.format(seq_dir, n_frm, h, w, i)
if compress:
frm = cv2.imencode('.jpg', frm)[1]
txn.put(key.encode('ascii'), frm)
keys.append(key)
# commit
if count % commit_freq == 0:
txn.commit()
txn = env.begin(write=True)
txn.commit()
env.close()
# create meta information
meta_info = {
'name': dataset,
'keys': keys
}
pickle.dump(meta_info, open(osp.join(lmdb_dir, 'meta_info.pkl'), 'wb'))
def check_lmdb(dataset, lmdb_dir, display=False, compress=False):
extension = 'jpg' if dataset.startswith('Actors') else 'png'
def visualize(win, img):
if display:
cv2.namedWindow(win, 0)
cv2.resizeWindow(win, img.shape[-2], img.shape[-3])
cv2.imshow(win, img[..., ::-1])
cv2.waitKey(0)
cv2.destroyAllWindows()
else:
cv2.imwrite('_'.join(win.split('/')) + '.' + extension, img[..., ::-1])
# assert dataset in ('VimeoTecoGAN', 'VimeoTecoGAN-sub', 'Actors'), \
# 'Unknown Dataset: {}'.format(dataset)
print('Checking lmdb dataset: {}'.format(dataset))
# load keys
meta_info = pickle.load(open(osp.join(lmdb_dir, 'meta_info.pkl'), 'rb'))
keys = meta_info['keys']
print('Number of keys: {}'.format(len(keys)))
# randomly select frame to visualize
with lmdb.open(lmdb_dir) as env:
for i in range(3): # replace to whatever you want
idx = random.randint(0, len(keys) - 1)
key = keys[idx]
# parse key
key_lst = key.split('_')
vid, sz, frm = '_'.join(key_lst[:-2]), key_lst[-2], key_lst[-1]
sz = tuple(map(int, sz.split('x')))
sz = (*sz[1:], 3)
print('video index: {} | size: {} | # of frame: {}'.format(
vid, sz, frm))
with env.begin() as txn:
buf = txn.get(key.encode('ascii'))
val = np.frombuffer(buf, dtype=np.uint8)
if compress:
val = cv2.imdecode(val, cv2.IMREAD_UNCHANGED)
print(val.shape)
val = val.reshape(*sz) # HWC
visualize(key, val)
if __name__ == '__main__':
# parse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=True,
help='VimeoTecoGAN or VimeoTecoGAN-sub')
parser.add_argument('--data_type', type=str, required=True,
help='GT or Bicubic4xLR')
parser.add_argument('--actors', nargs='+', type=str, default='',
help="list of actors to process")
parser.add_argument('--downscale_factor', type=int, default=-1)
parser.add_argument('--compress', action='store_true')
parser.set_defaults(compress=False)
parser.add_argument('--group', dest='group', action='store_true')
parser.add_argument('--separate', dest='group', action='store_false')
parser.set_defaults(group=False)
args = parser.parse_args()
# setup
if args.dataset.startswith('Actors'):
if args.group:
if not len(args.actors):
paths = ['data/{}/train/*/{}/'.format(args.dataset, args.data_type)]
additional_name = ''
else:
paths = [
'data/{}/train/{}/{}/'.format(args.dataset, actor, args.data_type)
for actor in args.actors
]
additional_name = '_'.join(args.actors)
raw_dir_list = []
for path in paths:
raw_dir_list.extend(glob.glob(path))
raw_dir_list = [raw_dir_list]
lmdb_dir_list = ['data/{}/train/{}{}.lmdb'.format(args.dataset, args.data_type, additional_name)]
filter_file_list = ['']
else:
if not len(args.actors):
                data_path = 'data/{}/train'.format(args.dataset)  # assumed layout, consistent with the per-actor paths below
                actor_list = glob.glob(data_path + '/*/')
                args.actors = [osp.basename(osp.normpath(actor)) for actor in actor_list]
raw_dir_list = [['data/{}/train/{}/{}'.format(args.dataset, actor, args.data_type)] for actor in args.actors]
lmdb_dir_list = ['data/{}/train/{}/{}.lmdb'.format(args.dataset, actor, args.data_type) for actor in args.actors]
filter_file_list = ['' for _ in range(len(args.actors))]
else:
raw_dir_list = [['data/{}/{}'.format(args.dataset, args.data_type)]]
        lmdb_dir_list = ['data/{}/{}.lmdb'.format(args.dataset, args.data_type)]
filter_file_list = ['']
# run
for raw_dir, lmdb_dir, filter_file in zip(raw_dir_list, lmdb_dir_list, filter_file_list):
print(lmdb_dir)
if osp.exists(lmdb_dir):
print('Dataset [{}] already exists'.format(args.dataset))
print('Checking the LMDB dataset ...')
check_lmdb(args.dataset, lmdb_dir, compress=args.compress)
else:
create_lmdb(args.dataset, raw_dir, lmdb_dir, filter_file,
downscale_factor=args.downscale_factor, compress=args.compress)
print('Checking the LMDB dataset ...')
check_lmdb(args.dataset, lmdb_dir, compress=args.compress)
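# ---------------------------------------------------------------------------
# Illustrative invocations (dataset names, actor ids and the script path are
# assumptions):
#
#   python codes/scripts/create_lmdb.py --dataset VimeoTecoGAN --data_type GT
#   python codes/scripts/create_lmdb.py --dataset Actors --data_type GT \
#       --actors actor_01 actor_02 --group --compress
# ---------------------------------------------------------------------------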
|
StarcoderdataPython
|
3519800
|
<gh_stars>0
import numpy as np
from \
kgmk.dsa.algebra.modular \
.matrix.jit \
import (
mdot,
matpow,
)
def test():
mod = 10 ** 9 + 7
a = np.arange(
1,
10001,
).reshape((100, 100))
a = matpow(a, 1 << 8, mod)
print(a)
a = matpow(a, 0, mod)
print(a)
if __name__ == '__main__':
test()
|
StarcoderdataPython
|
6416574
|
from typing import List
from pydantic import validator
from fastapi import HTTPException
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
from fidesops.schemas.base_class import BaseSchema
from fidesops.api.v1.scope_registry import SCOPE_REGISTRY
class UserPermissionsCreate(BaseSchema):
"""Data required to create a FidesopsUserPermissions record"""
scopes: List[str]
@validator("scopes")
def validate_scopes(cls, scopes: List[str]) -> List[str]:
"""Validates that all incoming scopes are valid"""
diff = set(scopes).difference(set(SCOPE_REGISTRY))
if len(diff) > 0:
raise HTTPException(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
detail=f"Invalid Scope(s) {diff}. Scopes must be one of {SCOPE_REGISTRY}.",
)
return scopes
class UserPermissionsEdit(UserPermissionsCreate):
"""Data required to edit a FidesopsUserPermissions record"""
id: str
class UserPermissionsResponse(UserPermissionsCreate):
"""Response after creating, editing, or retrieving a FidesopsUserPermissions record"""
id: str
user_id: str
|
StarcoderdataPython
|
392611
|
""" New Providers Command """
from masonite.commands import BaseScaffoldCommand
class ProviderCommand(BaseScaffoldCommand):
"""
Creates a new Service Provider
provider
{name : Name of the Service Provider you want to create}
"""
scaffold_name = 'Service Provider'
base_directory = 'app/providers/'
template = '/masonite/snippets/scaffold/provider'
|
StarcoderdataPython
|
6630348
|
# Global paths
glob_lib_paths = [r'C:\Git\pyDMPC\pyDMPC\ModelicaModels\ModelicaModels',
r'C:\Git\modelica-buildings\Buildings',
r'C:\Git\AixLib\AixLib']
glob_res_path = r'C:\TEMP\Dymola'
glob_dym_path = r'C:\Program Files\Dymola 2018 FD01\Modelica\Library\python_interface\dymola.egg'
# Working directory
import time
timestr = time.strftime("%Y%m%d_%H%M%S")
name_wkdir = r'pyDMPC_' + 'wkdir' + timestr
# Controlled system
contr_sys_typ = "Modelica"
ads_id = '5.59.199.202.1.1'
ads_port = 851
name_fmu = 'pyDMPCFMU_Geo.fmu'
orig_fmu_path = glob_res_path + '\\' + name_fmu
dest_fmu_path = glob_res_path + '\\' + name_wkdir + '\\' + name_fmu
time_incr = 120
# States
inputs = []
input_names = []
traj_points = []
input_variables = []
commands = []
command_variables = []
output_names = []
set_points = []
state_var_names = []
model_state_var_names = []
traj_var = []
# Times
start = []
stop = []
incr = []
opt_time = []
samp_time = []
# Paths
lib_paths = []
res_path = []
dym_path = []
mod_path = []
command_names = []
# Modifiers
cost_fac = []
# Variation
min_var = []
max_var = []
inc_var = []
# Subsystem Config
model_type = []
name = []
sys_id = []
ups_neigh = []
downs_neigh = []
par_neigh = []
# Subsystems
sys_id.append(0)
name.append("Field")
model_type.append("Modelica")
ups_neigh.append(1)
downs_neigh.append(None)
par_neigh.append(None)
input_names.append(["returnTemperature.T"])
input_variables.append(["external"])
inputs.append([])
output_names.append(["returnTemperature.T"])
set_points.append([287])
state_var_names.append(["supplyTemperatureMeas"])
model_state_var_names.append(["vol.T_start"])
start.append(0.)
stop.append(3600.0*24*365.25*3)
incr.append(3600.)
opt_time.append(10800)
samp_time.append(10)
lib_paths.append(glob_lib_paths)
res_path.append(glob_res_path + "\\" + name_wkdir)
dym_path.append(glob_dym_path)
mod_path.append(r'ModelicaModels.SubsystemModels.DetailedModels.Geo.Field')
command_names.append(["heatShare"])
command_variables.append(["decisionVariables.table[1,2]"])
commands.append(range(0,105,5))
traj_points.append(range(278,310,1))
traj_var.append(["supplyTemperature.T"])
cost_fac.append([0.0, 0.0, 1.0])
sys_id.append(1)
name.append("Building")
model_type.append("Modelica")
ups_neigh.append(None)
downs_neigh.append([0])
par_neigh.append(None)
input_names.append(["supplyTemperature.T"])
input_variables.append([r"variation.table[1,2]"])
inputs.append(range(280,310,5))
output_names.append(["returnTemperature"])
set_points.append([287])
state_var_names.append(["sine.y"])
model_state_var_names.append(["const.k"])
start.append(0.)
stop.append(7200.)
incr.append(10.)
opt_time.append(600)
samp_time.append(10)
lib_paths.append(glob_lib_paths)
res_path.append(glob_res_path + "\\" + name_wkdir)
dym_path.append(glob_dym_path)
mod_path.append(r'ModelicaModels.SubsystemModels.DetailedModels.Geo.Building')
command_names.append([])
command_variables.append(["decisionVariables.table[1,2]"])
commands.append(range(0,105,5))
traj_points.append([])
traj_var.append([])
cost_fac.append([-0.01, 1.0, 0.0])
|
StarcoderdataPython
|
1619945
|
# challenge 8: unit-of-measure converter
m = float(input('Enter a value in meters: '))
km = m / 1000
hm = m / 100
dam = m / 10
dm = m * 10
cm = m * 100
mm = m * 1000
print(f'The measurement of {m}m corresponds to: \n {km:.5}km \n {hm}hm \n {dam}dam \n {dm}dm \n {cm:.0f}cm \n {mm:.0f}mm')
|
StarcoderdataPython
|
11205847
|
#!/usr/bin/env python2
# -- coding: utf-8 --
import urllib, os, json, datetime, requests, urlparse
import utils
url = utils.API_URL
token = utils.get_api_key()
headers = utils.get_headers(token)
user = raw_input('Username to export (case sensitive):')
# user = "USERNAME" #<--- Put username here. Case sensitive.
# request_pks = [6996] #<--- To export by MR number
page = 1
next_ = url+"foia/?user="+user
while next_ is not None: # Handling at the page level
r = requests.get(next_, headers=headers)
try:
json_data = r.json()
print 'Page %d of %d' % (page, json_data['count'] / 20 + 1)
next_ = json_data['next']
page += 1
for request in json_data['results']:
print "Working on request " + str(request["id"])
request_url = url + 'foia/%d/' % request["id"]
r2 = requests.get(request_url, headers=headers) #original
request_data = r2.json()
print "Here is the agency number " + str(request_data["agency"])
agency = requests.get("https://www.muckrock.com/api_v1/agency/" + str(request_data["agency"]), headers=headers)
agency_data = agency.json()
# get communications third
communications = request_data['communications']
if communications is None:
print "No communications for request #%d." % request_data["id"]
if not os.path.exists(str(request_data["id"])): # Checks to see if the folder exists.
username = requests.get(url + 'user/%d/' % request_data['user'], headers=headers).json()['username']
print username
dirName = username + '_' + agency_data['name'] + '_' + request_data['tracking_id']
# TODO better sanitization on directory names
print "Creating directory " + dirName
dirName = dirName.replace(";", "") # to sanitize it from semi-colons
dirName = dirName.replace(":", "") # to sanitize it from colons
os.makedirs(dirName)
else:
print "The directory already exists. Phew."
for communication in communications:
#print communication
for file in communication['files']:
fileurl = file['ffile']
split = urlparse.urlsplit(fileurl) # grabbed from http://stackoverflow.com/questions/2795331/python-download-without-supplying-a-filename
filename = split.path.split("/")[-1]
filename = str(communication["date"])+" "+filename
filename = filename.replace(";", "") # to sanitize it from semi-colons
filename = filename.replace(":", "") # to sanitize it from colons
urllib.urlretrieve(fileurl, dirName + '/' + filename)
print "Trying to grab the text from the communication"
# eventually this should save to pdf, maybe using this: https://github.com/mstamy2/PyPDF2/tree/master/Sample_Code
communicationText = communication["communication"].encode('ascii', 'ignore')
text_file = open(dirName + '/' + communication["date"] + " Communication.txt", "w+")
text_file.write(communicationText)
text_file.close()
print "File closed"
except:
print "There was an error of unknown origin"
# print r
# print r.text
|
StarcoderdataPython
|
1877379
|
<reponame>FynnBe/typed-argparse
# type: ignore
import json
from pathlib import Path
from setuptools import find_namespace_packages, setup
# Get the long description from the README file
ROOT_DIR = Path(__file__).parent.resolve()
long_description = (ROOT_DIR / "README.md").read_text(encoding="utf-8")
VERSION_FILE = ROOT_DIR / "dataclass_argparse" / "VERSION"
VERSION = json.loads(VERSION_FILE.read_text(encoding="utf-8"))["version"]
setup(
name="dataclass-argparse",
version=VERSION,
description="argparse with a type annotated namespace dataclass",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/fynnbe/dataclass-argparse",
author="<NAME>",
classifiers=[ # Optional
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
packages=find_namespace_packages(exclude=["tests"]), # Required
install_requires=[],
extras_require={"test": ["mypy"], "dev": ["pre-commit"]},
include_package_data=True,
project_urls={ # Optional
"Bug Reports": "https://github.com/fynnbe/dataclass-argparse/issues",
"Source": "https://github.com/fynnbe/dataclass-argparse",
},
)
|
StarcoderdataPython
|
8152177
|
#!/usr/bin/python
import os
import sys
import math
import struct
import socket #for sockets
import shutil
time = 0
scoreLeft = 0
scoreRight = 0
agentsLeftStart = [False]*11
agentsRightStart = [False]*11
agentsLeftExisted = [False]*11
agentsRightExisted = [False]*11
agentsLeftHere = [False]*11
agentsRightHere = [False]*11
if len(sys.argv) < 2:
print "Usage: gameMonitor.py <output_file> [host] [port]"
sys.exit()
outputFile = sys.argv[1]
outputFile = os.path.dirname(outputFile) + "/" + os.path.basename(outputFile)
print outputFile
host = "localhost"
if len(sys.argv) > 2:
host = sys.argv[2]
port = 3200
if len(sys.argv) > 3:
port = int(sys.argv[3])
try:
#create an AF_INET, STREAM socket (TCP)
sserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, msg:
print 'Failed to create socket. Error code: ' + str(msg[0]) + ' , Error message : ' + msg[1]
sys.exit();
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
host = socket.gethostbyname( host )
except socket.gaierror:
#could not resolve
print 'Hostname could not be resolved. Exiting'
sys.exit()
#Connect to remote server
sserver.connect((host, port))
reqfullstate = True
while True:
msgSize = sserver.recv(4)
msgSize = struct.unpack("!L", msgSize)
msgFromServer = sserver.recv(msgSize[0])
msgFromServer = msgFromServer.replace('(', ' ').replace(')', ' ')
timeIndex = msgFromServer.find("time")
if timeIndex != -1:
tokens = msgFromServer[timeIndex:].split()
time = float(tokens[1])
scoreLeftIndex = msgFromServer.find("score_left")
if scoreLeftIndex != -1:
tokens = msgFromServer[scoreLeftIndex:].split()
scoreLeft = int(tokens[1])
scoreRightIndex = msgFromServer.find("score_right")
if scoreRightIndex != -1:
tokens = msgFromServer[scoreRightIndex:].split()
scoreRight = int(tokens[1])
if msgFromServer.find("RSG") != -1:
agentsLeftHere = [False]*11
agentsRightHere = [False]*11
for i in range(1,12):
if msgFromServer.find("Num" + str(i) + " matLeft") != -1:
if time == 0:
agentsLeftStart[i-1] = True
agentsLeftExisted[i-1] = True
agentsLeftHere[i-1] = True
if msgFromServer.find("Num" + str(i) + " matRight") != -1:
if time == 0:
agentsRightStart[i-1] = True
agentsRightExisted[i-1] = True
agentsRightHere[i-1] = True
if time >= 300.0:
#65479 is a message length when a message is truncated in error
if scoreLeftIndex == -1 or scoreRightIndex == -1 or reqfullstate or len(msgFromServer) == 65479:
msg = "(reqfullstate)"
try :
#Set the whole string
sserver.send(struct.pack("!I", len(msg)) + msg)
reqfullstate = False
except socket.error:
#Send failed
print 'Send failed'
continue
allCrashedLeft = True
allCrashedRight = True
f = open(outputFile, 'w')
scoreMe = scoreLeft
scoreOpp = scoreRight
'''
if os.path.basename(outputFile).find("_right") != -1:
scoreMe = scoreRight
scoreOpp = scoreLeft
'''
f.write("score = " + str(scoreMe) + " " + str(scoreOpp) + "\n")
fMissing = False
fCrash = False
for i in range(len(agentsLeftStart)):
if not agentsLeftExisted[i]:
f.write("missing_left " + str(i+1) + "\n")
fMissing = True
elif not agentsLeftStart[i]:
f.write("late_left " + str(i+1) + "\n")
fMissing = True
if not agentsLeftHere[i] and agentsLeftExisted[i]:
f.write("crash_left " + str(i+1) + "\n")
fCrash = True
elif agentsLeftExisted[i]:
allCrashedLeft = False
if not agentsRightExisted[i]:
f.write("missing_right " + str(i+1) + "\n")
fMissing = True
elif not agentsRightStart[i]:
f.write("late_right " + str(i+1) + "\n")
fMissing = True
if not agentsRightHere[i] and agentsRightExisted[i]:
f.write("crash_right " + str(i+1) + "\n")
fCrash = True
elif agentsRightExisted[i]:
allCrashedRight = False
if allCrashedLeft:
f.write("all_crashed_left\n")
if allCrashedRight:
f.write("all_crashed_right\n")
#f.write(str(msgSize) + "\n")
if fMissing or fCrash:
f.write("host " + socket.getfqdn(host) + "\n")
f.close()
break
sserver.close()
|
StarcoderdataPython
|
1935122
|
<filename>Lib/distutils/version.py
#
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# written by <NAME>, 1998/12/17
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes.
"""
def __init__ (self, vstring=None):
if vstring:
self.parse (vstring)
def __repr__ (self):
return "%s ('%s')" % (self.__class__.__name__, str (self))
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# __cmp__ (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion (Version):
"""Version numbering for anal retentives and software idealists.
Implements the standard interface for version number classes as
described above. A version number consists of two or three
dot-separated numeric components, with an optional "pre-release" tag
on the end. The pre-release tag consists of the letter 'a' or 'b'
followed by a number. If the numeric components of two version
numbers are equal, then one with a pre-release tag will always
be deemed earlier (lesser) than one without.
The following are valid version numbers (shown in the order that
would be obtained by sorting according to the supplied cmp function):
0.4 0.4.0 (these two are equivalent)
0.4.1
0.5a1
0.5b3
0.5
0.9.6
1.0
1.0.4a3
1.0.4b1
1.0.4
The following are examples of invalid version numbers:
1
2.7.2.2
1.3.a4
1.3pl1
1.3c4
The rationale for this version numbering system will be explained
in the distutils documentation.
"""
version_re = re.compile (r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
re.VERBOSE)
def parse (self, vstring):
match = self.version_re.match (vstring)
if not match:
raise ValueError, "invalid version number '%s'" % vstring
(major, minor, patch, prerelease, prerelease_num) = \
match.group (1, 2, 4, 5, 6)
if patch:
self.version = tuple (map (string.atoi, [major, minor, patch]))
else:
self.version = tuple (map (string.atoi, [major, minor]) + [0])
if prerelease:
self.prerelease = (prerelease[0], string.atoi (prerelease_num))
else:
self.prerelease = None
def __str__ (self):
if self.version[2] == 0:
vstring = string.join (map (str, self.version[0:2]), '.')
else:
vstring = string.join (map (str, self.version), '.')
if self.prerelease:
vstring = vstring + self.prerelease[0] + str (self.prerelease[1])
return vstring
def __cmp__ (self, other):
if isinstance (other, StringType):
other = StrictVersion (other)
compare = cmp (self.version, other.version)
if (compare == 0): # have to compare prerelease
# case 1: neither has prerelease; they're equal
# case 2: self has prerelease, other doesn't; other is greater
# case 3: self doesn't have prerelease, other does: self is greater
# case 4: both have prerelease: must compare them!
if (not self.prerelease and not other.prerelease):
return 0
elif (self.prerelease and not other.prerelease):
return -1
elif (not self.prerelease and other.prerelease):
return 1
elif (self.prerelease and other.prerelease):
return cmp (self.prerelease, other.prerelease)
else: # numeric versions don't match --
return compare # prerelease stuff doesn't matter
# end class StrictVersion
# The rules according to <NAME>:
# 1) a version number has 1 or more numbers separate by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accomodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as <NAME>, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):
"""Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def __init__ (self, vstring=None):
if vstring:
self.parse (vstring)
def parse (self, vstring):
# I've given up on thinking I can reconstruct the version string
# from the parsed tuple -- so I just store the string here for
# use by __str__
self.vstring = vstring
components = filter (lambda x: x and x != '.',
self.component_re.split (vstring))
for i in range (len (components)):
try:
components[i] = int (components[i])
except ValueError:
pass
self.version = components
def __str__ (self):
return self.vstring
def __repr__ (self):
return "LooseVersion ('%s')" % str (self)
def __cmp__ (self, other):
if isinstance (other, StringType):
other = LooseVersion (other)
return cmp (self.version, other.version)
# end class LooseVersion
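# Illustrative comparisons (sketch added for clarity; not part of the original module):
#   StrictVersion('1.0.4a3') < StrictVersion('1.0.4')   # a pre-release sorts before the final release
#   LooseVersion('1.5.1') < LooseVersion('1.5.2b2')     # plain tuple comparison of the parsed components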
|
StarcoderdataPython
|
6595186
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 20:00:24 2019
@author: <NAME>
"""
#coding:utf-8
import os
import requests
import pic_snapshots
import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
while(1):
file_name = pic_snapshots.snapshot_fun()
path=os.path.join(BASE_DIR,'snapshots_imgs',file_name)
img = {'image01': (file_name,open(path,'rb'),'image/jpg')}
r = requests.post("http://xx.xx.xx.xx:1234", files = img)
print(file_name)
time.sleep(5)
|
StarcoderdataPython
|
5092147
|
<gh_stars>0
import pytest
from numpy.random import randint, rand
import numpy as np
import scipy.io as sio
from helpers import *
from helpers_jpeg import *
@pytest.fixture(scope="module")
def X():
'''Return the lighthouse image X'''
return sio.loadmat('test_mat/lighthouse.mat')['X'].astype(float)
@pytest.fixture(scope="module")
def jpegout():
'''Return the lighthouse image X'''
return sio.loadmat('test_mat/jpegout.mat')
@pytest.fixture
def jpeg():
'''Return a new jpeghelper class'''
return JpegHuff()
@pytest.fixture
def dwtHuff():
'''Return a new jpeghelper class'''
return DwtHuff()
def test_jpegenc(X, jpeg, jpegout):
'''Test jpegenc with the lighthouse image and qstep=17'''
vlc, _bits, _huffval = jpeg.jpegenc(X-128, 17)
diff = vlc - jpegout['vlc'].astype(int) # index 17548 off by one on Mac
assert (np.array_equal(vlc, jpegout['vlc'].astype(int)) or
(np.where(diff != 0)[0] == np.array(17548) and diff[17548, 0] == -1))
def test_jpegdec(X, jpeg, jpegout):
vlc = jpegout['vlc'].astype(int)
Z = jpeg.jpegdec(vlc, 17)
assert np.allclose(Z, jpegout['Z'].astype(float))
def test_dwtgroup(X, dwtHuff, jpegout):
test = jpegout['test'].astype(float)
tested = dwtHuff.dwtgroup(test, 2)
assert np.array_equal(tested, jpegout['test_dwtgrouped'].astype(float))
test_reverse = dwtHuff.dwtgroup(tested, -2)
assert np.array_equal(test_reverse, test)
|
StarcoderdataPython
|
1711078
|
import cv2 as cv
import numpy as np
from Step_2_normalize_data import normalize_data
from Step_6_load_model import load_model
from Step_7_predict import predict
frame = np.zeros((400, 400, 1))
model, labels = load_model("model")
def do_predict():
global frame, model, labels
image = frame
image = cv.resize(image, (28, 28))
image = np.reshape(image, (28, 28, 1))
cv.imwrite('temp.jpg', image)
X = np.array([image])
X = X / 255
X = normalize_data(X)
p = np.argmax(predict(X, model)[0])
print(p)
cv.waitKey(1000)
frame = np.zeros((400, 400, 1))
def mouse_callback(event, x, y, flags, param):
global frame
if flags == cv.EVENT_FLAG_LBUTTON:
cv.circle(frame, (x, y), 15, (255, 255, 255), -1)
while True:
cv.namedWindow("frame")
cv.setMouseCallback("frame", mouse_callback)
cv.imshow('frame', frame)
k = cv.waitKey(1)
if k in [ord('q'), 27]:
break
if k in [32]:
do_predict()
|
StarcoderdataPython
|
11234288
|
<filename>from_python_community/find_values.py
# Task:
# Write a function that accepts an unlimited number of arrays and returns only those elements that are present in every list.
# Examples:
# find_values([11, 10, 3], [10, 3, 5, 11], [11, 10]) -> [11, 10]
# find_values([8, 4, 7, "hi"], [8, "hi"], [4, "hi"]) -> ['hi']
# find_values([1, 4, 3], [6, 2, 8], ["4", "hi"]) -> []
import unittest
def find_values(*args: list) -> list:
return sorted(list(set(args[0]).intersection(*args)), reverse=True)
class TestFindValues(unittest.TestCase):
def test_one(self):
""" Should return items that are in each list """
self.assertEqual([11, 10], find_values([11, 10, 3], [10, 3, 5, 11], [11, 10]))
self.assertEqual(['hi'], find_values([8, 4, 7, "hi"], [8, "hi"], [4, "hi"]))
self.assertEqual([], find_values([1, 4, 3], [6, 2, 8], ["4", "hi"]))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4813081
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import unittest
from unittest.mock import MagicMock
from ..get_exit_nodes import ExitNodeGenerator
from .test_functions import __name__ as qualifier, all_functions
class GetExitNodesTest(unittest.TestCase):
def test_compute_models(self) -> None:
self.maxDiff = None
sink = "TaintSink[ReturnedToUser]"
self.assertEqual(
[
*map(
str,
ExitNodeGenerator(django_urls=MagicMock()).compute_models(
all_functions
),
)
],
[
f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
f"def {qualifier}.testA() -> {sink}: ...",
f"def {qualifier}.testB(x) -> {sink}: ...",
f"def {qualifier}.testC(x) -> {sink}: ...",
f"def {qualifier}.testD(x, *args) -> {sink}: ...",
f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
],
)
self.assertEqual(
[
*map(
str,
ExitNodeGenerator(
django_urls=MagicMock(),
whitelisted_views=[f"{qualifier}.TestClass.methodA"],
).compute_models(all_functions),
)
],
[
f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
f"def {qualifier}.testA() -> {sink}: ...",
f"def {qualifier}.testB(x) -> {sink}: ...",
f"def {qualifier}.testC(x) -> {sink}: ...",
f"def {qualifier}.testD(x, *args) -> {sink}: ...",
f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
],
)
|
StarcoderdataPython
|
8101771
|
<filename>mlnn1hl_script.py<gh_stars>10-100
from sklearn.model_selection import ParameterGrid
from model.main.traditional_ffnn import Mlnn1HL
from utils.IOUtil import read_dataset_file
from utils.SettingPaper import mlnn1hl_paras_final as param_grid
from utils.SettingPaper import ggtrace_cpu, ggtrace_ram, ggtrace_multi_cpu, ggtrace_multi_ram
rv_data = [ggtrace_cpu, ggtrace_ram, ggtrace_multi_cpu, ggtrace_multi_ram]
data_file = ["google_5m", "google_5m", "google_5m", "google_5m"]
test_type = "normal" ### normal: for normal test, stability: for n_times test
run_times = None
if test_type == "normal": ### For normal test
run_times = 1
pathsave = "paper/results/test/"
all_model_file_name = "log_models"
elif test_type == "stability": ### For stability test (n times run with the same parameters)
run_times = 15
pathsave = "paper/results/stability/"
all_model_file_name = "stability_mlnn"
else:
pass
def train_model(item):
root_base_paras = {
"dataset": dataset,
"data_idx": (0.8, 0, 0.2),
"sliding": item["sliding_window"],
"expand_function": item["expand_function"],
"multi_output": requirement_variables[2],
"output_idx": requirement_variables[3],
"method_statistic": 0, # 0: sliding window, 1: mean, 2: min-mean-max, 3: min-median-max
"log_filename": all_model_file_name,
"path_save_result": pathsave + requirement_variables[4],
"test_type": test_type,
"draw": True,
"print_train": 2 # 0: nothing, 1: full details, 2: short
}
root_mlnn1hl_paras = {
"hidden_sizes": item["hidden_sizes"], "epoch": item["epoch"], "batch_size": item["batch_size"],
"lr": item["learning_rate"], "activations": item["activations"], "optimizer": item["optimizer"], "loss": item["loss"]
}
md = Mlnn1HL(root_base_paras=root_base_paras, root_mlnn1hl_paras=root_mlnn1hl_paras)
md._running__()
for _ in range(run_times):
for loop in range(len(rv_data)):
requirement_variables = rv_data[loop]
filename = requirement_variables[0] + data_file[loop] + ".csv"
dataset = read_dataset_file(filename, requirement_variables[1])
# Create combination of params.
for item in list(ParameterGrid(param_grid)):
train_model(item)
|
StarcoderdataPython
|
4870301
|
<filename>django_pages/admin.py
# -*- encoding: utf-8 -*-
"""
This file just imports admins from all packages so Django finds them
"""
from django.contrib import admin
from django_pages.comments.models import Comment
from django_pages.feed.models import FeedSettings
from django_pages.language.models import Language
from django_pages.looks.models import Template
from django_pages.menu.models import Menu, MenuItem
from django_pages.metadata.models import MetaSet, MetaData
from django_pages.pages.models import Page, Post
from django_pages.site.models import Site, Script
from django_pages.comments.admin import CommentAdmin
from django_pages.feed.admin import FeedAdmin
from django_pages.language.admin import LanguageAdmin
from django_pages.looks.admin import LooksAdmin
from django_pages.menu.admin import MenuAdmin, MenuItemAdmin
from django_pages.metadata.admin import MetaSetAdmin, MetaDataAdmin
from django_pages.pages.admin import PageAdmin, PostAdmin
from django_pages.site.admin import SiteAdmin, ScriptAdmin
admin.site.register(Comment, CommentAdmin)
admin.site.register(FeedSettings, FeedAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Template, LooksAdmin)
admin.site.register(Menu, MenuAdmin)
admin.site.register(MenuItem, MenuItemAdmin)
admin.site.register(MetaSet, MetaSetAdmin)
admin.site.register(MetaData, MetaDataAdmin)
admin.site.register(Page, PageAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Site, SiteAdmin)
admin.site.register(Script, ScriptAdmin)
|
StarcoderdataPython
|
4888078
|
import time
from typing import List, Optional, Tuple, Union
import headers
import requests
field: List[List[Optional[Union[int, str]]]] = [[]]
def get_node() -> Tuple[str, str]:
r = requests.post('http://localhost/start')
assert r.status_code == 201, r
print('Got Node')
time.sleep(2)
print('Starting')
return r.headers[headers.LOCATION], r.headers[headers.AUTHORIZATION]
def init_field(node_start: str) -> str:
global field
while True:
try:
width = int(input('Width: '))
height = int(input('Height: '))
mines = int(input('Mines: '))
r = client.post(node_start, params=dict(width=width, height=height, mines=mines))
assert r.status_code == 201, r
field = [[None for _ in range(0, width)] for _ in range(0, height)]
print('Field Done')
return r.headers[headers.LOCATION]
except (ValueError, AssertionError):
print('Failed...')
def print_field():
print(' ' + ' '.join(f'{i}' for i in range(0, len(field[0]))))
for idx, row in enumerate(field):
print(f'{idx}|' + ' '.join(' ' if v is None else f'{v}' for v in row))
def get_input() -> Tuple[bool, int, int]:
while True:
input_ = input('Give (f,)x,y: ')
parts = input_.split(',')
try:
if len(parts) == 3:
f, x, y = parts
x = int(x)
y = int(y)
f = True
else:
x, y = parts
x = int(x)
y = int(y)
f = False
break
except ValueError:
print('Invalid Input')
return f, x, y
def handle_flag(x: int, y: int):
if field[y][x] == 'f':
client.delete(f'{node}flag', params=dict(x=x, y=y))
field[y][x] = None
elif field[y][x] is None:
client.post(f'{node}flag', params=dict(x=x, y=y))
field[y][x] = 'f'
def handle_open(x: int, y: int) -> bool:
r = client.post(f'{node}open', params=dict(x=x, y=y))
if r.status_code == 200:
for s in r.json()['items']:
if s.get('flag', False):
value = 'f'
elif s.get('mine', False):
if s.get('open', False):
value = 'x'
else:
value = '*'
else:
value = s.get('value', None)
field[s['y']][s['x']] = value
if r.json().get('status', 'ongoing') != 'ongoing':
print_field()
print(r.json()['status'])
return True
return False
LOCATION, AUTH = get_node()
with requests.Session() as client:
client.headers[headers.AUTHORIZATION] = AUTH
node = init_field(LOCATION)
while True:
print_field()
f_, x_, y_ = get_input()
if f_:
handle_flag(x_, y_)
elif handle_open(x_, y_):
break
|
StarcoderdataPython
|
8024122
|
import tensorflow as tf
# configure as needed
input_model_dir = "nudenet/default"
frozen_model_dir = "nudenet/frozen"
saved_model_dir = "nudenet/saved"
# from "saved_model_cli show --dir nudenet/default --all"
tag = "serve"
signature = "predict" # tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY is default
input_nodes = ["input_1"]
output_nodes = ["filtered_detections/map/TensorArrayStack/TensorArrayGatherV3", "filtered_detections/map/TensorArrayStack_1/TensorArrayGatherV3", "filtered_detections/map/TensorArrayStack_2/TensorArrayGatherV3"]
# load saved model with variables
# everything is based on tf.compat.v1 since tf v2 handles things totally differently
tf.compat.v1.disable_eager_execution()
sess = tf.compat.v1.Session(graph=tf.Graph())
model = tf.compat.v1.saved_model.loader.load(sess, [tag], input_model_dir)
graph_def = sess.graph.as_graph_def()
# clean it up
clean = tf.compat.v1.graph_util.remove_training_nodes(graph_def)
# convert variables to constants
frozen = tf.compat.v1.graph_util.convert_variables_to_constants(sess, clean, output_nodes)
# write frozen graph if needed for future usage, but can be skipped here since it's already in current session
tf.io.write_graph(frozen, frozen_model_dir, "saved_model.pb", as_text=False)
# rename model input/outputs to expected format
def get_ops_dict(ops, graph, name):
out_dict = dict()
for i, op in enumerate(ops):
out_dict[name + str(i)] = tf.compat.v1.saved_model.build_tensor_info(graph.get_tensor_by_name(op + ':0'))
return out_dict
# finally create a clean saved model
with tf.Graph().as_default() as graph:
tf.compat.v1.import_graph_def(frozen, name="")  # public compat API instead of the private tf.python.framework.importer module
inputs_dict = get_ops_dict(input_nodes, graph, name='input_')
outputs_dict = get_ops_dict(output_nodes, graph, name='output_')
prediction_signature = (
tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs=inputs_dict,
outputs=outputs_dict,
method_name=tf.saved_model.PREDICT_METHOD_NAME))
legacy_init_op = tf.group(tf.compat.v1.tables_initializer(), name='legacy_init_op')
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(saved_model_dir)
builder.add_meta_graph_and_variables(
sess,
tags=[tag],
signature_def_map={signature: prediction_signature},
legacy_init_op=legacy_init_op)
builder.save()
|
StarcoderdataPython
|
12801904
|
<reponame>SimonLovskog/SolarManager-HA
"""Constants for the SolarManager integration."""
DOMAIN = "solarmanager"
|
StarcoderdataPython
|
183501
|
<filename>core/lexers/lexers.py<gh_stars>1-10
from PyQt5.Qsci import *
__all__ = ['get_lexer_by_ext', 'set_lexer_by_menu']
# single-extension entries are one-element tuples (note the trailing commas) so the
# membership test in get_lexer_by_ext checks whole extensions instead of substrings
LEXERS = {
('AVS', QsciLexerAVS): (),
('Bash', QsciLexerBash): ('sh', 'ksh', 'bash', 'ebuild', 'eclass', 'exheres-0', 'exlib'),
('Batch', QsciLexerBatch): ('cmd', 'btm'),
('Cmake', QsciLexerCMake): ('cmake',),
('CoffeeScript', QsciLexerCoffeeScript): ('coffee',),
('C++', QsciLexerCPP): ('cpp', 'hpp', 'c++', 'h++', 'cc', 'hh', 'cxx', 'hxx', 'C', 'H', 'cp', 'CPP'),
('C#', QsciLexerCSharp): ('cs',),
('CSS', QsciLexerCSS): ('css',),
('D', QsciLexerD): ('d', 'di'),
('Diff', QsciLexerDiff): ('diff', 'patch'),
('Fortran', QsciLexerFortran): ('f03', 'f90', 'F03', 'F90'),
('Fortran77', QsciLexerFortran77): ('f', 'for'),
('HTML', QsciLexerHTML): ('html', 'htm', 'xhtml', 'xslt'),
('IDL', QsciLexerIDL): ('pro',),
('Java', QsciLexerJava): ('java',),
('JavaScript', QsciLexerJavaScript): ('js', 'jsm'),
('Lua', QsciLexerLua): ('lua', 'wlua'),
('Makefile', QsciLexerMakefile): ('mak', 'mk'),
('Matlab', QsciLexerMatlab): ('m',),
('Octave', QsciLexerOctave): ('m',),
('Pascal', QsciLexerPascal): (),
('Perl', QsciLexerPerl): ('pl', 'pm', 't'),
('PO', QsciLexerPO): ('pas', 'inc'),
('PostScript', QsciLexerPostScript): ('ps', 'eps'),
('POV', QsciLexerPOV): ('pov', 'inc'),
('Properties', QsciLexerProperties): ('properties',),
('Python', QsciLexerPython): ('py', 'pyw', 'sc', 'tac', 'sage'),
('Ruby', QsciLexerRuby): ('rb', 'rbw', 'rake', 'gemspec', 'rbx', 'duby'),
('Spice', QsciLexerSpice): ('cir',),
('SQL', QsciLexerSQL): ('sql',),
('TCL', QsciLexerTCL): ('tcl', 'rvt'),
('TeX', QsciLexerTeX): ('tex', 'aux', 'toc'),
('Verilog', QsciLexerVerilog): ('verilog', 'v'),
('VHDL', QsciLexerVHDL): ('vhdl', 'vhd'),
('XML', QsciLexerXML): ('xml', 'xsl', 'rss', 'xslt', 'xsd', 'wsdl', 'wsf'),
('YAML', QsciLexerYAML): ('yaml', 'yml'),
}
def get_lexer_by_ext(file):
"""Function return lexer according file extension"""
file_name, file_ext = file.rsplit('.', 1)  # split on the last dot so names with multiple dots still work
for key, value in LEXERS.items():
if file_ext in value:
lexer = key[1]
return lexer
def set_lexer_by_menu(item):
"""Function return lexer according menu item"""
for key, value in LEXERS.items():
if item in key[0]:
lexer = key[1]
return lexer
def testing():
"""Function for testing"""
print(get_lexer_by_ext('file.py'))
print(set_lexer_by_menu('Python'))
if __name__ == '__main__':
testing()
|
StarcoderdataPython
|
336602
|
<filename>processing/pcap.py
#!/usr/bin/python
# Cuckoo Sandbox - Automated Malware Analysis
# Copyright (C) 2010-2011 Claudio "nex" Guarnieri (<EMAIL>)
# http://www.cuckoobox.org
#
# This file is part of Cuckoo.
#
# Cuckoo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cuckoo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
import os
import re
import sys
import socket
try:
import dpkt
except ImportError:
sys.exit(1)
class Pcap:
def __init__(self, filepath):
self.filepath = filepath
self.tcp_connections = []
self.udp_connections = []
self.http_requests = []
self.dns_requests = []
self.dns_performed = []
self.results = {}
def check_http(self, tcpdata):
try:
dpkt.http.Request(tcpdata)
return True
except dpkt.dpkt.UnpackError:
return False
def add_http(self, tcpdata, dport):
http = dpkt.http.Request(tcpdata)
entry = {}
entry["host"] = http.headers['host']
entry["port"] = dport
entry["data"] = tcpdata
entry["uri"] = http.uri
self.http_requests.append(entry)
return True
def check_dns(self, udpdata):
try:
dpkt.dns.DNS(udpdata)
return True
except:
return False
def add_dns(self, udpdata):
dns = dpkt.dns.DNS(udpdata)
name = dns.qd[0].name
if name not in self.dns_performed:
if re.search("in-addr.arpa", name):
return False
entry = {}
entry["hostname"] = name
try:
ip = socket.gethostbyname(name)
except socket.gaierror:
ip = ""
entry["ip"] = ip
self.dns_requests.append(entry)
self.dns_performed.append(name)
return True
return False
def process(self):
if not os.path.exists(self.filepath):
return None
if os.path.getsize(self.filepath) == 0:
return None
file = open(self.filepath, "rb")
try:
pcap = dpkt.pcap.Reader(file)
except dpkt.dpkt.NeedData:
return None
try:
for ts, buf in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
if ip.p == dpkt.ip.IP_PROTO_TCP:
tcp = ip.data
if len(tcp.data) > 0:
if self.check_http(tcp.data):
self.add_http(tcp.data, tcp.dport)
connection = {}
connection["src"] = socket.inet_ntoa(ip.src)
connection["dst"] = socket.inet_ntoa(ip.dst)
connection["sport"] = tcp.sport
connection["dport"] = tcp.dport
self.tcp_connections.append(connection)
else:
continue
elif ip.p == dpkt.ip.IP_PROTO_UDP:
udp = ip.data
if len(udp.data) > 0:
if udp.dport == 53:
if self.check_dns(udp.data):
self.add_dns(udp.data)
#elif ip.p == dpkt.ip.IP_PROTO_ICMP:
#icmp = ip.data
except AttributeError:
continue
except dpkt.dpkt.NeedData:
pass
file.close()
self.results["tcp"] = self.tcp_connections
self.results["http"] = self.http_requests
self.results["dns"] = self.dns_requests
return self.results
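# Minimal usage sketch (assumes a capture file exists at the given path):
# results = Pcap("/path/to/analysis.pcap").process()
# if results is not None:
#     print "%d TCP connections, %d HTTP requests, %d DNS lookups" % (
#         len(results["tcp"]), len(results["http"]), len(results["dns"]))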
|
StarcoderdataPython
|
8176285
|
<reponame>SolbiatiAlessandro/xbot
"""core e2e translation logic"""
import ast
import logging
from typing import Iterator
from dataclasses import asdict
import xbot.constants
import xbot.utils
import xbot.templates
def parse_source_code(
filename: str,
from_library = xbot.constants.LIBRARIES.PYTHON_TELEGRAM_BOT,
to_library = xbot.constants.LIBRARIES.DISCORD_PY
) -> Iterator[xbot.templates.FunctionTemplateParams]:
"""
Translates source code written in `from_library` into a
temporary meta-language, using MetaDictionary and Templates.
"""
logging.info("parse_source_code..")
sourcecode = ""
with open(filename, "r") as f:
sourcecode = f.read()
Parser = xbot.utils.get_parser(from_library)
dictionary = xbot.utils.make_dictionary(from_library, to_library)
parser = Parser(sourcecode, dictionary)
translated_functions = parser.parse()
return translated_functions
def generate_destination_code(
bot_functions: Iterator[xbot.templates.FunctionTemplateParams],
output_file: str,
output_library: xbot.constants.LIBRARIES
):
"""
Translates the temporary meta-language in
output_library executable code.
"""
logging.info("generate_destination_code..")
template = xbot.utils.template_filename(
output_library,
xbot.constants.TEMPLATES.REPLY
)
translated_functions = map(
lambda params:
xbot.utils.render_jinja_template(template, asdict(params)),
bot_functions
)
translated_code = "\n\n".join(translated_functions)
logging.info("\n\n"+translated_code)
with open(output_file, "w") as f:
f.write(translated_code)
def translate(
sourcecode_filename,
input_library=xbot.constants.LIBRARIES.PYTHON_TELEGRAM_BOT,
output_file=None,
output_library=xbot.constants.LIBRARIES.DISCORD_PY
):
"""
entrypoint for the translation
"""
logging.info("translate {}..".format(sourcecode_filename))
params = parse_source_code(sourcecode_filename)
if output_file is None or output_file == "":
output_file = xbot.constants.DEFAULT_OUTPUT_FILENAME
generate_destination_code(
params,
output_file,
output_library
)
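# Usage sketch (assumption: the file names below are hypothetical):
# translate("my_telegram_bot.py", output_file="my_discord_bot.py")
# parses the python-telegram-bot source and writes discord.py-flavoured handlers to the output file.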
|
StarcoderdataPython
|
6451545
|
<reponame>JonathanFromm/HackerspaceTemplatePackage
from django.template import Library
register = Library()
@register.filter
def landingpage(text, language):
from django.template.loader import get_template
try:
return get_template('translations/landingpage/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/landingpage/english.html').render({
'word': text
})
except:
return text
@register.filter
def donate(text, language):
from django.template.loader import get_template
try:
return get_template('translations/donate/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/donate/english.html').render({
'word': text
})
except:
return text
@register.filter
def menu(text, language):
from django.template.loader import get_template
try:
return get_template('translations/menu/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/menu/english.html').render({
'word': text
})
except:
return text
@register.filter
def values(text, language):
from django.template.loader import get_template
try:
return get_template('translations/values/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/values/english.html').render({
'word': text
})
except:
return text
@register.filter
def events(text, language):
from django.template.loader import get_template
try:
return get_template('translations/events/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/events/english.html').render({
'word': text
})
except:
return text
@register.filter
def photos(text, language):
from django.template.loader import get_template
try:
return get_template('translations/photos/'+language+'.html').render({
'word': text
})
except:
try:
return get_template('translations/photos/english.html').render({
'word': text
})
except:
return text
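# Template usage sketch (assumption: this module lives in a templatetags/ package and is
# loaded under its file name, e.g. {% load translation_filters %}):
# {{ "Become a member"|menu:language }} renders translations/menu/<language>.html for the
# given word, falling back to the English template and finally to the raw text.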
|
StarcoderdataPython
|
8076181
|
<gh_stars>0
"""Tests for Wolk."""
# Copyright 2020 WolkAbout Technology s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import unittest
from typing import List
sys.path.append("..") # noqa
from wolk_gateway_module.connectivity.connectivity_service import (
ConnectivityService,
)
from wolk_gateway_module.wolk import Wolk
from wolk_gateway_module.model.actuator_state import ActuatorState
from wolk_gateway_module.model.device import Device
from wolk_gateway_module.model.device_template import DeviceTemplate
from wolk_gateway_module.model.message import Message
from wolk_gateway_module.model.device_status import DeviceStatus
from wolk_gateway_module.interface.firmware_handler import FirmwareHandler
from wolk_gateway_module.model.firmware_update_status import (
FirmwareUpdateStatus,
FirmwareUpdateState,
)
from wolk_gateway_module.model.sensor_template import SensorTemplate
from wolk_gateway_module.model.data_type import DataType
from wolk_gateway_module.json_data_protocol import JsonDataProtocol
from wolk_gateway_module.json_status_protocol import JsonStatusProtocol
from wolk_gateway_module.json_firmware_update_protocol import (
JsonFirmwareUpdateProtocol,
)
from wolk_gateway_module.json_registration_protocol import (
JsonRegistrationProtocol,
)
from wolk_gateway_module.outbound_message_deque import OutboundMessageDeque
class MockConnectivityService(ConnectivityService):
_connected = False
_topics: List[str] = []
def set_inbound_message_listener(self, on_inbound_message):
pass
def set_lastwill_message(self, message):
self._lastwill = message
def add_subscription_topics(self, topics):
self._topics.extend(topics)
def remove_topics_for_device(self, device_key):
pass
def connected(self):
return self._connected
def connect(self):
return self._connected
def reconnect(self):
return self._connected
def disconnect(self):
pass
def publish(self, message):
return self._connected
actuator_1 = False
actuator_2 = 0
actuator_3 = "default"
def mock_actuator_handler(device_key, reference, value):
if device_key == "key1":
if reference == "REF1":
global actuator_1
actuator_1 = value
elif reference == "REF2":
global actuator_2
actuator_2 = value
elif device_key == "key2":
if reference == "REF3":
global actuator_3
actuator_3 = value
def mock_actuator_status_provider(device_key, reference):
if device_key == "key1":
if reference == "REF1":
global actuator_1
return ActuatorState.READY, actuator_1
elif reference == "REF2":
global actuator_2
return ActuatorState.READY, actuator_2
elif device_key == "key2":
if reference == "REF3":
global actuator_3
return ActuatorState.READY, actuator_3
configuration_1 = False
configuration_2 = 3.14
configuration_3 = ("string1", "string2")
def mock_configuration_provider(device_key):
if device_key == "key1":
global configuration_1
global configuration_2
return {
"configuration_1": configuration_1,
"configuration_2": configuration_2,
}
elif device_key == "key2":
global configuration_3
return {"configuration_3": configuration_3}
def mock_configuration_handler(device_key, configuration):
if device_key == "key1":
global configuration_1
global configuration_2
for key, value in configuration.items():
if key == "configuration_1":
configuration_1 = value
elif key == "configuration_2":
configuration_2 = value
elif device_key == "key2":
global configuration_3
for key, value in configuration.items():
if key == "configuration_3":
configuration_3 = value
class MockFirmwareHandler(FirmwareHandler):
def install_firmware(
self, device_key: str, firmware_file_path: str
) -> None:
if device_key == "key1":
self.on_install_success(device_key)
def abort_installation(self, device_key: str) -> None:
if device_key == "key1":
status = FirmwareUpdateStatus(FirmwareUpdateState.ABORTED)
self.on_install_fail(device_key, status)
def get_firmware_version(self, device_key: str) -> str:
if device_key == "key1":
return "v1.0"
elif device_key == "key2":
return "v0.1"
class WolkTests(unittest.TestCase):
"""Wolk Tests."""
def test_bad_status_provider_callable(self):
"""Test that exception is raised for bad device status provider."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host", 1883, "module_name", lambda a, b: a * b
)
def test_bad_status_provider_not_callable(self):
"""Test that exception is raised for bad device status provider."""
with self.assertRaises(ValueError):
wolk = Wolk("host", 1883, "module_name", False) # noqa
def test_add_sensor_reading(self):
"""Test adding a sensor reading to storage and then publish."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
connectivity_service=MockConnectivityService(),
)
wolk.add_sensor_reading("device_key", "REF", 13)
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
wolk.connectivity_service._connected = True
wolk.publish()
self.assertEqual(0, wolk.outbound_message_queue.queue_size())
def test_add_alarm(self):
"""Test adding a alarm event to storage and then publish."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
connectivity_service=MockConnectivityService(),
)
wolk.add_alarm("device_key", "REF", False)
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
wolk.connectivity_service._connected = True
wolk.publish()
self.assertEqual(0, wolk.outbound_message_queue.queue_size())
def test_publish_actuator(self):
"""Test publishing actuator status."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
connectivity_service=MockConnectivityService(),
actuation_handler=mock_actuator_handler,
actuator_status_provider=mock_actuator_status_provider,
)
wolk.log.setLevel(logging.CRITICAL)
wolk.publish_actuator_status("key1", "REF1")
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
wolk.connectivity_service._connected = True
wolk.publish()
self.assertEqual(0, wolk.outbound_message_queue.queue_size())
def test_publish_actuator_explicit(self):
"""Test publishing explicit actuator status."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
connectivity_service=MockConnectivityService(),
actuation_handler=mock_actuator_handler,
actuator_status_provider=mock_actuator_status_provider,
)
wolk.log.setLevel(logging.CRITICAL)
wolk.publish_actuator_status("key1", "REF1", ActuatorState.READY, True)
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
wolk.connectivity_service._connected = True
wolk.publish()
self.assertEqual(0, wolk.outbound_message_queue.queue_size())
def test_receive_actuation(self):
"""Test receiving actuator set command."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: DeviceStatus.CONNECTED,
connectivity_service=MockConnectivityService(),
actuation_handler=mock_actuator_handler,
actuator_status_provider=mock_actuator_status_provider,
)
wolk.log.setLevel(logging.CRITICAL)
wolk.connectivity_service._connected = True
message = Message("p2d/actuator_set/d/key1/r/REF2", '{"value": "3"}')
wolk._on_inbound_message(message)
global actuator_2
self.assertEqual(3, actuator_2)
def test_publish_configuration(self):
"""Test publishing configuration."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
connectivity_service=MockConnectivityService(),
configuration_handler=mock_configuration_handler,
configuration_provider=mock_configuration_provider,
)
wolk.log.setLevel(logging.CRITICAL)
wolk.publish_configuration("key1")
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
wolk.connectivity_service._connected = True
wolk.publish()
self.assertEqual(0, wolk.outbound_message_queue.queue_size())
def test_receive_configuration(self):
"""Test receiving configuration set command."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: DeviceStatus.CONNECTED,
connectivity_service=MockConnectivityService(),
configuration_handler=mock_configuration_handler,
configuration_provider=mock_configuration_provider,
)
wolk.log.setLevel(logging.CRITICAL)
wolk.connectivity_service._connected = True
message = Message(
"p2d/configuration_set/d/key1",
'{"configuration_1": "true", "configuration_2": "4.2"}',
)
wolk._on_inbound_message(message)
self.assertEqual(True, configuration_1)
self.assertEqual(4.2, configuration_2)
message = Message(
"p2d/configuration_set/d/key2",
'{"configuration_3": "newstring1,newstring2"}',
)
wolk._on_inbound_message(message)
self.assertEqual(("newstring1", "newstring2"), configuration_3)
def test_publish_device_status(self):
"""Test sending device status."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
connectivity_service=MockConnectivityService(),
)
wolk.log.setLevel(logging.CRITICAL)
wolk.publish_device_status("key1")
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
wolk.connectivity_service._connected = True
wolk.publish()
self.assertEqual(0, wolk.outbound_message_queue.queue_size())
def test_publish_device_status_explicit(self):
"""Test sending explicit device status."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
connectivity_service=MockConnectivityService(),
)
wolk.log.setLevel(logging.CRITICAL)
wolk.publish_device_status("key1", DeviceStatus.OFFLINE)
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
wolk.connectivity_service._connected = True
wolk.publish()
self.assertEqual(0, wolk.outbound_message_queue.queue_size())
def test_on_status_request(self):
"""Test receiving device status request."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
connectivity_service=MockConnectivityService(),
)
wolk.log.setLevel(logging.CRITICAL)
message = Message("p2d/subdevice_status_request/d/key1")
wolk._on_inbound_message(message)
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
def test_firmware_update(self):
"""Test receiving firmware installation command."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
connectivity_service=MockConnectivityService(),
firmware_handler=MockFirmwareHandler(),
)
wolk.log.setLevel(logging.CRITICAL)
message = Message(
"p2d/firmware_update_install/d/key1", '{ "fileName": "some_path"}'
)
wolk._on_inbound_message(message)
self.assertEqual(3, wolk.outbound_message_queue.queue_size())
def test_firmware_abort(self):
"""Test receiving firmware abort command."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
connectivity_service=MockConnectivityService(),
firmware_handler=MockFirmwareHandler(),
)
wolk.log.setLevel(logging.CRITICAL)
message = Message("p2d/firmware_update_abort/d/key1")
wolk._on_inbound_message(message)
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
def test_add_simple_device(self):
"""Test adding a simple device."""
wolk = Wolk(
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
connectivity_service=MockConnectivityService(),
)
wolk.log.setLevel(logging.CRITICAL)
sensor1 = SensorTemplate("sensor1", "s1", DataType.NUMERIC)
device_template = DeviceTemplate(sensors=[sensor1])
device = Device("device1", "key1", device_template)
wolk.add_device(device)
self.assertEqual(1, wolk.outbound_message_queue.queue_size())
def test_bad_actuation_handler_not_callable(self):
"""Test passing something that isn't callable raises ValueError."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
actuation_handler=False,
)
def test_bad_actuation_handler_callable(self):
"""Test passing something that isn't callable raises ValueError."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
actuation_handler=lambda a: a,
)
def test_bad_actuation_provider_not_callable(self):
"""Test passing something that isn't callable raises ValueError."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
actuator_status_provider=False,
)
def test_bad_actuation_provider_callable(self):
"""Test passing something that isn't callable raises ValueError."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
actuator_status_provider=lambda a: a,
)
def test_missing_actuator_status_provider(self):
"""Test passing an actuator handler but no provider raises an exception."""
with self.assertRaises(RuntimeError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda a: a,
connectivity_service=MockConnectivityService(),
actuation_handler=mock_actuator_handler,
)
def test_bad_configuration_handler_not_callable(self):
"""Test passing something that isn't callable raises ValueError."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
configuration_handler=False,
)
def test_bad_configuration_handler_callable(self):
"""Test passing something that isn't callable raises ValueError."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
configuration_handler=lambda a: a,
)
def test_bad_configuration_provider_not_callable(self):
"""Test passing something that isn't callable raises ValueError."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
configuration_provider=False,
)
def test_bad_configuration_provider_callable(self):
"""Test passing something that isn't callable raises ValueError."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda device_key: DeviceStatus.CONNECTED,
configuration_provider=lambda a, b: a,
)
def test_missing_configuration_provider(self):
"""Test passing an config handler but no provider raises an exception."""
with self.assertRaises(RuntimeError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda a: a,
connectivity_service=MockConnectivityService(),
configuration_handler=mock_configuration_handler,
)
def test_bad_firmware_handler(self):
"""Test passing a bad firmware handler raises an exception."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda a: a,
firmware_handler=False,
)
def test_bad_data_protocol(self):
"""Test passing a bad data protocol raises an exception."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda a: a,
data_protocol=False,
)
def test_good_data_protocol(self):
"""Test passing a good data protocol doesn't raise an exception."""
data_protocol = JsonDataProtocol()
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
data_protocol=data_protocol,
)
self.assertEqual(data_protocol, wolk.data_protocol)
def test_bad_firmware_update_protocol(self):
"""Test passing a bad firmware update protocol raises an exception."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda a: a,
firmware_update_protocol=False,
)
def test_good_firmware_update_protocol(self):
"""Test passing a good firmware update protocol doesn't raise an exception."""
firmware_update_protocol = JsonFirmwareUpdateProtocol()
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
firmware_update_protocol=firmware_update_protocol,
)
self.assertEqual(
firmware_update_protocol, wolk.firmware_update_protocol
)
def test_bad_status_protocol(self):
"""Test passing a bad status protocol raises an exception."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda a: a,
status_protocol=False,
)
def test_good_status_protocol(self):
"""Test passing a good status protocol doesn't raise an exception."""
status_protocol = JsonStatusProtocol()
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
status_protocol=status_protocol,
)
self.assertEqual(status_protocol, wolk.status_protocol)
def test_bad_registration_protocol(self):
"""Test passing a bad registration protocol raises an exception."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda a: a,
registration_protocol=False,
)
def test_good_registration_protocol(self):
"""Test passing a good registration protocol doesn't raise an exception."""
registration_protocol = JsonRegistrationProtocol()
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
registration_protocol=registration_protocol,
)
self.assertEqual(registration_protocol, wolk.registration_protocol)
def test_bad_outbound_message_queue(self):
"""Test passing a bad outbound message queue raises an exception."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda a: a,
outbound_message_queue=False,
)
def test_good_outbound_message_queue(self):
"""Test passing a good outbound message queue doesn't raise an exception."""
outbound_message_queue = OutboundMessageDeque()
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
outbound_message_queue=outbound_message_queue,
)
self.assertEqual(outbound_message_queue, wolk.outbound_message_queue)
def test_bad_connectivity_service(self):
"""Test passing a bad connectivity service raises an exception."""
with self.assertRaises(ValueError):
wolk = Wolk( # noqa
"host",
1883,
"module_name",
lambda a: a,
connectivity_service=False,
)
def test_good_connectivity_service(self):
"""Test passing a good connectivity service doesn't raise an exception."""
connectivity_service = MockConnectivityService()
wolk = Wolk(
"host",
1883,
"module_name",
lambda a: a,
connectivity_service=connectivity_service,
)
self.assertEqual(connectivity_service, wolk.connectivity_service)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3371284
|
<reponame>VadymV/clinica
def eddy_fsl_pipeline(low_bval, use_cuda, initrand, name="eddy_fsl"):
"""Use FSL eddy for head motion correction and eddy current distortion correction."""
import nipype.interfaces.utility as niu
import nipype.pipeline.engine as pe
from nipype.interfaces.fsl.epi import Eddy
from clinica.utils.dwi import generate_acq_file, generate_index_file
inputnode = pe.Node(
niu.IdentityInterface(
fields=[
"in_file",
"in_bvec",
"in_bval",
"in_mask",
"ref_b0",
"total_readout_time",
"phase_encoding_direction",
]
),
name="inputnode",
)
generate_acq = pe.Node(
niu.Function(
input_names=[
"in_dwi",
"fsl_phase_encoding_direction",
"total_readout_time",
],
output_names=["out_file"],
function=generate_acq_file,
),
name="generate_acq",
)
generate_index = pe.Node(
niu.Function(
input_names=["in_bval", "low_bval"],
output_names=["out_file"],
function=generate_index_file,
),
name="generate_index",
)
generate_index.inputs.low_bval = low_bval
eddy = pe.Node(interface=Eddy(), name="eddy_fsl")
eddy.inputs.repol = True
eddy.inputs.use_cuda = use_cuda
eddy.inputs.initrand = initrand
outputnode = pe.Node(
niu.IdentityInterface(
fields=["out_parameter", "out_corrected", "out_rotated_bvecs"]
),
name="outputnode",
)
wf = pe.Workflow(name=name)
# fmt: off
wf.connect(
[
(inputnode, generate_acq, [('in_file', 'in_dwi')]),
(inputnode, generate_acq, [('total_readout_time', 'total_readout_time')]),
(inputnode, generate_acq, [('phase_encoding_direction', 'fsl_phase_encoding_direction')]),
(inputnode, generate_index, [('in_bval', 'in_bval')]),
(inputnode, eddy, [('in_bvec', 'in_bvec')]),
(inputnode, eddy, [('in_bval', 'in_bval')]),
(inputnode, eddy, [('in_file', 'in_file')]),
(inputnode, eddy, [('in_mask', 'in_mask')]),
(generate_acq, eddy, [('out_file', 'in_acqp')]),
(generate_index, eddy, [('out_file', 'in_index')]),
(eddy, outputnode, [('out_parameter', 'out_parameter')]),
(eddy, outputnode, [('out_corrected', 'out_corrected')]),
(eddy, outputnode, [('out_rotated_bvecs', 'out_rotated_bvecs')])
]
)
# fmt: on
return wf
def epi_pipeline(name="susceptibility_distortion_correction_using_t1"):
"""Perform EPI correction.
    This workflow corrects echo-planar induced susceptibility artifacts without a fieldmap
    (e.g. for the ADNI database) by elastically registering DWIs to their respective baseline
    T1-weighted structural scans, using an inverse-consistent registration algorithm with a
    mutual information cost function (SyN algorithm). It also coregisters DWIs with their
    respective baseline T1-weighted structural scans so that tracks and cortical parcellation
    can later be combined.
Warnings:
This workflow rotates the b-vectors.
Notes:
Nir et al. (2015): Connectivity network measures predict volumetric atrophy in mild cognitive impairment
Leow et al. (2007): Statistical Properties of Jacobian Maps and the Realization of
Unbiased Large Deformation Nonlinear Image Registration
"""
import nipype.interfaces.ants as ants
import nipype.interfaces.c3 as c3
import nipype.interfaces.fsl as fsl
import nipype.interfaces.utility as niu
import nipype.pipeline.engine as pe
from .dwi_preprocessing_using_t1_utils import (
ants_combine_transform,
change_itk_transform_type,
create_jacobian_determinant_image,
expend_matrix_list,
rotate_bvecs,
)
inputnode = pe.Node(
niu.IdentityInterface(fields=["T1", "DWI", "bvec"]), name="inputnode"
)
split = pe.Node(fsl.Split(dimension="t"), name="SplitDWIs")
pick_ref = pe.Node(niu.Select(), name="Pick_b0")
pick_ref.inputs.index = [0]
flirt_b0_2_t1 = pe.Node(interface=fsl.FLIRT(dof=6), name="flirt_B0_2_T1")
flirt_b0_2_t1.inputs.interp = "spline"
flirt_b0_2_t1.inputs.cost = "normmi"
flirt_b0_2_t1.inputs.cost_func = "normmi"
apply_xfm = pe.Node(interface=fsl.preprocess.ApplyXFM(), name="apply_xfm")
apply_xfm.inputs.apply_xfm = True
expend_matrix = pe.Node(
interface=niu.Function(
input_names=["in_matrix", "in_bvec"],
output_names=["out_matrix_list"],
function=expend_matrix_list,
),
name="expend_matrix",
)
rot_bvec = pe.Node(
niu.Function(
input_names=["in_matrix", "in_bvec"],
output_names=["out_file"],
function=rotate_bvecs,
),
name="Rotate_Bvec",
)
ants_registration = pe.Node(
interface=ants.registration.RegistrationSynQuick(
transform_type="br", dimension=3
),
name="antsRegistrationSyNQuick",
)
c3d_flirt2ants = pe.Node(c3.C3dAffineTool(), name="fsl_reg_2_itk")
c3d_flirt2ants.inputs.itk_transform = True
c3d_flirt2ants.inputs.fsl2ras = True
change_transform = pe.Node(
niu.Function(
input_names=["input_affine_file"],
output_names=["updated_affine_file"],
function=change_itk_transform_type,
),
name="change_transform_type",
)
merge_transform = pe.Node(niu.Merge(3), name="MergeTransforms")
apply_transform = pe.MapNode(
interface=niu.Function(
input_names=["fix_image", "moving_image", "ants_warp_affine"],
output_names=["out_warp_field", "out_warped"],
function=ants_combine_transform,
),
iterfield=["moving_image"],
name="warp_filed",
)
jacobian = pe.MapNode(
interface=niu.Function(
input_names=["imageDimension", "deformationField", "outputImage"],
output_names=["outputImage"],
function=create_jacobian_determinant_image,
),
iterfield=["deformationField"],
name="jacobian",
)
jacobian.inputs.imageDimension = 3
jacobian.inputs.outputImage = "Jacobian_image.nii.gz"
jacmult = pe.MapNode(
fsl.MultiImageMaths(op_string="-mul %s"),
iterfield=["in_file", "operand_files"],
name="ModulateDWIs",
)
thres = pe.MapNode(
fsl.Threshold(thresh=0.0), iterfield=["in_file"], name="RemoveNegative"
)
merge = pe.Node(fsl.Merge(dimension="t"), name="MergeDWIs")
outputnode = pe.Node(
niu.IdentityInterface(
fields=[
"DWI_2_T1_Coregistration_matrix",
"epi_correction_deformation_field",
"epi_correction_affine_transform",
"epi_correction_image_warped",
"DWIs_epicorrected",
"warp_epi",
"out_bvec",
]
),
name="outputnode",
)
wf = pe.Workflow(name="epi_pipeline")
# fmt: off
wf.connect(
[
(inputnode, split, [("DWI", "in_file")]),
(split, pick_ref, [("out_files", "inlist")]),
(pick_ref, flirt_b0_2_t1, [("out", "in_file")]),
(inputnode, flirt_b0_2_t1, [("T1", "reference")]),
(inputnode, rot_bvec, [("bvec", "in_bvec")]),
(flirt_b0_2_t1, expend_matrix, [("out_matrix_file", "in_matrix")]),
(inputnode, expend_matrix, [("bvec", "in_bvec")]),
(expend_matrix, rot_bvec, [("out_matrix_list", "in_matrix")]),
(inputnode, ants_registration, [("T1", "fixed_image")]),
(flirt_b0_2_t1, ants_registration, [("out_file", "moving_image")]),
(inputnode, c3d_flirt2ants, [("T1", "reference_file")]),
(pick_ref, c3d_flirt2ants, [("out", "source_file")]),
(flirt_b0_2_t1, c3d_flirt2ants, [("out_matrix_file", "transform_file")]),
(c3d_flirt2ants, change_transform, [("itk_transform", "input_affine_file")]),
(ants_registration, merge_transform, [("forward_warp_field", "in1")]),
(ants_registration, merge_transform, [("out_matrix", "in2")]),
(change_transform, merge_transform, [("updated_affine_file", "in3")]),
(inputnode, apply_transform, [("T1", "fix_image")]),
(split, apply_transform, [("out_files", "moving_image")]),
(merge_transform, apply_transform, [("out", "ants_warp_affine")]),
(apply_transform, jacobian, [("out_warp_field", "deformationField")]),
(apply_transform, jacmult, [("out_warped", "operand_files")]),
(jacobian, jacmult, [("outputImage", "in_file")]),
(jacmult, thres, [("out_file", "in_file")]),
(thres, merge, [("out_file", "in_files")]),
(merge, outputnode, [("merged_file", "DWIs_epicorrected")]),
(flirt_b0_2_t1, outputnode, [("out_matrix_file", "DWI_2_T1_Coregistration_matrix")]),
(ants_registration, outputnode, [("forward_warp_field", "epi_correction_deformation_field"),
("out_matrix", "epi_correction_affine_transform"),
("warped_image", "epi_correction_image_warped")]),
(merge_transform, outputnode, [("out", "warp_epi")]),
(rot_bvec, outputnode, [("out_file", "out_bvec")]),
]
)
# fmt: on
return wf
def b0_flirt_pipeline(num_b0s, name="b0_coregistration"):
"""Rigid registration of the B0 dataset onto the first volume.
Rigid registration is achieved using FLIRT and the normalized correlation.
Args:
num_b0s (int): Number of the B0 volumes in the dataset.
name (str): Name of the workflow.
Inputnode:
in_file(str): B0 dataset.
    Outputnode:
        out_file (str): The set of B0 volumes registered to the first volume.
        out_xfms (list of str): The estimated rigid transformation matrices.
"""
import nipype.interfaces.utility as niu
import nipype.pipeline.engine as pe
from nipype.interfaces import fsl
from clinica.utils.dwi import merge_volumes_tdim
inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
fslroi_ref = pe.Node(fsl.ExtractROI(args="0 1"), name="b0_reference")
tsize = num_b0s - 1
fslroi_moving = pe.Node(fsl.ExtractROI(args="1 " + str(tsize)), name="b0_moving")
split_moving = pe.Node(fsl.Split(dimension="t"), name="split_b0_moving")
bet_ref = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True), name="bet_ref")
dilate = pe.Node(
fsl.maths.MathsCommand(nan2zeros=True, args="-kernel sphere 5 -dilM"),
name="mask_dilate",
)
flirt = pe.MapNode(
fsl.FLIRT(
interp="spline",
dof=6,
bins=50,
save_log=True,
cost="corratio",
cost_func="corratio",
padding_size=10,
searchr_x=[-4, 4],
searchr_y=[-4, 4],
searchr_z=[-4, 4],
fine_search=1,
coarse_search=10,
),
name="b0_co_registration",
iterfield=["in_file"],
)
merge = pe.Node(fsl.Merge(dimension="t"), name="merge_registered_b0s")
thres = pe.MapNode(
fsl.Threshold(thresh=0.0), iterfield=["in_file"], name="remove_negative"
)
insert_ref = pe.Node(
niu.Function(
input_names=["in_file1", "in_file2"],
output_names=["out_file"],
function=merge_volumes_tdim,
),
name="concat_ref_moving",
)
outputnode = pe.Node(
niu.IdentityInterface(fields=["out_file", "out_xfms"]), name="outputnode"
)
wf = pe.Workflow(name=name)
# fmt: off
wf.connect(
[
(inputnode, fslroi_ref, [("in_file", "in_file")]),
(inputnode, fslroi_moving, [("in_file", "in_file")]),
(fslroi_moving, split_moving, [("roi_file", "in_file")]),
(fslroi_ref, bet_ref, [("roi_file", "in_file")]),
(bet_ref, dilate, [("mask_file", "in_file")]),
(dilate, flirt, [("out_file", "ref_weight"),
("out_file", "in_weight")]),
(fslroi_ref, flirt, [("roi_file", "reference")]),
(split_moving, flirt, [("out_files", "in_file")]),
(flirt, thres, [("out_file", "in_file")]),
(thres, merge, [("out_file", "in_files")]),
(merge, insert_ref, [("merged_file", "in_file2")]),
(fslroi_ref, insert_ref, [("roi_file", "in_file1")]),
(insert_ref, outputnode, [("out_file", "out_file")]),
(flirt, outputnode, [("out_matrix_file", "out_xfms")])
]
)
    # fmt: on
return wf
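# The sketch below shows how these workflow factories are typically wired up
# with nipype. It is illustrative only: the file paths, the low_bval threshold,
# and the acquisition parameters are assumptions, not values from this module.
if __name__ == "__main__":
    wf = eddy_fsl_pipeline(low_bval=5, use_cuda=False, initrand=False)
    wf.inputs.inputnode.in_file = "sub-01_dwi.nii.gz"  # hypothetical path
    wf.inputs.inputnode.in_bvec = "sub-01_dwi.bvec"  # hypothetical path
    wf.inputs.inputnode.in_bval = "sub-01_dwi.bval"  # hypothetical path
    wf.inputs.inputnode.in_mask = "sub-01_brain_mask.nii.gz"  # hypothetical path
    wf.inputs.inputnode.total_readout_time = 0.0342  # assumed value
    wf.inputs.inputnode.phase_encoding_direction = "y-"  # assumed value
    wf.run()  # requires FSL (and optionally CUDA) to be installed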
|
StarcoderdataPython
|
3437551
|
<filename>walle/views.py
# -*- coding: utf-8 -*-
from app import app
from flask import render_template
@app.route('/')
def index():
websocket_url = 'ws://172.16.58.3:5001'
return render_template('index.html', websocket_url=websocket_url)
|
StarcoderdataPython
|
5072535
|
from config_utils import user_enter, function_maker, is_type, get_command_type, get_command_definition, on_not_valid_type, type_to_input_functions
import constants
import data_types.did_do as bool_lib
def build_if_yes_function(definition):
"""
    Builds a function that asks the user a yes/no question and, on a positive answer, collects a value of the given data type from the user.
Raises an error if the definition is not valid.
str -> (() -> (label: str, value: float)) or error
"""
return function_maker(user_enter_if_yes,
definition, is_valid_if_yes_definition,
"Not a valid if_yes definition: " + definition)
def is_valid_if_yes_definition(definition):
"""
Returns true if the if_yes definition is valid.
str -> bool
"""
if definition.count(constants.QUESTION_DENOTE) < 2 or definition[0] != constants.QUESTION_DENOTE:
return False
# First char is white space so it needs to be removed.
if_true = get_data_logged_on_true(definition)
data_type = get_command_type(if_true)
if not is_type(data_type):
on_not_valid_type(data_type)
data_type_definition = get_command_definition(if_true)
# The validation logic is called upon this function call.
type_to_input_functions(data_type, data_type_definition)
return True
def user_enter_if_yes(definition):
"""
    Asks the user the question whose text is defined in the definition.
    If the answer is yes, prompts the user to enter a value of the given data type.
    Returns it along with the label for the value.
Signature in config: if "boolean question" data_type data_type_definition
Note: Quotes are included
str -> (label: str, value: any)
"""
update_question = get_question_text(definition)
update = user_enter(get_question_text,
bool_lib.user_enter_boolean_response,
bool_lib.get_bool_from_response,
bool_lib.is_valid_boolean_response,
update_question,
bool_lib.VALID_BOOL_RESPONSE,
definition)
# If the user enters "yes"
if not update[1]:
return (update[0], constants.WHEN_NOTHING_TO_lOG)
if_true = get_data_logged_on_true(definition)
data_type = get_command_type(if_true)
data_type_definition = get_command_definition(if_true)
# The validation logic is called upon this function call.
return type_to_input_functions(data_type, data_type_definition)()
def get_question_text(definition):
"""
Returns the text for the question that is being asked to the user.
str -> str
"""
return definition.split(constants.QUESTION_DENOTE, 1)[1].rsplit(constants.QUESTION_DENOTE, 1)[0]
def get_data_logged_on_true(definition):
"""
Returns the description of the data that is logged if the user's answer is yes.
    Basically everything after the question.
str -> str
"""
# First char is white space so it needs to be removed.
return definition.rsplit(constants.QUESTION_DENOTE, 1)[1][1:]
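# Minimal usage sketch. It assumes QUESTION_DENOTE is the double-quote character
# and that a data type named "number" is registered in type_to_input_functions;
# both are assumptions about the surrounding config system, not facts from this
# module.
if __name__ == "__main__":
    definition = '"Did you exercise today?" number How many minutes?'
    ask = build_if_yes_function(definition)  # validates the definition eagerly
    label, value = ask()  # prompts the user interactively
    print(label, value)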
|
StarcoderdataPython
|
3254747
|
from htun.args import args
from htun.tools import stop_running, create_iptables_rules, \
delete_ip_tables_rules
from htun.http_server import run_server
from htun.tun_iface import TunnelServer
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
if args.uri:
is_server = False
proto = args.uri["proto"]
if proto == "http":
from htun.http_client import server_socket
reconnect = None
elif proto == "tcp":
from htun.tcp_client import server_socket, create_socket
reconnect = create_socket
else:
logging.error("Unknown URI protocol: %s (must be one of tcp or http)" %
args.server)
exit(1)
else:
is_server = True
if args.server == "http":
from htun.http_server import server_socket
run_server()
reconnect = None
elif args.server == "tcp":
from htun.tcp_server import server_socket, create_socket
reconnect = create_socket
else:
logging.error("Unknown URI protocol: %s (must be one of tcp or http)" %
args.server)
exit(1)
def main():
if is_server:
server = TunnelServer(server_socket, args.saddr, args.caddr, reconnect)
create_iptables_rules()
else:
server = TunnelServer(server_socket, args.caddr, args.saddr,
reconnect)
# drop privs?
try:
server.run()
except KeyboardInterrupt:
logging.info("CTRL-c caught, exiting...")
delete_ip_tables_rules()
stop_running()
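# main() is defined above but never invoked in this module. If the module is
# meant to be executed directly, an entry-point guard like the one below would
# be needed; the package may instead expose main() through a console-script
# entry point, so this is only a plausible completion.
if __name__ == "__main__":
    main()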
|
StarcoderdataPython
|
11395139
|
'''
@author: <NAME>
'''
from utils.dictionare import *
from domeniu.entitati import Numar
from erori.exceptii import RepoError
class Repo(object):
def __init__(self):
'''
Functie care creeaza un obiect de tip Repo
Input: -
Output: -
'''
pass
def add(self, stNumber, ndNumber, baza):
'''
Functie care aduna doua numere intr-o anumita baza
Input: stNumber - un obiect de tip Numar
ndNumber - un obiect de tip Numar
baza - un numar intreg, pozitiv
Output: un obiect de tip Numar
'''
self.__stNumber = stNumber
self.__ndNumber = ndNumber
self.__baza = baza
rezultatNumber_value = ''
carryDigit = 0
while len(self.__stNumber.get_valoare()) > 0 or len(self.__ndNumber.get_valoare()) > 0:
            # Convert the rightmost digits of the two numbers to integers.
            # At the same time, drop the rightmost digit of each number
if len(self.__stNumber.get_valoare()) == 0:
digit_stNumber = 0
            else:  # The number is not yet zero
digit_stNumber = character_to_decimal[self.__stNumber.get_valoare()[-1]]
self.__stNumber.set_valoare(self.__stNumber.get_valoare()[:-1])
if len(self.__ndNumber.get_valoare()) == 0:
digit_ndNumber = 0
            else:  # The number is not yet zero
digit_ndNumber = character_to_decimal[self.__ndNumber.get_valoare()[-1]]
self.__ndNumber.set_valoare(self.__ndNumber.get_valoare()[:-1])
rezultat_adunare = digit_stNumber + digit_ndNumber + carryDigit
digit_rezultat = rezultat_adunare % self.__baza
carryDigit = rezultat_adunare // self.__baza
            character_rezultat = decimal_to_character[digit_rezultat]  # Convert the digit to a character
rezultatNumber_value = character_rezultat + rezultatNumber_value
        # Check whether a carry digit is left over
if carryDigit != 0:
            character_rezultat = decimal_to_character[carryDigit]  # Convert the digit to a character
rezultatNumber_value = character_rezultat + rezultatNumber_value
return Numar(rezultatNumber_value, self.__baza)
def multiply(self, stNumber, ndNumber, baza):
'''
        Function that multiplies two numbers in a given base
        Input: stNumber - an object of type Numar
               ndNumber - an object of type Numar
               baza - a positive integer (the base)
        Output: an object of type Numar
'''
self.__baza = baza
if (stNumber.get_valoare() == '0' or ndNumber.get_valoare() == '0'):
return Numar('0', self.__baza)
intermediaryResult_value = ''
finalResult_value = ''
carryDigit = 0
numberOfDigits_stNumber = len(stNumber.get_valoare())
numberOfDigits_ndNumber = len(ndNumber.get_valoare())
firstNumber = stNumber.get_valoare()
secondNumber = ndNumber.get_valoare()
for index_SecondNumber in range (numberOfDigits_ndNumber - 1, -1, -1):
for index_FirstNumber in range(numberOfDigits_stNumber - 1, -1, -1):
# Convert rightmost digits to decimal
digit_stNumber = character_to_decimal[firstNumber[index_FirstNumber]]
digit_ndNumber = character_to_decimal[secondNumber[index_SecondNumber]]
result_Multplication = digit_stNumber * digit_ndNumber + carryDigit
digit_result = result_Multplication % self.__baza
carryDigit = result_Multplication // self.__baza
character_result = decimal_to_character[digit_result] # Digit to character
intermediaryResult_value = character_result + intermediaryResult_value
# Check for carry digit
if carryDigit != 0:
character_result = decimal_to_character[carryDigit] # Digit to character
intermediaryResult_value = character_result + intermediaryResult_value
carryDigit = 0
if numberOfDigits_ndNumber >= 2: # Multidigit Multiplication, then add an extra zero to the end of the final result
intermediaryResult_value = intermediaryResult_value + (numberOfDigits_ndNumber - index_SecondNumber - 1) * '0'
# Perform a simple addition between the final result and the intermediary result
dummy_FirstNumber = Numar(finalResult_value, self.__baza)
dummy_SecondNumber = Numar(intermediaryResult_value, self.__baza)
dummy_ResultNumber = self.add(dummy_FirstNumber, dummy_SecondNumber, self.__baza)
finalResult_value = dummy_ResultNumber.get_valoare() # Update the final result
# Update the intermediary result
intermediaryResult_value = ''
return Numar(finalResult_value, self.__baza)
def substract(self, stNumber, ndNumber, baza):
'''
        Function that subtracts two numbers in a given base
        Input: stNumber - an object of type Numar
               ndNumber - an object of type Numar
               baza - a positive integer (the base)
        Output: an object of type Numar
'''
self.__stNumber = stNumber
self.__ndNumber = ndNumber
self.__baza = baza
rezultatNumber_value = ''
borrowDigit = 0
while len(self.__stNumber.get_valoare()) > 0 or len(self.__ndNumber.get_valoare()) > 0:
            # Convert the rightmost digits of the two numbers to integers.
            # At the same time, drop the rightmost digit of each number
if len(self.__stNumber.get_valoare()) == 0:
digit_stNumber = 0
            else:  # The number is not yet zero
digit_stNumber = character_to_decimal[self.__stNumber.get_valoare()[-1]]
self.__stNumber.set_valoare(self.__stNumber.get_valoare()[:-1])
if len(self.__ndNumber.get_valoare()) == 0:
digit_ndNumber = 0
            else:  # The number is not yet zero
digit_ndNumber = character_to_decimal[self.__ndNumber.get_valoare()[-1]]
self.__ndNumber.set_valoare(self.__ndNumber.get_valoare()[:-1])
rezultat_scadere = digit_stNumber - digit_ndNumber + borrowDigit
if rezultat_scadere < 0:
rezultat_scadere = self.__baza + rezultat_scadere
borrowDigit = -1
else:
borrowDigit = 0
            character_rezultat = decimal_to_character[rezultat_scadere]  # Convert the digit to a character
rezultatNumber_value = character_rezultat + rezultatNumber_value
        # Strip leading zeros from the number
while len(rezultatNumber_value) > 1 and rezultatNumber_value[0] == '0':
rezultatNumber_value = rezultatNumber_value[1:]
return Numar(rezultatNumber_value, self.__baza)
def division_decimalBase(self, stNumber, ndNumber):
'''
Function that divides two numbers in base 10
Input: stNumber, ndNumber - entities of type Numar, with decimal base
Output: two objects of type Numar, one representing the quotient, while the other representing the remainder, both in base 10
'''
self.__stNumber = stNumber
self.__ndNumber = ndNumber
quotient = int(self.__stNumber.get_valoare()) // int(self.__ndNumber.get_valoare())
remainder = int(self.__stNumber.get_valoare()) % int(self.__ndNumber.get_valoare())
return Numar(str(quotient), 10), Numar(str(remainder), 10)
def divide(self, stNumber, ndNumber, base):
'''
        Function that divides two numbers in a given base
        Input: stNumber - an object of type Numar
               ndNumber - an object of type Numar
               base - a positive integer (the base)
        Output: two objects of type Numar, the first representing the quotient and the second the remainder
'''
self.__stNumber = stNumber
self.__ndNumber = ndNumber
# Convert both numbers to base 10
self.__stNumber = self.convert_to_another_base(self.__stNumber, 10)
self.__ndNumber = self.convert_to_another_base(self.__ndNumber, 10)
# Now that they're both in decimal base, perform simple division between them
quotient, remainder = self.division_decimalBase(self.__stNumber, self.__ndNumber)
# Now, convert the quotient and remainder to the given base
quotient = self.convert_to_another_base(quotient, base)
remainder = self.convert_to_another_base(remainder, base)
return quotient, remainder
def convert_subtitutie(self, number):
'''
        Function that converts a number from a base other than 10 into base 10. (Substitution method)
        Input: number - an object of type Numar whose base is different from 10
        Output: an object of type Numar whose base is 10
        Raises: Exception
                if the number passed as a parameter is already in base 10 -> "Baza nevalida!\n"
'''
self.__number = number
if self.__number.get_baza() == 10:
raise RepoError("Baza nevalida!\n")
rezultat_integer = 0
factor = 1
for index in range(len(self.__number.get_valoare()) -1, -1, -1):
digit = character_to_decimal[self.__number.get_valoare()[index]]
rezultat_integer = rezultat_integer + factor * digit
factor = factor * self.__number.get_baza()
return Numar(str(rezultat_integer), 10)
def convert_impartiri_succesive(self, number, baza):
'''
        Function that converts a number from base 10 into another base. (Successive division method)
        Input: number - an object of type Numar whose base is 10
               baza - a positive integer belonging to the set {2, 3, ..., 10, 16}
        Output: an object of type Numar whose base is different from 10
        Raises: Exception
                if the number passed as a parameter has a base other than 10 -> "Baza nevalida!\n"
'''
self.__number = number
if self.__number.get_baza() != 10:
raise RepoError("Baza nevalida!\n")
rezultatNumber_value = ''
if self.__number.get_valoare() == '0':
return Numar('0', baza)
while self.__number.get_valoare() != '0':
number_value_integer = int(self.__number.get_valoare())
digit = number_value_integer % baza
digit = decimal_to_character[digit]
rezultatNumber_value = digit + rezultatNumber_value
            # Update the number
number_value_integer = number_value_integer // baza
self.__number.set_valoare(str(number_value_integer))
return Numar(rezultatNumber_value, baza)
def convert_to_another_base(self, number, noua_baza):
'''
        Function that converts a number from one base into another base
        Input: number - an object of type Numar
               noua_baza - a positive integer (the new base)
        Output: an object of type Numar
'''
self.__number = number
        conversie_rapida = False  # If fast conversion is used, we no longer convert via substitution and successive divisions
        # If the number's base is 2 and noua_baza is 4, 8 or 16, convert the number using the fast conversion method
if self.__number.get_baza() == 2:
if noua_baza == 4:
self.__number = self.convert_base2_to_base4(self.__number)
conversie_rapida = True
elif noua_baza == 8:
self.__number = self.convert_base2_to_base8(self.__number)
conversie_rapida = True
elif noua_baza == 16:
self.__number = self.convert_base2_to_base16(self.__number)
conversie_rapida = True
        # If the number's base is 4, 8 or 16 and noua_baza is 2, convert the number using the fast conversion method
if noua_baza == 2:
if self.__number.get_baza() == 4:
self.__number = self.convert_base4_to_base2(self.__number)
conversie_rapida = True
if self.__number.get_baza() == 8:
self.__number = self.convert_base8_to_base2(self.__number)
conversie_rapida = True
if self.__number.get_baza() == 16:
self.__number = self.convert_base16_to_base2(self.__number)
conversie_rapida = True
        if not conversie_rapida:  # The fast conversion method was not used
            # If the number's base is different from 10, convert it to base 10 using the substitution method;
            # if it is already 10, no conversion is needed.
if self.__number.get_baza() != 10:
self.__number = self.convert_subtitutie(self.__number)
            # If the new base is different from 10, convert the number to the new base using the successive division method;
            # if it is 10, no conversion is needed.
if noua_baza != 10:
self.__number = self.convert_impartiri_succesive(self.__number, noua_baza)
return Numar(self.__number.get_valoare(), self.__number.get_baza())
def convert_base4_to_base2(self, number):
'''
        Function that converts a number from base 4 into base 2 (fast conversion)
        Input: number - an object of type Numar whose base is 4
        Output: an object of type Numar whose base is 2
        Raises: Exception
                if the base of the number passed as a parameter is not 4 -> "Baza nevalida!\n"
'''
self.__number = number
if self.__number.get_baza() != 4:
raise RepoError("Baza nevalida!\n")
rezultatNumber_value = ''
for index in range(len(self.__number.get_valoare())):
digit = self.__number.get_valoare()[index]
            # Convert the base-4 digit of the number to its group of 2 binary digits
            grup_cifre_binare = base4_to_base2[digit]
            # Append the group of binary digits to the result
            rezultatNumber_value = rezultatNumber_value + grup_cifre_binare
        # Strip leading zeros from the number
while rezultatNumber_value[0] == '0':
rezultatNumber_value = rezultatNumber_value[1:]
return Numar(rezultatNumber_value, 2)
def convert_base8_to_base2(self, number):
'''
        Function that converts a number from base 8 into base 2 (fast conversion)
        Input: number - an object of type Numar whose base is 8
        Output: an object of type Numar whose base is 2
        Raises: Exception
                if the base of the number passed as a parameter is not 8 -> "Baza nevalida!\n"
'''
self.__number = number
if self.__number.get_baza() != 8:
raise RepoError("Baza nevalida!\n")
rezultatNumber_value = ''
for index in range(len(self.__number.get_valoare())):
digit = self.__number.get_valoare()[index]
            # Convert the base-8 digit of the number to its group of 3 binary digits
            grup_cifre_binare = base8_to_base2[digit]
            # Append the group of binary digits to the result
            rezultatNumber_value = rezultatNumber_value + grup_cifre_binare
        # Strip leading zeros from the number
while rezultatNumber_value[0] == '0':
rezultatNumber_value = rezultatNumber_value[1:]
return Numar(rezultatNumber_value, 2)
def convert_base16_to_base2(self, number):
'''
        Function that converts a number from base 16 into base 2 (fast conversion)
        Input: number - an object of type Numar whose base is 16
        Output: an object of type Numar whose base is 2
        Raises: Exception
                if the base of the number passed as a parameter is not 16 -> "Baza nevalida!\n"
'''
self.__number = number
if self.__number.get_baza() != 16:
raise RepoError("Baza nevalida!\n")
rezultatNumber_value = ''
for index in range(len(self.__number.get_valoare())):
digit = self.__number.get_valoare()[index]
            # Convert the base-16 digit of the number to its group of 4 binary digits
            grup_cifre_binare = base16_to_base2[digit]
            # Append the group of binary digits to the result
            rezultatNumber_value = rezultatNumber_value + grup_cifre_binare
        # Strip leading zeros from the number
while rezultatNumber_value[0] == '0':
rezultatNumber_value = rezultatNumber_value[1:]
return Numar(rezultatNumber_value, 2)
def convert_base2_to_base4(self, number):
'''
        Function that converts a number from base 2 into base 4 (fast conversion)
        Input: number - an object of type Numar whose base is 2
        Output: an object of type Numar whose base is 4
        Raises: Exception
                if the base of the number passed as a parameter is not 2 -> "Baza nevalida!\n"
'''
self.__number = number
if self.__number.get_baza() != 2:
raise RepoError("Baza nevalida!\n")
rezultatNumber_value = ''
while self.__number.get_valoare() != '':
            # Take the rightmost group of 2 binary digits (or a single binary digit, if only one is left)
            grup_cifre_binare = self.__number.get_valoare()[-2:]
            # Convert the group of 2 binary digits (or the single remaining binary digit)
            digit = base2_to_base4[grup_cifre_binare]
            # Prepend the converted digit to the result
            rezultatNumber_value = digit + rezultatNumber_value
            # Remove the rightmost group of 2 binary digits
self.__number.set_valoare(self.__number.get_valoare()[:-2])
return Numar(rezultatNumber_value, 4)
def convert_base2_to_base8(self, number):
'''
        Function that converts a number from base 2 into base 8 (fast conversion)
        Input: number - an object of type Numar whose base is 2
        Output: an object of type Numar whose base is 8
        Raises: Exception
                if the base of the number passed as a parameter is not 2 -> "Baza nevalida!\n"
'''
self.__number = number
if self.__number.get_baza() != 2:
raise RepoError("Baza nevalida!\n")
rezultatNumber_value = ''
while self.__number.get_valoare() != '':
            # Take the rightmost group of 3 binary digits (or however many are left)
            grup_cifre_binare = self.__number.get_valoare()[-3:]
            # Convert the group of 3 binary digits (or however many are left)
            digit = base2_to_base8[grup_cifre_binare]
            # Prepend the converted digit to the result
            rezultatNumber_value = digit + rezultatNumber_value
            # Remove the rightmost group of 3 binary digits
self.__number.set_valoare(self.__number.get_valoare()[:-3])
return Numar(rezultatNumber_value, 8)
def convert_base2_to_base16(self, number):
'''
        Function that converts a number from base 2 into base 16 (fast conversion)
        Input: number - an object of type Numar whose base is 2
        Output: an object of type Numar whose base is 16
        Raises: Exception
                if the base of the number passed as a parameter is not 2 -> "Baza nevalida!\n"
'''
self.__number = number
if self.__number.get_baza() != 2:
raise RepoError("Baza nevalida!\n")
rezultatNumber_value = ''
while self.__number.get_valoare() != '':
            # Take the rightmost group of 4 binary digits (or however many are left)
            grup_cifre_binare = self.__number.get_valoare()[-4:]
            # Convert the group of 4 binary digits (or however many are left)
            digit = base2_to_base16[grup_cifre_binare]
            # Prepend the converted digit to the result
            rezultatNumber_value = digit + rezultatNumber_value
            # Remove the rightmost group of 4 binary digits
self.__number.set_valoare(self.__number.get_valoare()[:-4])
return Numar(rezultatNumber_value, 16)
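# A short worked example of the digit-by-digit arithmetic implemented above.
# It assumes Numar(value, base) simply stores the digit string and the base
# (as implied by get_valoare()/get_baza()) and that decimal_to_character maps
# 10..15 to 'A'..'F'; run it manually to sanity-check the algorithms.
if __name__ == "__main__":
    repo = Repo()
    # 1011 (11) + 11 (3) = 1110 (14) in base 2
    print(repo.add(Numar('1011', 2), Numar('11', 2), 2).get_valoare())
    # 255 in base 10 -> FF in base 16 (successive divisions)
    print(repo.convert_to_another_base(Numar('255', 10), 16).get_valoare())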
|
StarcoderdataPython
|
3225272
|
from typing import Any, Union, Type
from enum import Enum
from pydantic import BaseModel, validator, validate_arguments, FilePath, HttpUrl, Field
from .main import app
class Photo(BaseModel):
name: str
file: Any
@validator("file")
    def valid_file(cls, v, values):
"""
Meta valid file
"""
return v
class Item(BaseModel):
url: HttpUrl
label: str
title: str
caption: str = Field(max_length=70)
thumb: str
@validator('thumb', pre=True)
    def cart_bg(cls, v, values):
"""
This change image for bg
"""
return v
class GroupItem(BaseModel):
type_name: str
items: list[Item]
class F (str, Enum):
lenta = ' Лента'
calendar = 'Календарь мероприятий'
history = 'История города'
good = 'Поздравь Иркутск'
galery = 'Галерея'
class Tabs(BaseModel):
name: str
url:str
class Elems(BaseModel):
label: str
title: str
caption: str
url: str
image_url: str
class CalendarElems(Elems):
pass
class HistoryElems(Elems):
@classmethod
async def from_orm(cls):
pool = app.state.pool
async with pool.acquire() as conn:
async with conn.cursor() as cur:
await cur.execute("SELECT id,title FROM special_project ;")
                r = await cur.fetchone()  # row as an (id, title) tuple
                # NOTE: the original body was truncated after ``caption = r.``;
                # the remaining field values below are placeholder assumptions.
                return cls(
                    label=r[1],
                    title=r[1],
                    caption=r[1],
                    url="",
                    image_url="",
                )
class PrideElems(Elems):
pass
class GaleryElems(Elems):
pass
class Card(BaseModel):
obj: Any
type: str
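# Illustrative construction of one of the models above; every field value is
# made up. (This module does a relative import of ``app``, so the snippet is
# meant to be called from within the package rather than by running the file
# directly.)
def example_item() -> Item:
    return Item(
        url="https://example.com/news/1",
        label="news",
        title="Example title",
        caption="A short caption well under seventy characters",
        thumb="thumb.png",
    )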
|
StarcoderdataPython
|
9610016
|
<gh_stars>0
"""
@apiDefine ProductGetParams
@apiSuccess title
@apiQueryParam model
@apiQueryParam purchasable
@apiQueryParam manufacturedAt
"""
class Product:
def get(self):
"""
@api {get} /product Get all products
@apiVersion 1.0.0
@apiGroup Product
@apiPermission god
@apiPermission operator
@apiQueryParam [sort]
@apiUse ProductGetParams Some product params
"""
"""
@api {get} /product/:productId Get a seller
@apiVersion 1.0.0
@apiGroup Product
@apiUrlParam {Integer} productId
"""
return self
def put(self):
"""
@api {put} /product/:productId Update a product
@apiVersion 1.0.0
@apiGroup Product
@apiUrlParam {Integer} productId
@apiParam {String} title (Product title)
@apiParam {String} model Product Model
@apiParam {Boolean} purchasable Can purchase this product
@apiParam {DateTime} manufacturedAt When product manufactured
"""
return self
def delete(self):
"""
@api {delete} /product/:productId Delete a product
@apiVersion 1.0.0
@apiGroup Product
@apiUrlParam productId Product ID
@apiHeadParam Authorization Access Token
@apiDescription
Delete a product with product ID, but actually its
marked as deleted.
After review the product can delete permanently.
List of products cannot delete:
- Products purchased on time
- Products related to a `seller`
"""
return self
|
StarcoderdataPython
|
3475280
|
<gh_stars>1-10
from typing import Callable, Optional
from gi.repository import Gtk
from gaphas.handlemove import HandleMove
from gaphas.item import Item
from gaphas.move import MoveType
from gaphas.view import GtkView
FactoryType = Callable[[], Item]
def placement_tool(
view: GtkView, factory: FactoryType, handle_index: int
) -> Gtk.GestureDrag:
"""Place a new item on the model."""
gesture = (
Gtk.GestureDrag.new(view)
if Gtk.get_major_version() == 3
else Gtk.GestureDrag.new()
)
placement_state = PlacementState(factory, handle_index)
gesture.connect("drag-begin", on_drag_begin, placement_state)
gesture.connect("drag-update", on_drag_update, placement_state)
gesture.connect("drag-end", on_drag_end, placement_state)
return gesture
class PlacementState:
def __init__(self, factory: FactoryType, handle_index: int):
self.factory = factory
self.handle_index = handle_index
self.moving: Optional[MoveType] = None
def on_drag_begin(gesture, start_x, start_y, placement_state):
view = gesture.get_widget()
item = placement_state.factory()
x, y = view.get_matrix_v2i(item).transform_point(start_x, start_y)
item.matrix.translate(x, y)
view.selection.unselect_all()
view.selection.focused_item = item
gesture.set_state(Gtk.EventSequenceState.CLAIMED)
handle = item.handles()[placement_state.handle_index]
if handle.movable:
placement_state.moving = HandleMove(item, handle, view)
placement_state.moving.start_move((start_x, start_y))
def on_drag_update(gesture, offset_x, offset_y, placement_state):
if placement_state.moving:
_, x, y = gesture.get_start_point()
placement_state.moving.move((x + offset_x, y + offset_y))
def on_drag_end(gesture, offset_x, offset_y, placement_state):
if placement_state.moving:
_, x, y = gesture.get_start_point()
placement_state.moving.stop_move((x + offset_x, y + offset_y))
|
StarcoderdataPython
|
6651605
|
from .sandbox import * # noqa
OSCARAPI_BLOCK_ADMIN_API_ACCESS = True
|
StarcoderdataPython
|
4863403
|
# author: <NAME>
import json
import glob
import os
import sys
import gc
import collections
def openMiners():
minersList = {}
addressList = {}
count = 0
    firstBlock = int(input('first'))
    lastBlock = int(input('last'))
listofDirectories = []
a = os.listdir('./')
interval = []
for i in range(firstBlock, lastBlock+1):
dir = 'res_blk%05d.dat' %i
interval.append(dir)
for i in a:
for j in range(len(interval)):
if interval[j] == i:
listofDirectories.append(i)
listofDirectories.sort()
for directory in listofDirectories:
print('NEW_DIRECTORY %s' %directory)
abs_file_path = os.getcwd() + '/' + directory + '/miners.txt'
with open(abs_file_path) as json_file:
temp = json.load(json_file)
json_file.close()
for group in temp:
minersList[count] = {}
for add in group:
minersList[count][add] = None
if add not in addressList:
addressList[add] = {}
if count not in addressList[add]:
addressList[add][count] = None
count = count + 1
del temp
users = groupHomonyms(minersList, addressList)
print('MINERS %s' %len(users))
abs_file_path = os.getcwd() + '/minersCompleted.txt'
with open(abs_file_path, 'w') as outfile:
json.dump(users, outfile)
del users
gc.collect()
def groupHomonyms(mList, aList):
finalListUsers = []
visitedGroups = {}
matchedAddresses = {}
print(len(mList))
for address in aList:
if address not in matchedAddresses:
groups = {}
homonyms = []
for gr in aList[address]:
groups[gr] = None
while (len(groups)>0):
                g = next(iter(groups))  # take an arbitrary group that has not been processed yet
if g not in visitedGroups:
for minerAddress in mList[g]:
if minerAddress not in matchedAddresses:
matchedAddresses[minerAddress] = None
homonyms.append(minerAddress)
for homonym in aList[minerAddress]:
if homonym not in groups and homonym not in visitedGroups:
groups[homonym] = None
visitedGroups[g] = None
del groups[g]
finalListUsers.append(homonyms)
remaining = len(mList) - len(visitedGroups)
gc.collect()
file1 = os.getcwd() + '/rewardingTransactions.txt'
with open(file1) as json_file1:
RWTs = json.load(json_file1)
json_file1.close()
receivers = {}
for rwt in RWTs:
adds = rwt['receiver'].split('_')
del adds[0]
for el in adds:
if el not in receivers:
receivers[el] = None
countsingle = 0
for el in receivers:
if el not in matchedAddresses:
#single = []
#single.append(el)
#finalListUsers.append(single)
countsingle = countsingle + 1
print('not assigned addresses %s on total %s' %(countsingle, len(receivers)))
return finalListUsers
openMiners()
|
StarcoderdataPython
|