id (string, 2-8 chars) | text (string, 16-264k chars) | dataset_id (1 class)
---|---|---|
4873479
|
import asyncio
from datetime import datetime
from typing import Callable, Awaitable, List, Union
from temporalcache.utils import should_expire # type: ignore
class Periodic(object):
def __init__(
self,
loop: asyncio.AbstractEventLoop,
last_ts: datetime,
function: Callable[..., Awaitable[None]],
second: Union[int, str],
minute: Union[int, str],
hour: Union[int, str],
) -> None:
self._loop = loop
self._function: Callable[..., Awaitable[None]] = function
self._second = second
self._minute = minute
self._hour = hour
self._last = last_ts
self._continue = True
def stop(self) -> None:
self._continue = False
def expires(self, timestamp: datetime) -> bool:
return should_expire(
self._last, timestamp, self._second, self._minute, self._hour
)
async def execute(self, timestamp: datetime) -> None:
if self.expires(timestamp):
await self._function()
self._last = timestamp
class PeriodicManagerMixin(object):
_periodics: List[Periodic] = []
def periodics(self) -> List[Periodic]:
return self._periodics
def periodicIntervals(self) -> int:
"""return the interval required for periodics, to optimize call times
1 - secondly
60 - minutely
3600 - hourly
"""
ret = 3600
for p in self._periodics:
if p._second == "*":
                # if any secondly, return 1 right away
return 1
elif p._minute == "*":
                # if any require minutely, drop to 60
ret = 60
return ret
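# Illustrative usage sketch (not from the original snippet): drives a Periodic from an
# asyncio loop with a stand-in coroutine. Whether a given tick fires depends on
# temporalcache's should_expire semantics; treat this as a minimal demo, not the real caller.
if __name__ == "__main__":
    async def printer() -> None:
        print("tick", datetime.now())

    async def demo() -> None:
        loop = asyncio.get_event_loop()
        p = Periodic(loop, datetime.now(), printer, second="*", minute="*", hour="*")
        for _ in range(3):
            await p.execute(datetime.now())
            await asyncio.sleep(1)
        p.stop()

    asyncio.run(demo())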
|
StarcoderdataPython
|
189155
|
<gh_stars>1-10
def swap_case(s):
t=""
for i in s:
if i.isalpha():
if i.isupper():
t=t+i.lower()
else:
t=t+i.upper()
else:
t=t+i
return (t)
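# Quick usage example (illustrative): swap_case flips the case of every alphabetic
# character and leaves everything else untouched.
if __name__ == "__main__":
    assert swap_case("Hello World 123") == "hELLO wORLD 123"
    print(swap_case("Www.HackerRank.com"))  # wWW.hACKERrANK.COM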
|
StarcoderdataPython
|
378762
|
<filename>JavaScripts/Image/PixelArea_qgis.py
import ee
from ee_plugin import Map
# Displays the decreasing area covered by a single pixel at
# higher latitudes using the Image.pixelArea() function.
# Create an image in which the value of each pixel is its area.
img = ee.Image.pixelArea()
Map.setCenter(0, 0, 3)
Map.addLayer(img, {'min': 2e8, 'max': 4e8, 'opacity': 0.85}, 'pixel area')
|
StarcoderdataPython
|
11265313
|
<gh_stars>0
"""Contains geographic mapping tools."""
from delphi_utils import GeoMapper
DATE_COL = "timestamp"
DATA_COLS = ['totalTest', 'numUniqueDevices', 'positiveTest', "population"]
GMPR = GeoMapper() # Use geo utils
GEO_KEY_DICT = {
"county": "fips",
"msa": "msa",
"hrr": "hrr",
"state": "state_id",
"nation": "nation",
"hhs": "hhs"
}
def geo_map(geo_res, df):
"""Map a geocode to a new value."""
data = df.copy()
geo_key = GEO_KEY_DICT[geo_res]
# Add population for each zipcode
data = GMPR.add_population_column(data, "zip")
# zip -> geo_res
data = GMPR.replace_geocode(data, "zip", geo_key,
date_col=DATE_COL, data_cols=DATA_COLS)
if geo_res in ["state", "hhs", "nation"]:
return data, geo_key
# Add parent state
data = add_parent_state(data, geo_res, geo_key)
return data, geo_key
def add_parent_state(data, geo_res, geo_key):
"""
Add parent state column to DataFrame.
- map from msa/hrr to state, going by the state with the largest
population (since a msa/hrr may span multiple states)
- map from county to the corresponding state
"""
fips_to_state = GMPR._load_crosswalk(from_code="fips", to_code="state") # pylint: disable=protected-access
if geo_res == "county":
mix_map = fips_to_state[["fips", "state_id"]] # pylint: disable=unsubscriptable-object
else:
fips_to_geo_res = GMPR._load_crosswalk(from_code="fips", to_code=geo_res) # pylint: disable=protected-access
mix_map = fips_to_geo_res[["fips", geo_res]].merge(
fips_to_state[["fips", "state_id"]], # pylint: disable=unsubscriptable-object
on="fips",
how="inner")
mix_map = GMPR.add_population_column(mix_map, "fips").groupby(
geo_res).max().reset_index().drop(
["fips", "population"], axis = 1)
# Merge the info of parent state to the data
data = data.merge(mix_map, how="left", on=geo_key).drop(
columns=["population"]).dropna()
data = data.groupby(["timestamp", geo_key, "state_id"]).sum().reset_index()
return data
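# Usage sketch (illustrative, not from the original module): geo_map expects a DataFrame
# keyed by zip code with a "timestamp" column plus the test-count columns in DATA_COLS;
# "population" is added internally by add_population_column. pandas is assumed here.
#   df = pd.DataFrame({"zip": ["10001", "90210"], "timestamp": ["2021-01-01"] * 2,
#                      "totalTest": [10, 8], "numUniqueDevices": [5, 4],
#                      "positiveTest": [1, 2]})
#   msa_df, geo_key = geo_map("msa", df)   # aggregates to MSA and appends the parent state_id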
|
StarcoderdataPython
|
4945301
|
class SpireError(Exception):
"""..."""
class ConfigurationError(SpireError):
"""..."""
class LocalError(SpireError):
"""..."""
@classmethod
def construct(cls, name):
return cls('a value for %r is not available in the local context' % name)
class TemporaryStartupError(SpireError):
"""..."""
|
StarcoderdataPython
|
1953534
|
# -*- coding: utf-8 -*-
#
# BitcoinLib - Python Cryptocurrency Library
# MAIN - Load configs, initialize logging and database
# © 2017 - 2020 February - 1200 Web Development <http://1200wd.com/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Do not remove any of the imports below, used by other files
import os
import sys
import functools
import logging
from logging.handlers import RotatingFileHandler
from bitcoinlib.config.config import *
# Initialize logging
logger = logging.getLogger('bitcoinlib')
logger.setLevel(LOGLEVEL)
if ENABLE_BITCOINLIB_LOGGING:
handler = RotatingFileHandler(str(BCL_LOG_FILE), maxBytes=100 * 1024 * 1024, backupCount=2)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
handler.setFormatter(formatter)
handler.setLevel(LOGLEVEL)
logger.addHandler(handler)
_logger = logging.getLogger(__name__)
logger.info('WELCOME TO BITCOINLIB - CRYPTOCURRENCY LIBRARY')
logger.info('Version: %s' % BITCOINLIB_VERSION)
logger.info('Logger name: %s' % logging.__name__)
logger.info('Read config from: %s' % BCL_CONFIG_FILE)
logger.info('Directory databases: %s' % BCL_DATABASE_DIR)
logger.info('Default database: %s' % DEFAULT_DATABASE)
logger.info('Logging to: %s' % BCL_LOG_FILE)
logger.info('Directory for data files: %s' % BCL_DATA_DIR)
def script_type_default(witness_type=None, multisig=False, locking_script=False):
"""
Determine default script type for provided witness type and key type combination used in this library.
>>> script_type_default('segwit', locking_script=True)
'p2wpkh'
    :param witness_type: Witness type used: legacy, p2sh-segwit or segwit
    :type witness_type: str
    :param multisig: Multi-signature key or not, default is False
    :type multisig: bool
    :param locking_script: Limit search to locking_script. Specify True for locking scripts and False for unlocking scripts
    :type locking_script: bool
:return str: Default script type
"""
if not witness_type:
return None
if witness_type == 'legacy' and not multisig:
return 'p2pkh' if locking_script else 'sig_pubkey'
elif witness_type == 'legacy' and multisig:
return 'p2sh' if locking_script else 'p2sh_multisig'
elif witness_type == 'segwit' and not multisig:
return 'p2wpkh' if locking_script else 'sig_pubkey'
elif witness_type == 'segwit' and multisig:
return 'p2wsh' if locking_script else 'p2sh_multisig'
elif witness_type == 'p2sh-segwit' and not multisig:
return 'p2sh' if locking_script else 'p2sh_p2wpkh'
elif witness_type == 'p2sh-segwit' and multisig:
return 'p2sh' if locking_script else 'p2sh_p2wsh'
else:
raise ValueError("Wallet and key type combination not supported: %s / %s" % (witness_type, multisig))
def get_encoding_from_witness(witness_type=None):
"""
Derive address encoding (base58 or bech32) from transaction witness type.
Returns 'base58' for legacy and p2sh-segwit witness type and 'bech32' for segwit
:param witness_type: Witness type: legacy, p2sh-segwit or segwit
:type witness_type: str
:return str:
"""
if witness_type == 'segwit':
return 'bech32'
elif witness_type in [None, 'legacy', 'p2sh-segwit']:
return 'base58'
else:
raise ValueError("Unknown witness type %s" % witness_type)
def deprecated(func):
"""
This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted
when the function is used.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
logging.warning("Call to deprecated function {}.".format(func.__name__))
return func(*args, **kwargs)
return new_func
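# Illustrative usage (not part of bitcoinlib itself): exercises the helpers defined above.
# `old_api` is a made-up example target for the deprecated decorator.
if __name__ == "__main__":
    assert script_type_default('legacy', locking_script=True) == 'p2pkh'
    assert script_type_default('segwit', multisig=True, locking_script=True) == 'p2wsh'
    assert get_encoding_from_witness('segwit') == 'bech32'
    assert get_encoding_from_witness('p2sh-segwit') == 'base58'

    @deprecated
    def old_api():
        return 42

    assert old_api() == 42  # also emits a deprecation message via logging.warning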
|
StarcoderdataPython
|
6491703
|
from bs4 import BeautifulSoup
import requests, dbconfig
db_Class = dbconfig.DataBase()
Cube_Option_Grade_List = {
    "레어": "Rare",
    "에픽": "Epic",
    "유니크": "Unique",
    "레전드리": "Legendary"
}
Cube_Option_Item_Type_List = {
    "무기": "Weapon",
    "엠블렘": "Emblem",
    "보조무기 (포스실드, 소울링 제외)": "SubWeapon",
    "포스실드, 소울링": "ForceShield_SoulRing",
    "방패": "Shield",
    "모자": "Cap",
    "상의": "Top",
    "한벌옷": "Clothes",
    "하의": "Bottom",
    "신발": "Shoes",
    "장갑": "Gloves",
    "망토": "Cloak",
    "벨트": "Belt",
    "어깨장식": "Shoulder",
    "얼굴장식": "Face",
    "눈장식": "Eye",
    "귀고리": "EarRing",
    "반지": "Ring",
    "펜던트": "Pendant",
    "기계심장": "MachineHeart"
}
Cube_Option_Item_Level_List = {
    "120레벨 이상": "120",
    "100레벨": "100"
}
Cube_Option_List = {
    0: "First option",
    1: "Second option",
    2: "Third option"
}
Cube_Type_List = ["Red", "Black", "Addi", "Strange", "Master", "Artisan"]
Cube_Type_Name_List = ["레드", "블랙", "에디셔널", "수상한", "장인의", "명장의"]
for i in range(len(Cube_Type_List)):
    Cube_GetData_Url = f"https://maplestory.nexon.com/Guide/OtherProbability/cube/{Cube_Type_List[i]}"
    Cube_GetData_Get = requests.get(Cube_GetData_Url)
    Cube_GetData_Html = Cube_GetData_Get.text
    Cube_GetData_Soup = BeautifulSoup(Cube_GetData_Html, "html.parser")
    SQL = f"""CREATE TABLE Cube_Option_Probability_{Cube_Type_List[i]}(
        Grade char(10),
        Item_Type char(30),
        Item_Level char(10),
        Option_Line char(10),
        Name char(200),
        Probability char(30)
    );"""
    db_Class.execute(SQL)
    print("Table created.")
    Cube_GetData_Option_List = Cube_GetData_Soup.find_all("div", attrs={"class": "cube_option"})
    Cube_GetData_Data_List = Cube_GetData_Soup.find_all("table", attrs={"class": "cube_data"})
    for j in range(len(Cube_GetData_Option_List)):
        Cube_GetData_Option_Grade = Cube_GetData_Option_List[j].find_all("span")[0].get_text()
        Cube_GetData_Option_Item_Type = Cube_GetData_Option_List[j].find_all("span")[1].get_text()
        Cube_GetData_Option_Item_Level = Cube_GetData_Option_List[j].find_all("span")[2].get_text()
        Cube_GetData_Data_TD = Cube_GetData_Data_List[j].find_all("td")
        for k in range(0, len(Cube_GetData_Data_TD), 2):
            Cube_GetData_Data_Name = Cube_GetData_Data_TD[k].get_text()
            Cube_GetData_Data_Probability = Cube_GetData_Data_TD[k + 1].get_text()
            if Cube_GetData_Data_Name == "" or Cube_GetData_Data_Name == "잠재옵션":
                continue
            SQL = f"INSERT INTO Cube_Option_Probability_{Cube_Type_List[i]} VALUES ('{Cube_Option_Grade_List[Cube_GetData_Option_Grade]}', '{Cube_Option_Item_Type_List[Cube_GetData_Option_Item_Type]}', '{Cube_Option_Item_Level_List[Cube_GetData_Option_Item_Level]}', '{int((k / 2) % 3)}', '{Cube_GetData_Data_Name.replace('%', '%%').strip()}', '{Cube_GetData_Data_Probability.replace('%', '')}')"
            db_Class.execute(SQL)
            print(f"Cube: {Cube_Type_Name_List[i]}, item grade: {Cube_GetData_Option_Grade}, item type: {Cube_GetData_Option_Item_Type}, item level: {Cube_GetData_Option_Item_Level}, option name: {Cube_GetData_Data_Name}, option probability: {Cube_GetData_Data_Probability}, option line: {Cube_Option_List[int((k / 2) % 3)]}")
db_Class.commit()
db_Class.close()
print("Update complete.")
|
StarcoderdataPython
|
4806812
|
<reponame>yatao91/learning_road
# -*- coding: utf-8 -*-
from celery import Celery
app = Celery("demo", broker='redis://127.0.0.1:6379/4', backend='redis://127.0.0.1:6379/5')
|
StarcoderdataPython
|
8195333
|
from django.apps import AppConfig
class TTestConfig(AppConfig):
name = 'ttest'
|
StarcoderdataPython
|
3324086
|
import unittest
from .utilities import get_vault_object, generate_random_uuid, get_parameters_json
from vvrest.services.group_service import GroupService
class GroupServiceTest(unittest.TestCase):
vault = None
@classmethod
def setUpClass(cls):
if not cls.vault:
cls.vault = get_vault_object()
test_parameters = get_parameters_json()
cls.site_id = test_parameters['site_id']
cls.group_id = test_parameters['group_id']
cls.user_id = test_parameters['user_id']
def test_get_groups(self):
"""
tests GroupService.get_groups
"""
group_service = GroupService(self.vault)
resp = group_service.get_groups()
self.assertEqual(resp['meta']['status'], 200)
self.assertGreater(len(resp['data']), 0)
self.assertEqual(resp['data'][0]['dataType'], 'Group')
def test_get_group(self):
"""
tests GroupService.get_group
"""
group_service = GroupService(self.vault)
resp = group_service.get_group(self.group_id)
self.assertEqual(resp['meta']['status'], 200)
self.assertEqual(len(resp['data']), 1) # TODO: report returns list instead of object
self.assertEqual(resp['data'][0]['dataType'], 'Group')
self.assertEqual(resp['data'][0]['id'], self.group_id)
def test_get_group_users(self):
"""
tests GroupService.get_group_users
"""
group_service = GroupService(self.vault)
resp = group_service.get_group_users(self.group_id)
self.assertEqual(resp['meta']['status'], 200)
self.assertGreater(len(resp['data']), 0)
self.assertEqual(resp['data'][0]['dataType'], 'User')
def test_get_group_user(self):
"""
tests GroupService.get_group_user
"""
group_service = GroupService(self.vault)
resp = group_service.get_group_user(self.group_id, self.user_id)
self.assertEqual(resp['meta']['status'], 200)
self.assertEqual(resp['data']['dataType'], 'User')
self.assertEqual(resp['data']['id'], self.user_id)
def test_create_and_update_group(self):
"""
tests GroupService.create_group, GroupService.update_group, and GroupService.add_user_to_group
"""
group_service = GroupService(self.vault)
# create new group
expected_group_name = generate_random_uuid()
expected_group_description = expected_group_name + ' description'
resp = group_service.create_group(expected_group_name, expected_group_description, self.site_id)
self.assertEqual(resp['meta']['status'], 200)
self.assertEqual(resp['data']['dataType'], 'Group')
self.assertEqual(resp['data']['name'], expected_group_name)
self.assertEqual(resp['data']['description'], expected_group_description)
group_id = resp['data']['id']
expected_group_name = generate_random_uuid()
expected_group_description = expected_group_name + ' description'
# update group
resp = group_service.update_group(group_id, expected_group_name, expected_group_description)
self.assertEqual(resp['meta']['status'], 200)
self.assertEqual(resp['data']['dataType'], 'Group')
self.assertEqual(resp['data']['name'], expected_group_name)
self.assertEqual(resp['data']['description'], expected_group_description)
# validate updated group data
resp = group_service.get_group(group_id)
self.assertEqual(resp['meta']['status'], 200)
self.assertEqual(len(resp['data']), 1) # TODO: report returns list instead of object
self.assertEqual(resp['data'][0]['id'], group_id)
self.assertEqual(resp['data'][0]['dataType'], 'Group')
self.assertEqual(resp['data'][0]['name'], expected_group_name)
self.assertEqual(resp['data'][0]['description'], expected_group_description)
# validate user is not in newly created group
resp = group_service.get_group_user(group_id, self.user_id)
self.assertEqual(resp['meta']['status'], 404)
# add user to group
resp = group_service.add_user_to_group(group_id, self.user_id)
self.assertEqual(resp['meta']['status'], 201)
self.assertEqual(len(resp['data']), 1) # TODO: report returns list instead of object
self.assertEqual(resp['data'][0]['dataType'], 'NotifyUser') # TODO: report dataType validation
self.assertEqual(resp['data'][0]['id'], self.user_id)
# validate user is now a member of new group
resp = group_service.get_group_user(group_id, self.user_id)
self.assertEqual(resp['meta']['status'], 200)
self.assertEqual(resp['data']['dataType'], 'User')
self.assertEqual(resp['data']['id'], self.user_id)
|
StarcoderdataPython
|
6448601
|
#Import the library needed
import pyautogui as p
import webbrowser as w
import time
#Taking input from user
x = input("Type whatever you want to search: ")
link = 'https://www.google.com/search?q={}'.format(x)
w.open(link)
#Delay for stability
time.sleep(1)
#Delay to let the page load
time.sleep(5)
#Moves the cursor to the search bar
p.moveTo(550,420,0.3)
#Presses enter
p.press('enter')
#Types the input provided
p.typewrite(x,0.1)
#Delay for stability
time.sleep(1)
#Presses enter
p.press('enter')
|
StarcoderdataPython
|
1822677
|
<reponame>rsadaphule/nlp
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
import random
import pytest
import numpy as np
import torch
from torch import nn
from utils_nlp.interpreter.Interpreter import (
Interpreter,
calculate_regularization,
)
def fixed_length_Phi(x):
return x[0] * 10 + x[1] * 20 - x[2] * 20 - x[3] * 10
def variable_length_Phi(function):
return lambda x: (function(x.unsqueeze(0))[0][0])
@pytest.fixture
def fixed_length_interp():
x = torch.randn(4, 10)
regular = torch.randn(10)
return Interpreter(x, fixed_length_Phi, regularization=regular)
@pytest.fixture
def variable_length_interp():
function = nn.LSTM(10, 10)
x = torch.randn(4, 10)
regular = torch.randn(1, 10)
return Interpreter(
x, variable_length_Phi(function), regularization=regular
)
def test_fixed_length_regularization():
dataset = torch.randn(10, 4, 10)
# calculate all hidden states
hidden = [fixed_length_Phi(x).tolist() for x in dataset]
# calculate the standard deviation
hidden = np.array(hidden)
regular_gt = np.std(hidden, axis=0)
regular = calculate_regularization(dataset, fixed_length_Phi)
assert np.sum(np.abs(regular - regular_gt)) < 1e-5
def test_variable_length_regularization():
function = nn.LSTM(10, 10)
dataset = [torch.randn(random.randint(5, 9), 10) for _ in range(10)]
# calculate all hidden states
hidden = [
np.mean(
variable_length_Phi(function)(x).tolist(), axis=0, keepdims=True
)
for x in dataset
]
# calculate the standard deviation
hidden = np.array(hidden)
regular_gt = np.std(hidden, axis=0)
regular = calculate_regularization(
dataset, variable_length_Phi(function), reduced_axes=[0]
)
assert np.sum(np.abs(regular - regular_gt)) < 1e-5
def test_initialize_interpreter():
x = torch.randn(4, 10)
regular = torch.randn(10)
interpreter = Interpreter(x, fixed_length_Phi, regularization=regular)
assert interpreter.s == 4
assert interpreter.d == 10
assert interpreter.regular.tolist() == regular.tolist()
def test_train_fixed_length_interp(fixed_length_interp):
init_ratio = fixed_length_interp.ratio + 0.0 # make a copy
init_regular = fixed_length_interp.regular + 0.0
fixed_length_interp.optimize(iteration=10)
after_ratio = fixed_length_interp.ratio + 0.0
after_regular = fixed_length_interp.regular + 0.0
# make sure the ratio is changed when optimizing
assert torch.sum(torch.abs(after_ratio - init_ratio)) > 1e-5
# make sure the regular is not changed when optimizing
assert torch.sum(torch.abs(after_regular - init_regular)) < 1e-5
def test_train_variable_length_interp(variable_length_interp):
init_ratio = variable_length_interp.ratio + 0.0 # make a copy
init_regular = variable_length_interp.regular + 0.0
variable_length_interp.optimize(iteration=10)
after_ratio = variable_length_interp.ratio + 0.0
after_regular = variable_length_interp.regular + 0.0
# make sure the ratio is changed when optimizing
assert torch.sum(torch.abs(after_ratio - init_ratio)) > 1e-5
# make sure the regular is not changed when optimizing
assert torch.sum(torch.abs(after_regular - init_regular)) < 1e-5
def test_interpreter_get_sigma(fixed_length_interp):
sigma = fixed_length_interp.get_sigma()
assert sigma.shape == (4,)
|
StarcoderdataPython
|
6441589
|
<gh_stars>10-100
import http.client
import hashlib
import urllib
import random
import json
import nltk
from nltk.tokenize import sent_tokenize
from BackTranslation.translated import Translated
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
class BackTranslation_Baidu(object):
def __init__(self, appid, secretKey):
if not appid:
raise ValueError("'{}': INVALID appid, please register in http://api.fanyi.baidu.com/.".format(appid))
if not secretKey:
raise ValueError(
"'{}': INVALID secretKey, please register in http://api.fanyi.baidu.com/.".format(secretKey))
self.appid = appid
self.secretKey = secretKey
self.MAX_LENGTH = 6000
self.httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
self.queryURL = '/api/trans/vip/translate'
def translate(self, text, src='auto', tmp=None):
if src == 'auto':
src = self._get_srcLang(self._sendRequest(text, src, 'en'))
if not tmp: # if tmp is None, set the default tmp language
if src == 'en':
tmp = 'zh'
else:
tmp = 'en'
if src == tmp:
            raise ValueError(
                "Transit language ({tmp}) should be different from source language ({src}).".format(tmp=tmp, src=src))
# check the length of sentence
if len(text.encode('utf-8')) > self.MAX_LENGTH:
original_sentences = self._split_segement(sent_tokenize(text))
# translate the sentence one by one
tran_text = []
back_text = []
for sentence in original_sentences:
if sentence == "":
continue
# language A --> language B
mid = self._get_translatedText(self._sendRequest(sentence, src, tmp))
tran_text.append(mid)
# language B --> language A
back_text.append(self._get_translatedText(self._sendRequest(mid, tmp, src)))
tran_text = ' '.join(tran_text)
back_text = ' '.join(back_text)
        else:
            tran_text = self._get_translatedText(self._sendRequest(text, src, tmp))
            back_text = self._get_translatedText(self._sendRequest(tran_text, tmp, src))
result = Translated(src_lang=src, tmp_lang=tmp, text=text, trans_text=tran_text, back_text=back_text)
return result
def _sendRequest(self, text, src, tmp):
salt = random.randint(32768, 65536)
sign = hashlib.md5((self.appid + text + str(salt) + self.secretKey).encode()).hexdigest()
url = self.queryURL + '?appid=' + self.appid + '&q=' + urllib.parse.quote(
text) + '&from=' + src + '&to=' + tmp + '&salt=' + str(salt) + '&sign=' + sign
try:
self.httpClient.request('GET', url)
response = self.httpClient.getresponse()
result_all = response.read().decode("utf-8")
result = json.loads(result_all)
except Exception as e:
print("'{}': Connection error.".format(e))
return result
def _split_segement(self, sentences):
"""
Split the long sentences into multiple sentences whose lengths are less than MAX_LENGTH.
:param sentences: the list of tokenized sentences from source text
:return: the list of sentences with proper length
:rtype: list
"""
sentences_list = []
block = ""
for sentence in sentences:
if len((block.rstrip() + ' ' + sentence).encode('utf-8')) > self.MAX_LENGTH:
sentences_list.append(block.rstrip())
block = sentence
else:
block = block + sentence + ' '
sentences_list.append(block.rstrip())
return sentences_list
def closeHTTP(self):
self.httpClient.close()
def _get_srcLang(self, result):
return result['from']
def _get_translatedText(self, result):
return result['trans_result'][0]['dst']
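# Usage sketch (illustrative; requires real Baidu Translate credentials, which are
# placeholders here, and the Translated attribute names are assumed to match the
# constructor keywords used above):
if __name__ == "__main__":
    bt = BackTranslation_Baidu(appid="your_appid", secretKey="your_secret")
    result = bt.translate("Back translation is a simple data augmentation trick.", src="en")
    print(result.trans_text)  # text in the intermediate language (zh by default for en)
    print(result.back_text)   # text translated back to the source language
    bt.closeHTTP()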
|
StarcoderdataPython
|
12845109
|
<reponame>IntelLabs/OSCAR<gh_stars>10-100
#
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: BSD-3-Clause
#
import logging
from collections import Counter
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import transforms as T
from torchvision.transforms import functional as TF
import pytorch_lightning as pl
from sklearn.model_selection import StratifiedShuffleSplit
from oscar.data.ucf101 import UCF101Dataset
from oscar.data.video import ClipSampler, MiddleClipSampler
from oscar.data.transforms import ExCompose, Permute, Squeeze, Unsqueeze, ExSplitLambda
from MARS.dataset.preprocess_data import get_mean
logger = logging.getLogger(__name__)
class MARSDataModule(pl.LightningDataModule):
def __init__(
self,
modality,
frames_root,
annotation_dir,
fold=1,
batch_size=16,
num_workers=1,
frame_size=112,
clip_length=16,
clip_step=1,
mid_clip_only=False,
random_resized_crop_scale=(0.5, 1.0),
test_indices=None,
test_size=0,
random_seed=0,
collate_fn=None,
frame_cache_dir=None,
train_file_patterns=["{:05d}.jpg", "TVL1jpg_x_{:05d}.jpg", "TVL1jpg_y_{:05d}.jpg"],
test_file_patterns=["{:05d}.jpg"],
):
super().__init__()
assert modality in ['RGB', 'RGB_Flow',
'RGBMasked_Flow', 'RGBMasked_FlowMasked',
'RGBSeg_Flow',
'RGBSegMC_Flow',
'RGBSegSC_Flow', 'RGBKeySC_Flow']
self.modality = modality
self.frames_root = frames_root
self.annotation_dir = annotation_dir
self.fold = fold
self.batch_size = batch_size
self.num_workers = num_workers
self.frame_size = frame_size
self.clip_length = clip_length
self.clip_step = clip_step
self.mid_clip_only = mid_clip_only
self.random_resized_crop_scale = random_resized_crop_scale
self.test_indices = test_indices
self.test_size = test_size
self.random_seed = random_seed
self.collate_fn = collate_fn
self.frame_cache_dir = frame_cache_dir
self.train_file_patterns = train_file_patterns
self.test_file_patterns = test_file_patterns
from detectron2.data import MetadataCatalog
self.palette = MetadataCatalog.get('coco_2017_val').thing_colors
if 'RGBSegMC_' in self.modality:
self.input_channels = len(self.palette) + 2 # COCO-things + XY
elif 'RGBSegSC_' in self.modality or 'RGBKeySC_' in self.modality:
self.input_channels = 1 + 2 # Mask + XY
else:
self.input_channels = 3 + 2 # RGB + XY
@classmethod
def add_argparse_args(cls, parser):
group = parser.add_argument_group(cls.__name__)
group.add_argument('--modality', default='RGB', type=str, choices=['RGB', 'RGB_Flow', 'RGBMasked_Flow', 'RGBMasked_FlowMasked', 'RGBSeg_Flow', 'RGBSegMC_Flow', 'RGBSegSC_Flow', 'RGBKeySC_Flow'])
group.add_argument('--dataset', default='UCF101', type=str, choices=['UCF101'])
group.add_argument('--only_RGB', default=False, action='store_true')
group.add_argument('--batch_size', default=32, type=int)
group.add_argument('--frame_dir', default=None, type=str)
group.add_argument('--annotation_path', default=None, type=str)
group.add_argument('--frame_mask_dir', default=None, type=str)
group.add_argument('--n_workers', default=4, type=int)
group.add_argument('--split', default=1, type=int, choices=[1, 2, 3])
group.add_argument('--sample_size', default=112, type=int)
group.add_argument('--sample_duration', default=16, type=int)
group.add_argument('--step_between_clips', default=1, type=int)
group.add_argument('--random_resized_crop_scale_min', default=0.5, type=float)
group.add_argument('--random_resized_crop_scale_max', default=1.0, type=float)
group.add_argument('--test_size', default=0, type=int)
group.add_argument('--test_index', default=None, type=int, nargs='+')
group.add_argument('--random_seed', default=1, type=bool, help='Manually set random seed of sampling validation clip')
group.add_argument('--mid_clip_only', default=False, type=bool)
group.add_argument('--shuffle_axes', default=None, type=int, nargs='+')
return parser
def prepare_data(self):
UCF101Dataset(self.frames_root,
self.annotation_dir,
self.train_file_patterns,
fold=self.fold)
def setup(self, stage=None):
logger.info("Setting up data module for stage: %s", stage)
channels_mean = torch.tensor([*get_mean('activitynet'), 127.5, 127.5])
train_channels_mean = channels_mean
test_channels_mean = channels_mean[0:3]
# Create robust feature transform
robust_extractor = None
if 'RGBMasked_' in self.modality:
from oscar.defences.preprocessor.detectron2 import CachedDetectron2Preprocessor
from oscar.defences.preprocessor.ablator import AblatorPyTorch
dt2 = CachedDetectron2Preprocessor(self.frame_cache_dir)
robust_extractor = AblatorPyTorch(channels_mean / 255, detectron2=dt2)
elif 'RGBSeg_' in self.modality:
from oscar.defences.preprocessor.detectron2 import CachedDetectron2Preprocessor
from oscar.defences.preprocessor.paletted_semantic_segmentor import PalettedSemanticSegmentorPyTorch
dt2 = CachedDetectron2Preprocessor(self.frame_cache_dir)
robust_extractor = PalettedSemanticSegmentorPyTorch(channels_mean[0:3] / 255, detectron2=dt2, palette=self.palette)
elif 'RGBSegMC_' in self.modality:
from oscar.defences.preprocessor.detectron2 import CachedDetectron2Preprocessor
from oscar.defences.preprocessor.multichannel_semantic_segmentor import MultichannelSemanticSegmentorPyTorch
dt2 = CachedDetectron2Preprocessor(self.frame_cache_dir)
robust_extractor = MultichannelSemanticSegmentorPyTorch(detectron2=dt2, nb_channels=len(self.palette))
train_channels_mean = 127.5
test_channels_mean = 127.5
elif 'RGBSegSC_' in self.modality or 'RGBKeySC_' in self.modality:
# TODO: Create another segmentor class that is faster and selects objects relevant to UCF101
from oscar.defences.preprocessor.detectron2 import CachedDetectron2Preprocessor
from oscar.defences.preprocessor.multichannel_semantic_segmentor import MultichannelSemanticSegmentorPyTorch
dt2 = CachedDetectron2Preprocessor(self.frame_cache_dir)
robust_extractor = MultichannelSemanticSegmentorPyTorch(detectron2=dt2, nb_channels=1) # 1 channel == person mask
train_channels_mean = 127.5
test_channels_mean = 127.5
# Apply robust feature extractor to RGB channels only if not _FlowMasked
if robust_extractor is not None and '_FlowMasked' not in self.modality:
robust_extractor = ExSplitLambda(robust_extractor, 3, 0, dim=-1)
robust_transform = ExCompose([
T.Normalize(0, 255), # [0, 255] -> [0, 1]
Permute(0, 2, 3, 1), # TCHW -> THWC
Unsqueeze(0), # THWC -> NTHWC
robust_extractor, # Apply robust feature extractor
Squeeze(0), # NTHWC -> THWC
Permute(0, 3, 1, 2), # THWC -> TCHW
T.Normalize(0, 1/255), # [0, 1] -> [0, 255]
])
# Train transform
# FIXME: Don't load flow when modality does not specify _Flow!
# FIXME: Is there a way to decouple rgb and flow datasets like we did above?
# The problem is they need to be synchronized somehow.
train_transform = ExCompose([
robust_transform,
T.RandomResizedCrop(self.frame_size, scale=self.random_resized_crop_scale, ratio=(1., 1.)), # Crop then Resize
T.RandomApply([TF.hflip, ExSplitLambda(T.Normalize(255, -1), 1, -2, dim=-1)]), # Horizontal flip and invert x-flow randomly
T.Normalize(train_channels_mean, 1), # [0, 255] -> ~[-128, 128]
Permute(1, 0, 2, 3), # TCHW -> CTHW
])
train_sampler = ClipSampler(self.clip_length, self.clip_step)
# Test transform
test_transform = ExCompose([
robust_transform,
T.Resize(self.frame_size),
T.CenterCrop(self.frame_size),
T.Normalize(test_channels_mean, 1), # [0, 255] -> ~[-128, 128]
Permute(1, 0, 2, 3), # TCHW -> CTHW
])
test_sampler = range
if self.mid_clip_only:
test_sampler = MiddleClipSampler(self.clip_length, self.clip_step)
if stage == 'fit' or stage is None:
logger.info("Loading training data...")
self.train_dataset = UCF101Dataset(self.frames_root,
self.annotation_dir,
self.train_file_patterns,
train=True,
fold=self.fold,
transform=train_transform,
sampler=train_sampler)
logger.info("train data = %d", len(self.train_dataset))
logger.info("Loading validation data...")
self.val_dataset = UCF101Dataset(self.frames_root,
self.annotation_dir,
self.test_file_patterns,
train=False,
fold=self.fold,
transform=test_transform,
sampler=train_sampler)
logger.info("val data = %d", len(self.val_dataset))
if stage == 'test' or stage is None:
logger.info("Loading test data...")
test_dataset = UCF101Dataset(self.frames_root,
self.annotation_dir,
self.test_file_patterns,
train=False,
fold=self.fold,
transform=test_transform,
sampler=test_sampler)
# Select test indices...
if self.test_indices is not None:
logger.info("Selecting data indices: %s", self.test_indices)
test_dataset = torch.utils.data.Subset(test_dataset, self.test_indices)
# ...or subsample test_dataset using a stratified split of test_size elements.
elif self.test_size > 0:
y = test_dataset.targets
if test_dataset.target_transform is not None:
y_transform = [test_dataset.target_transform(y_) for y_ in y]
sss = StratifiedShuffleSplit(n_splits=1, test_size=self.test_size, random_state=self.random_seed)
_, indices = next(sss.split(y, y_transform))
y_selected = [y[i] for i in indices]
logger.info("Stratified subsampling test dataset to %d samples: %s", self.test_size, Counter(y_selected))
test_dataset = torch.utils.data.Subset(test_dataset, indices)
self.test_dataset = test_dataset
logger.info("test data = %d", len(self.test_dataset))
def train_dataloader(self):
return DataLoader(self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
pin_memory=True,
drop_last=True,
collate_fn=self.collate_fn)
def val_dataloader(self):
return DataLoader(self.val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
drop_last=True,
collate_fn=self.collate_fn)
def test_dataloader(self):
return DataLoader(self.test_dataset,
batch_size=1, # Must be 1 because we can't batch whole videos
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
drop_last=False,
collate_fn=self.collate_fn)
|
StarcoderdataPython
|
5128902
|
"""
Base class for ensemble models
Based on: https://github.com/yzhao062/combo/blob/master/combo/models/base.py
and extended for distributed incremental models
"""
import warnings
from collections import defaultdict
from abc import ABC, abstractmethod
from sklearn.utils import column_or_1d
from sklearn.utils.multiclass import check_classification_targets
import numpy as np
from inspect import signature
class BaseAggregator(ABC):
@abstractmethod
def __init__(self, base_estimators, fitted_estimators=[], window_size=200, pre_fitted=False):
"""
:param base_estimators: Estimator objects with partial_fit defined
(incremental learning models).
:param fitted_estimators: Batch learning estimators.
Used only for scoring.
:param metrics: Object that defines the quality of predictions
(ex. metrics.accuracy_score in scikit-learn)
"""
assert (isinstance(base_estimators, (list)))
assert (isinstance(fitted_estimators, (list)))
if (len(base_estimators) + len(fitted_estimators)) < 2:
raise ValueError('At least 2 estimators are required')
self.base_estimators = base_estimators
self.fitted_estimators = fitted_estimators
self.n_base_estimators_ = len(self.base_estimators)
self.n_fitted_estimators_ = len(self.fitted_estimators)
self.window_size = window_size
def fit(self, X, y=None):
"""
:param X: numpy.ndarray of shape (n_samples, n_features).
Input samples.
:param y: numpy.ndarray of shape (n_samples)
Labels for the target variable.
:return:
"""
return self.partial_fit(X,y)
@abstractmethod
def partial_fit(self, X, y=None, classes=None):
"""
:param X: numpy.ndarray of shape (n_samples, n_features).
Input samples.
:param y: numpy.ndarray of shape (n_samples)
Labels for the target variable.
:param classes: numpy.ndarray, optional.
Unique classes in the data y.
:return:
"""
pass
@abstractmethod
def predict(self, X):
"""
:param X: numpy.ndarray of shape (n_samples, n_features).
Input samples.
:return: numpy array of shape (n_samples,).
Class labels/predictions for input samples.
"""
pass
# @abstractmethod
# def predict_proba(self, X):
# """Return probability estimates for the test data X.
# Parameters
# ----------
# X : numpy array of shape (n_samples, n_features)
# The input samples.
# Returns
# -------
# p : numpy array of shape (n_samples,)
# The class probabilities of the input samples.
# Classes are ordered by lexicographic order.
# """
# pass
def _set_n_classes(self, y):
self._classes = 2 # default as binary classification
if y is not None:
check_classification_targets(y)
self._classes = len(np.unique(y))
return self
def _set_weights(self, weights):
"""Internal function to set estimator weights.
Parameters
----------
weights : numpy array of shape (n_estimators,)
Estimator weights. May be used after the alignment.
Returns
-------
self
"""
if weights is None:
self.weights = np.ones([1, self.n_base_estimators_])
else:
self.weights = column_or_1d(weights).reshape(1, len(weights))
assert (self.weights.shape[1] == self.n_base_estimators_)
            # adjust probability by a factor for integrity (added to 1)
adjust_factor = self.weights.shape[1] / np.sum(weights)
self.weights = self.weights * adjust_factor
print(self.weights)
return self
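    # Worked example (illustrative): with weights=[1, 2, 1] and 3 base estimators,
    # adjust_factor = 3 / 4 = 0.75, so the stored weights become [0.75, 1.5, 0.75],
    # i.e. they are rescaled to sum to the number of estimators (mean weight of 1).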
def __len__(self):
"""Returns the number of estimators in the ensemble."""
return len(self.base_estimators)
def __getitem__(self, index):
"""Returns the index'th estimator in the ensemble."""
return self.base_estimators[index]
def __iter__(self):
"""Returns iterator over estimators in the ensemble."""
        return iter(self.base_estimators)
# noinspection PyMethodParameters
def _get_param_names(cls):
# noinspection PyPep8
"""Get parameter names for the estimator
See http://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html
and sklearn/base.py for more information.
"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
# noinspection PyPep8
def get_params(self, deep=True):
"""Get parameters for this estimator.
See http://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html
and sklearn/base.py for more information.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
# noinspection PyPep8
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
See http://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html
and sklearn/base.py for more information.
Returns
-------
self : object
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition('__')
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self))
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
# @property
def estimators(self):
return self.base_estimators, self.fitted_estimators
# @property
def base_estimators(self):
return self.base_estimators
# @property
def fitted_estimators(self):
return self.fitted_estimators
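# Minimal concrete sketch (illustrative, not from the original module): a majority-vote
# ensemble over incremental scikit-learn style estimators. The voting logic is an
# assumption; the base class above only fixes the interface.
class SimpleMajorityVote(BaseAggregator):
    def __init__(self, base_estimators, window_size=200):
        super().__init__(base_estimators, window_size=window_size)

    def partial_fit(self, X, y=None, classes=None):
        self._set_n_classes(y)
        for clf in self.base_estimators:
            clf.partial_fit(X, y, classes=classes)
        return self

    def predict(self, X):
        # stack per-estimator predictions, then take a column-wise majority vote
        # (assumes integer class labels >= 0, as required by np.bincount)
        preds = np.asarray([clf.predict(X) for clf in self.base_estimators])
        return np.apply_along_axis(
            lambda col: np.bincount(col.astype(int)).argmax(), axis=0, arr=preds)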
|
StarcoderdataPython
|
4909835
|
<reponame>Kymartin45/twitter-oauth2
from dotenv import dotenv_values
import base64
import requests
import json
config = dotenv_values('.env')
CLIENT_ID = config.get('TWITTER_CLIENT_ID')
CLIENT_SECRET = config.get('TWITTER_CLIENT_SECRET')
REDIRECT_URI = config.get('TWITTER_REDIRECT_URI')
# Authorize user account
def authUrl():
auth_url = 'https://twitter.com/i/oauth2/authorize'
scopes = ['tweet.write', 'tweet.read', 'users.read', 'offline.access']
state = 'state'
param = {
'response_type': 'code',
'client_id': CLIENT_ID,
'redirect_uri': REDIRECT_URI,
'scope': ' '.join(scopes),
'state': state,
'code_challenge': 'challenge',
'code_challenge_method': 'plain'
}
r = requests.get(auth_url, params=param)
if r.status_code != 200:
print(f'Error: {r.status_code}')
return r.close()
print(r.url)
def reqToken(code):
token_url = 'https://api.twitter.com/2/oauth2/token'
rawAuth = f'{CLIENT_ID}:{CLIENT_SECRET}'
auth = base64.b64encode(rawAuth.encode('ascii')).decode('ascii')
header = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': f'Basic {auth}'
}
data = {
'code': code,
'grant_type': 'authorization_code',
'client_id': CLIENT_ID,
'redirect_uri': REDIRECT_URI,
'code_verifier': 'challenge'
}
r = requests.post(token_url, headers=header, params=data)
if r.status_code != 200:
print(f'Error: {r.status_code}')
reqToken.refresh_token = r.json()['refresh_token']
authUrl()
code = input('enter code from url after authenticating: ') # auth code from url
reqToken(code)
# refresh token allows app to obtain new access token w/o user prompt
def refreshToken():
refresh_url = 'https://api.twitter.com/2/oauth2/token'
headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
}
data = {
'refresh_token': reqToken.refresh_token,
'grant_type': 'refresh_token',
'client_id': CLIENT_ID
}
r = requests.post(refresh_url, headers=headers, params=data)
print(r.json()['access_token'])
refreshToken.access_token = r.json()['access_token']
refreshToken()
# Creates a Tweet from authenticated user
def postTweet(message):
post_tweet_url = 'https://api.twitter.com/2/tweets'
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {refreshToken.access_token}'
}
message = {
'text': message
}
r = requests.post(post_tweet_url, headers=headers, data=json.dumps(message))
print(r.json())
message = input('Whats on your mind?:\n') # Send your tweet
postTweet(message)
# get user by username
def getUser(username):
req_user_url = 'https://api.twitter.com/2/users/by'
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {refreshToken.access_token}'
}
username = {
'usernames': username
}
r = requests.get(req_user_url, headers=headers, params=username)
data = r.text
parsed_data = json.loads(data)
print(r.json())
# loop response for user id
for user in parsed_data['data']:
getUser.user_id = user['id']
print(getUser.user_id)
username = input('Enter a username:\n')
getUser(username)
|
StarcoderdataPython
|
3556179
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import account_chart_template
from . import account_invoice
from . import account_move
from . import account_reconciliation_widget
from . import product
from . import stock
from . import res_config_settings
|
StarcoderdataPython
|
9779822
|
import urllib.request,json
from .models import news
News = news.News
Sources = news.Sources
# getting api key
api_key = None
# getting the news base url
base_url = None
def configure_request(app):
global api_key, base_url
api_key = app.config["NEWS_API_KEY"]
base_url = app.config["NEWS_API_BASE_URL"]
# __________________________News class ________________________
def get_news(category):
"""
    Function that gets the json response to our url request
"""
get_news_url = base_url.format(category,api_key)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
news_results = None
# confirming whether the response has any data
if get_news_response["articles"]:
news_results_list = get_news_response["articles"]
news_results = process_results(news_results_list)
return news_results
# if data is found,process it here
def process_results(news_list):
'''
Function that processes the news result and transform them to a list of Objects
Args:
news_list: A list of dictionaries that contain news details
Returns :
news_results: A list of news objects
'''
# To store our newly created news objects
news_results = []
# Accessing only the required values using a for loop
for news_item in news_list:
title = news_item.get("title")
description = news_item.get("description")
url = news_item.get("url")
urlToImage = news_item.get("urlToImage")
publishedAt = news_item.get("publishedAt")
        # in case an article does not have an image, only keep the articles that do
if urlToImage:
news_object = News(title,description,url,urlToImage,publishedAt)
news_results.append(news_object)
return news_results
# ___________________________Source class__________________________________________________________________
def sources_news(id):
'''
Function that gets the json response to our url request
'''
# Getting base url
sources_url = "https://newsapi.org/v2/sources?apiKey={}".format(api_key)
get_sources_url = sources_url.format(api_key)
with urllib.request.urlopen(get_sources_url) as url:
search_sources_data = url.read()
search_sources_response = json.loads(search_sources_data)
search_sources_results = None
if search_sources_response["sources"]:
search_sources_list = search_sources_response["sources"]
search_sources_results = process_sources(search_sources_list)
return search_sources_results
# Processing the sources
def process_sources(sources_list):
"""
Function that processes the sources result and transform them to a list of Objects
"""
sources_results = []
for sources_item in sources_list:
id = sources_item.get("id")
name = sources_item.get("name")
description = sources_item.get("description")
url = sources_item.get("url")
category = sources_item.get("category")
# for the sources that might have trouble with urls
if url:
sources_object = Sources(id, name, description, url, category)
sources_results.append(sources_object)
return sources_results
# _______________________________search For News__________________________________________________________
def search_news(topic):
'''
Function to search for news by topic
'''
search_news_url = "https://newsapi.org/v2/everything?q={}&apiKey={}".format(topic, api_key)
with urllib.request.urlopen(search_news_url) as url:
search_news_data = url.read()
search_news_response = json.loads(search_news_data)
search_news_results = None
if search_news_response["articles"]:
search_news_list = search_news_response["articles"]
search_news_results = process_results(search_news_list)
return search_news_results
|
StarcoderdataPython
|
6675969
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import getdate, nowdate
no_cache = 1
no_sitemap = 1
def get_context(context):
result = []
return {"rowContent" : result}
@frappe.whitelist()
def enterOperation(op, itemCode, person, date):
frappe.errprint("llama")
item = frappe.get_all('equipment',filters={'item_code':itemCode},fields=['name'])
frappe.errprint(item)
doc = frappe.new_doc("equipment_movement")
doc.operation = op
doc.item = item[0]['name']
doc.user = person
doc.date = date
return doc.submit()
|
StarcoderdataPython
|
11230850
|
<reponame>invinst/CPDB<gh_stars>10-100
from django.http.response import HttpResponse
from django.views.generic import View
from common.models import OfficerAllegation
from common.json_serializer import JSONSerializer
from allegation.services.outcome_analytics import OutcomeAnalytics
from allegation.query_builders import OfficerAllegationQueryBuilder
class OfficerAllegationAnalysisAPIView(View):
def __init__(self, **kwargs):
super(OfficerAllegationAnalysisAPIView, self).__init__(**kwargs)
def get_officer_allegations(self, request, ignore_filters=None):
queries = OfficerAllegationQueryBuilder()\
.build(request.GET, ignore_filters)
return OfficerAllegation.objects.filter(queries)
def get(self, request):
officer_allegations = self.get_officer_allegations(request)
analytics = OutcomeAnalytics.get_analytics(officer_allegations)
content = JSONSerializer().serialize({
'analytics': analytics
})
return HttpResponse(content)
|
StarcoderdataPython
|
3344769
|
import requests
import time
url = "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_month.geojson"
def load_json_data(json_url):
"""
:param json_url: "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_month.geojson"
:return: Earthquakes json data
"""
data = requests.get(json_url).json()
return data
def get_place_and_magnitude(json_url):
    """
    :param json_url: URL of the earthquakes GeoJSON feed
    :return: Prints each place and magnitude where the magnitude is greater than 1.0.
    """
    start = time.time()
    data = load_json_data(json_url)
for dictionary in data['features']:
place = dictionary['properties']['place']
magnitude = dictionary['properties']['mag']
if magnitude > 1.0:
print(place, '|', magnitude)
end = time.time()
print('Time spent', end - start, 'seconds')
if __name__ == "__main__":
get_place_and_magnitude(url)
# Python3.6: 4.863826036453247
# PyPy3: 4.521245002746582
|
StarcoderdataPython
|
6539838
|
<reponame>techsaphal/NEPSE_ShareCalculator<gh_stars>0
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return render(request,'share_buy_form.html')
def buy_calculate(request):
buy_price =int( request.GET["buy_price"])
share_number = int(request.GET["share_number"])
share_amount = buy_price * share_number
if share_amount <= 50000:
broker_commission = (0.40*share_amount)/100
elif share_amount >=50001 and share_amount<= 500000:
broker_commission = (0.37*share_amount)/100
elif share_amount >=500001 and share_amount<= 2000000:
broker_commission = (0.34*share_amount)/100
elif share_amount >=2000001 and share_amount<= 10000000:
broker_commission = (0.3*share_amount)/100
else:
broker_commission = (0.27*share_amount)/100
sebon_commision = (0.015* share_amount)/100
dp_fee = 25
share_buy_calculation = broker_commission + share_amount + sebon_commision +dp_fee
cost_per_share = share_buy_calculation/share_number
return render(request,'result.html',
{ "share_amount":share_amount,"broker_commission":broker_commission,
"sebon_commision":sebon_commision,"share_buy_calculation":share_buy_calculation,
"dp_fee":dp_fee,"cost_per_share":cost_per_share })
|
StarcoderdataPython
|
1698401
|
import os
import sys
if getattr(sys, 'frozen', False):
directory_containing_script = os.path.dirname(sys.executable)
else:
directory_containing_script = sys.path[0]
CACHE_TIME_TO_LIVE = 60
CACHE_WAIT_TIME = 3
CHANNEL_ICONS_DIRECTORY_PATH = os.path.join(
directory_containing_script, 'resources', 'icons', 'channels'
)
DEFAULT_CHANNEL_ICON_FILE_PATH = os.path.join(CHANNEL_ICONS_DIRECTORY_PATH, '0.png')
DEFAULT_CONFIGURATION_FILE_PATH = os.path.join(
directory_containing_script, 'iptv_proxy.ini'
)
DEFAULT_DB_DIRECTORY_PATH = os.path.join(directory_containing_script, 'db')
DEFAULT_DB_FILE_PATH = os.path.join(DEFAULT_DB_DIRECTORY_PATH, 'iptv_proxy.db')
DEFAULT_HOSTNAME_LOOPBACK = 'localhost'
DEFAULT_LOGGING_CONFIGURATION = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'MultiLine': {
'format': '%(asctime)s %(name)-50s %(funcName)-40s %(levelname)-8s %(message)s',
'()': 'iptv_proxy.formatters.MultiLineFormatter',
},
},
'handlers': {
'console': {
'level': 'INFO',
'formatter': 'MultiLine',
'class': 'logging.StreamHandler',
},
'rotating_file': {
'level': 'INFO',
'formatter': 'MultiLine',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(
os.path.join(directory_containing_script, 'logs'), 'iptv_proxy.log'
),
'maxBytes': 1024 * 1024 * 10,
'backupCount': 10,
},
},
'loggers': {
'iptv_proxy': {
'handlers': ['console', 'rotating_file'],
'level': 'INFO',
'propagate': True,
}
},
}
DEFAULT_LOG_DIRECTORY_PATH = os.path.join(directory_containing_script, 'logs')
DEFAULT_LOG_FILE_PATH = os.path.join(DEFAULT_LOG_DIRECTORY_PATH, 'iptv_proxy.log')
DEFAULT_OPTIONAL_SETTINGS_FILE_PATH = os.path.join(
directory_containing_script, 'iptv_proxy_optional_settings.json'
)
DEFAULT_RECORDINGS_DIRECTORY_PATH = os.path.join(
directory_containing_script, 'recordings'
)
DEFAULT_SSL_DIRECTORY_PATH = os.path.join(directory_containing_script, 'ssl')
DEFAULT_SSL_CERTIFICATE_FILE_PATH = os.path.join(
DEFAULT_SSL_DIRECTORY_PATH, 'certificate', 'iptv_proxy.pem'
)
DEFAULT_SSL_KEY_FILE_PATH = os.path.join(
DEFAULT_SSL_DIRECTORY_PATH, 'key', 'iptv_proxy.pem'
)
DEFAULT_STREAMING_PROTOCOL = 'hls'
HTTP_CHUNK_SIZE = 8192
ICONS_DIRECTORY_PATH = os.path.join(directory_containing_script, 'resources', 'icons')
LOGGING_CONFIGURATION_FILE_PATH = os.path.join(
directory_containing_script, 'iptv_proxy_logging_configuration.json'
)
RESOURCES_DIRECTORY_PATH = os.path.join(directory_containing_script, 'resources')
TEMPLATES_BYTECODE_CACHE_DIRECTORY_PATH = os.path.join(
directory_containing_script, 'templates', 'byte_code_cache'
)
TEMPLATES_DIRECTORY_PATH = os.path.join(directory_containing_script, 'templates')
TRACE = 5
VERSION = '7.7.4'
|
StarcoderdataPython
|
9695927
|
<filename>demo/demo/urls.py
from django.conf.urls import url
from .views import index, listing, detailed
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^articles/$', listing, name='articles-listing'),
url(r'^articles/(?P<article_id>\d+)/$', detailed, name='articles-detailed'),
]
|
StarcoderdataPython
|
3271141
|
#!/usr/bin/env python
from pyspark.sql import SparkSession
import sys, time
disabled = sys.argv[1]
spark = SparkSession.builder.appName('query1-sql').getOrCreate()
if disabled == "Y":
spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1)
#set("spark.sql.cbo.enabled", "False")
elif disabled == 'N':
pass
else:
raise Exception ("This setting is not available.")
df = spark.read.format("parquet")
df1 = df.load("hdfs://master:9000/user/data/ratings.parquet")
df2 = df.load("hdfs://master:9000/user/data/movie_genres_100.parquet")
df1.registerTempTable("ratings")
df2.registerTempTable("movie_genres")
sqlString = \
"""
select *
from
(
SELECT * FROM movie_genres LIMIT 100
) as g, ratings as r
where r.movie_id = g.movie_id
"""
t1 = time.time()
spark.sql(sqlString).show()
t2 = time.time()
spark.sql(sqlString).explain()
print("Time with choosing join type %s is %.4f sec."%("enabled" if
disabled == 'N' else "disabled", t2-t1))
with open("../results/test_joins.txt","a+") as f:
f.write("{}\n".format(t2 - t1))
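# What to look for in the explain() output (informational note based on general Spark
# behaviour, not on this script's own captured output): with the default
# autoBroadcastJoinThreshold, the 100-row movie_genres side is small enough to be
# broadcast, so the plan typically shows a BroadcastHashJoin; with the threshold set
# to -1, size-based broadcasting is disabled and Spark usually falls back to a
# SortMergeJoin, which adds a shuffle and sort on both sides of the join.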
|
StarcoderdataPython
|
1614347
|
<filename>upsampling/utils/__init__.py
from .dataset import Sequence
from .upsampler import Upsampler
from .utils import get_sequence_or_none
|
StarcoderdataPython
|
3344638
|
<gh_stars>1-10
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
out = 0
for i in range(32):
bit = (n & (1 << i)) >> i
new_place = 32 - i - 1
out |= (bit << new_place)
return out
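# Worked example (illustrative, the classic 32-bit case):
#   n   = 0b00000010100101000001111010011100  (43261596)
#   out = 0b00111001011110000010100101000000  (964176192)
if __name__ == "__main__":
    assert Solution().reverseBits(43261596) == 964176192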
|
StarcoderdataPython
|
46900
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-04-11 22:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('history', '0004_history_prev_quantity'),
]
operations = [
migrations.AlterField(
model_name='history',
name='prev_quantity',
field=models.PositiveIntegerField(default=0, verbose_name='\u539f\u5e93\u5b58\u6570\u91cf'),
),
]
|
StarcoderdataPython
|
11284341
|
"""
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import math
import struct
from myhdl import *
import axis_ep
from pcie_us import *
class UltrascalePlusPCIeFunction(Endpoint, MSICapability, MSIXCapability):
def __init__(self):
super(UltrascalePlusPCIeFunction, self).__init__()
self.msi_64bit_address_capable = 1
self.msi_per_vector_mask_capable = 0
self.register_capability(PM_CAP_ID, offset=0x10)
self.register_capability(MSI_CAP_ID, offset=0x12)
self.register_capability(MSIX_CAP_ID, offset=0x18)
self.register_capability(PCIE_CAP_ID, offset=0x1c)
class UltrascalePlusPCIe(Device):
def __init__(self):
super(UltrascalePlusPCIe, self).__init__()
self.has_logic = False
self.default_function = UltrascalePlusPCIeFunction
self.dw = 256
# configuration options
self.pcie_generation = 3
self.pcie_link_width = 8
self.user_clk_frequency = 250e6
self.alignment = "dword"
self.cq_cc_straddle = False
self.rq_rc_straddle = False
self.rc_4tlp_straddle = False
self.enable_pf1 = False
self.enable_client_tag = True
self.enable_extended_tag = False
self.enable_parity = False
self.enable_rx_msg_interface = False
self.enable_sriov = False
self.enable_extended_configuration = False
self.enable_pf0_msi = False
self.enable_pf1_msi = False
self.cq_queue = []
self.cq_np_queue = []
self.cq_np_req_count = 0
self.rc_queue = []
self.msg_queue = []
self.config_space_enable = False
self.cq_source = CQSource()
self.cc_sink = CCSink()
self.rq_sink = RQSink()
self.rc_source = RCSource()
self.rq_seq_num = []
self.make_function()
def upstream_recv(self, tlp):
# logging
print("[%s] Got downstream TLP: %s" % (highlight(self.get_desc()), repr(tlp)))
if tlp.fmt_type == TLP_CFG_READ_0 or tlp.fmt_type == TLP_CFG_WRITE_0:
# config type 0
if not self.config_space_enable:
print("Configuraion space disabled")
cpl = TLP()
cpl.set_crs_completion(tlp, (self.bus_num, self.device_num, 0))
# logging
print("[%s] CRS Completion: %s" % (highlight(self.get_desc()), repr(cpl)))
yield from self.upstream_send(cpl)
return
elif tlp.dest_id.device == self.device_num:
# capture address information
self.bus_num = tlp.dest_id.bus
for f in self.functions:
f.bus_num = self.bus_num
# pass TLP to function
for f in self.functions:
if f.function_num == tlp.dest_id.function:
yield from f.upstream_recv(tlp)
return
#raise Exception("Function not found")
print("Function not found")
else:
print("Device number mismatch")
# Unsupported request
cpl = TLP()
cpl.set_ur_completion(tlp, (self.bus_num, self.device_num, 0))
# logging
print("[%s] UR Completion: %s" % (highlight(self.get_desc()), repr(cpl)))
yield from self.upstream_send(cpl)
elif (tlp.fmt_type == TLP_CPL or tlp.fmt_type == TLP_CPL_DATA or
tlp.fmt_type == TLP_CPL_LOCKED or tlp.fmt_type == TLP_CPL_LOCKED_DATA):
# Completion
if tlp.requester_id.bus == self.bus_num and tlp.requester_id.device == self.device_num:
for f in self.functions:
if f.function_num == tlp.requester_id.function:
tlp = TLP_us(tlp)
tlp.error_code = RC_ERROR_NORMAL_TERMINATION
if tlp.status != CPL_STATUS_SC:
                            tlp.error_code = RC_ERROR_BAD_STATUS
self.rc_queue.append(tlp)
return
print("Function not found")
else:
print("Bus/device number mismatch")
elif (tlp.fmt_type == TLP_IO_READ or tlp.fmt_type == TLP_IO_WRITE):
# IO read/write
for f in self.functions:
bar = f.match_bar(tlp.address, True)
if len(bar) == 1:
tlp = TLP_us(tlp)
tlp.bar_id = bar[0][0]
tlp.bar_aperture = int(math.log2((~self.functions[0].bar_mask[bar[0][0]]&0xffffffff)+1))
tlp.completer_id = PcieId(self.bus_num, self.device_num, f.function_num)
self.cq_queue.append(tlp)
return
print("IO request did not match any BARs")
# Unsupported request
cpl = TLP()
cpl.set_ur_completion(tlp, (self.bus_num, self.device_num, 0))
# logging
print("[%s] UR Completion: %s" % (highlight(self.get_desc()), repr(cpl)))
yield from self.upstream_send(cpl)
elif (tlp.fmt_type == TLP_MEM_READ or tlp.fmt_type == TLP_MEM_READ_64 or
tlp.fmt_type == TLP_MEM_WRITE or tlp.fmt_type == TLP_MEM_WRITE_64):
# Memory read/write
for f in self.functions:
bar = f.match_bar(tlp.address)
if len(bar) == 1:
tlp = TLP_us(tlp)
tlp.bar_id = bar[0][0]
if self.functions[0].bar[bar[0][0]] & 4:
tlp.bar_aperture = int(math.log2((~(self.functions[0].bar_mask[bar[0][0]] | (self.functions[0].bar_mask[bar[0][0]+1]<<32))&0xffffffffffffffff)+1))
else:
tlp.bar_aperture = int(math.log2((~self.functions[0].bar_mask[bar[0][0]]&0xffffffff)+1))
tlp.completer_id = PcieId(self.bus_num, self.device_num, f.function_num)
self.cq_queue.append(tlp)
return
print("Memory request did not match any BARs")
if tlp.fmt_type == TLP_MEM_READ or tlp.fmt_type == TLP_MEM_READ_64:
# Unsupported request
cpl = TLP()
cpl.set_ur_completion(tlp, PcieId(self.bus_num, self.device_num, 0))
# logging
print("[%s] UR Completion: %s" % (highlight(self.get_desc()), repr(cpl)))
yield from self.upstream_send(cpl)
else:
raise Exception("TODO")
def create_logic(self,
# Completer reQuest Interface
m_axis_cq_tdata=None,
m_axis_cq_tuser=None,
m_axis_cq_tlast=None,
m_axis_cq_tkeep=None,
m_axis_cq_tvalid=None,
m_axis_cq_tready=None,
pcie_cq_np_req=Signal(intbv(1)[2:]),
pcie_cq_np_req_count=Signal(intbv(0)[6:]),
# Completer Completion Interface
s_axis_cc_tdata=None,
s_axis_cc_tuser=None,
s_axis_cc_tlast=None,
s_axis_cc_tkeep=None,
s_axis_cc_tvalid=None,
s_axis_cc_tready=None,
# Requester reQuest Interface
s_axis_rq_tdata=None,
s_axis_rq_tuser=None,
s_axis_rq_tlast=None,
s_axis_rq_tkeep=None,
s_axis_rq_tvalid=None,
s_axis_rq_tready=None,
pcie_rq_seq_num0=Signal(intbv(0)[6:]),
pcie_rq_seq_num_vld0=Signal(bool(0)),
pcie_rq_seq_num1=Signal(intbv(0)[6:]),
pcie_rq_seq_num_vld1=Signal(bool(0)),
pcie_rq_tag0=Signal(intbv(0)[8:]),
pcie_rq_tag1=Signal(intbv(0)[8:]),
pcie_rq_tag_av=Signal(intbv(0)[4:]),
pcie_rq_tag_vld0=Signal(bool(0)),
pcie_rq_tag_vld1=Signal(bool(0)),
# Requester Completion Interface
m_axis_rc_tdata=None,
m_axis_rc_tuser=None,
m_axis_rc_tlast=None,
m_axis_rc_tkeep=None,
m_axis_rc_tvalid=None,
m_axis_rc_tready=None,
# Transmit Flow Control Interface
pcie_tfc_nph_av=Signal(intbv(0)[4:]),
pcie_tfc_npd_av=Signal(intbv(0)[4:]),
# Configuration Management Interface
cfg_mgmt_addr=Signal(intbv(0)[10:]),
cfg_mgmt_function_number=Signal(intbv(0)[8:]),
cfg_mgmt_write=Signal(bool(0)),
cfg_mgmt_write_data=Signal(intbv(0)[32:]),
cfg_mgmt_byte_enable=Signal(intbv(0)[4:]),
cfg_mgmt_read=Signal(bool(0)),
cfg_mgmt_read_data=Signal(intbv(0)[32:]),
cfg_mgmt_read_write_done=Signal(bool(0)),
cfg_mgmt_debug_access=Signal(bool(0)),
# Configuration Status Interface
cfg_phy_link_down=Signal(bool(0)),
cfg_phy_link_status=Signal(intbv(0)[2:]),
cfg_negotiated_width=Signal(intbv(0)[3:]),
cfg_current_speed=Signal(intbv(0)[2:]),
cfg_max_payload=Signal(intbv(0)[2:]),
cfg_max_read_req=Signal(intbv(0)[3:]),
cfg_function_status=Signal(intbv(0)[16:]),
cfg_vf_status=Signal(intbv(0)[504:]),
cfg_function_power_state=Signal(intbv(0)[12:]),
cfg_vf_power_state=Signal(intbv(0)[756:]),
cfg_link_power_state=Signal(intbv(0)[2:]),
cfg_err_cor_out=Signal(bool(0)),
cfg_err_nonfatal_out=Signal(bool(0)),
cfg_err_fatal_out=Signal(bool(0)),
cfg_local_err_out=Signal(intbv(0)[5:]),
cfg_local_err_valid=Signal(bool(0)),
cfg_rx_pm_state=Signal(intbv(0)[2:]),
cfg_tx_pm_state=Signal(intbv(0)[2:]),
cfg_ltssm_state=Signal(intbv(0)[6:]),
cfg_rcb_status=Signal(intbv(0)[4:]),
cfg_obff_enable=Signal(intbv(0)[2:]),
cfg_pl_status_change=Signal(bool(0)),
cfg_tph_requester_enable=Signal(intbv(0)[4:]),
cfg_tph_st_mode=Signal(intbv(0)[12:]),
cfg_vf_tph_requester_enable=Signal(intbv(0)[252:]),
cfg_vf_tph_st_mode=Signal(intbv(0)[756:]),
# Configuration Received Message Interface
cfg_msg_received=Signal(bool(0)),
cfg_msg_received_data=Signal(intbv(0)[8:]),
cfg_msg_received_type=Signal(intbv(0)[5:]),
# Configuration Transmit Message Interface
cfg_msg_transmit=Signal(bool(0)),
cfg_msg_transmit_type=Signal(intbv(0)[3:]),
cfg_msg_transmit_data=Signal(intbv(0)[32:]),
cfg_msg_transmit_done=Signal(bool(0)),
# Configuration Flow Control Interface
cfg_fc_ph=Signal(intbv(0)[8:]),
cfg_fc_pd=Signal(intbv(0)[12:]),
cfg_fc_nph=Signal(intbv(0)[8:]),
cfg_fc_npd=Signal(intbv(0)[12:]),
cfg_fc_cplh=Signal(intbv(0)[8:]),
cfg_fc_cpld=Signal(intbv(0)[12:]),
cfg_fc_sel=Signal(intbv(0)[3:]),
# Configuration Control Interface
cfg_hot_reset_in=Signal(bool(0)),
cfg_hot_reset_out=Signal(bool(0)),
cfg_config_space_enable=Signal(bool(1)),
cfg_dsn=Signal(intbv(0)[64:]),
cfg_ds_port_number=Signal(intbv(0)[8:]),
cfg_ds_bus_number=Signal(intbv(0)[8:]),
cfg_ds_device_number=Signal(intbv(0)[5:]),
cfg_ds_function_number=Signal(intbv(0)[3:]),
cfg_power_state_change_ack=Signal(bool(0)),
cfg_power_state_change_interrupt=Signal(bool(0)),
cfg_err_cor_in=Signal(bool(0)),
cfg_err_uncor_in=Signal(bool(0)),
cfg_flr_done=Signal(intbv(0)[4:]),
cfg_vf_flr_done=Signal(intbv(0)[1:]),
cfg_flr_in_process=Signal(intbv(0)[4:]),
cfg_vf_flr_in_process=Signal(intbv(0)[252:]),
cfg_req_pm_transition_l23_ready=Signal(bool(0)),
cfg_link_training_enable=Signal(bool(1)),
# Configuration Interrupt Controller Interface
cfg_interrupt_int=Signal(intbv(0)[4:]),
cfg_interrupt_sent=Signal(bool(0)),
cfg_interrupt_pending=Signal(intbv(0)[2:]),
cfg_interrupt_msi_enable=Signal(intbv(0)[4:]),
cfg_interrupt_msi_mmenable=Signal(intbv(0)[12:]),
cfg_interrupt_msi_mask_update=Signal(bool(0)),
cfg_interrupt_msi_data=Signal(intbv(0)[32:]),
cfg_interrupt_msi_select=Signal(intbv(0)[2:]),
cfg_interrupt_msi_int=Signal(intbv(0)[32:]),
cfg_interrupt_msi_pending_status=Signal(intbv(0)[32:]),
cfg_interrupt_msi_pending_status_data_enable=Signal(bool(0)),
cfg_interrupt_msi_pending_status_function_num=Signal(intbv(0)[2:]),
cfg_interrupt_msi_sent=Signal(bool(0)),
cfg_interrupt_msi_fail=Signal(bool(0)),
cfg_interrupt_msix_enable=Signal(intbv(0)[4:]),
cfg_interrupt_msix_mask=Signal(intbv(0)[4:]),
cfg_interrupt_msix_vf_enable=Signal(intbv(0)[252:]),
cfg_interrupt_msix_vf_mask=Signal(intbv(0)[252:]),
cfg_interrupt_msix_address=Signal(intbv(0)[64:]),
cfg_interrupt_msix_data=Signal(intbv(0)[32:]),
cfg_interrupt_msix_int=Signal(bool(0)),
cfg_interrupt_msix_vec_pending=Signal(intbv(0)[2:]),
cfg_interrupt_msix_vec_pending_status=Signal(bool(0)),
cfg_interrupt_msi_attr=Signal(intbv(0)[3:]),
cfg_interrupt_msi_tph_present=Signal(bool(0)),
cfg_interrupt_msi_tph_type=Signal(intbv(0)[2:]),
cfg_interrupt_msi_tph_st_tag=Signal(intbv(0)[8:]),
cfg_interrupt_msi_function_number=Signal(intbv(0)[8:]),
# Configuration Extend Interface
cfg_ext_read_received=Signal(bool(0)),
cfg_ext_write_received=Signal(bool(0)),
cfg_ext_register_number=Signal(intbv(0)[10:]),
cfg_ext_function_number=Signal(intbv(0)[8:]),
cfg_ext_write_data=Signal(intbv(0)[32:]),
cfg_ext_write_byte_enable=Signal(intbv(0)[4:]),
cfg_ext_read_data=Signal(intbv(0)[32:]),
cfg_ext_read_data_valid=Signal(bool(0)),
# Clock and Reset Interface
user_clk=Signal(bool(0)),
user_reset=Signal(bool(0)),
user_lnk_up=Signal(bool(0)),
sys_clk=None,
sys_clk_gt=None,
sys_reset=None,
phy_rdy_out=Signal(bool(0)),
# debugging connections
cq_pause=Signal(bool(0)),
cc_pause=Signal(bool(0)),
rq_pause=Signal(bool(0)),
rc_pause=Signal(bool(0)),
):
# validate parameters and widths
self.dw = len(m_axis_cq_tdata)
assert self.dw in [64, 128, 256, 512]
if self.user_clk_frequency < 1e6:
self.user_clk_frequency *= 1e6
assert self.pcie_generation in [1, 2, 3]
assert self.pcie_link_width in [1, 2, 4, 8, 16]
assert self.user_clk_frequency in [62.5e6, 125e6, 250e6]
assert self.alignment in ["address", "dword"]
self.upstream_port.max_speed = self.pcie_generation
self.upstream_port.max_width = self.pcie_link_width
if self.dw < 256 or self.alignment != "dword":
# straddle only supported with 256-bit or wider, DWORD-aligned interface
assert not self.cq_cc_straddle
assert not self.rq_rc_straddle
if self.dw != 512:
assert not self.rc_4tlp_straddle
# TODO change this when support added
assert self.alignment == 'dword'
assert not self.cq_cc_straddle
assert not self.rq_rc_straddle
assert not self.rc_4tlp_straddle
if self.pcie_generation == 1:
if self.pcie_link_width in [1, 2]:
assert self.dw == 64
assert self.user_clk_frequency in [62.5e6, 125e6, 250e6]
elif self.pcie_link_width == 4:
assert self.dw == 64
assert self.user_clk_frequency in [125e6, 250e6]
elif self.pcie_link_width == 8:
assert self.dw in [64, 128]
if self.dw == 64:
assert self.user_clk_frequency == 250e6
elif self.dw == 128:
assert self.user_clk_frequency == 125e6
elif self.pcie_link_width == 16:
assert self.dw == 128
assert self.user_clk_frequency == 250e6
elif self.pcie_generation == 2:
if self.pcie_link_width == 1:
assert self.dw == 64
assert self.user_clk_frequency in [62.5e6, 125e6, 250e6]
elif self.pcie_link_width == 2:
assert self.dw == 64
assert self.user_clk_frequency in [125e6, 250e6]
elif self.pcie_link_width == 4:
assert self.dw in [64, 128]
if self.dw == 64:
assert self.user_clk_frequency == 250e6
elif self.dw == 128:
assert self.user_clk_frequency == 125e6
elif self.pcie_link_width == 8:
assert self.dw in [128, 256]
if self.dw == 128:
assert self.user_clk_frequency == 250e6
elif self.dw == 256:
assert self.user_clk_frequency == 125e6
elif self.pcie_link_width == 16:
assert self.dw == 256
assert self.user_clk_frequency == 250e6
elif self.pcie_generation == 3:
if self.pcie_link_width == 1:
assert self.dw == 64
assert self.user_clk_frequency in [125e6, 250e6]
elif self.pcie_link_width == 2:
assert self.dw in [64, 128]
if self.dw == 64:
assert self.user_clk_frequency == 250e6
elif self.dw == 128:
assert self.user_clk_frequency == 125e6
elif self.pcie_link_width == 4:
assert self.dw in [128, 256]
if self.dw == 128:
assert self.user_clk_frequency == 250e6
elif self.dw == 256:
assert self.user_clk_frequency == 125e6
elif self.pcie_link_width == 8:
assert self.dw == 256
assert self.user_clk_frequency == 250e6
elif self.pcie_link_width == 16:
assert self.dw == 512
assert self.user_clk_frequency == 250e6
# Completer reQuest Interface
assert len(m_axis_cq_tdata) == self.dw
if len(m_axis_cq_tdata) == 512:
assert len(m_axis_cq_tuser) == 183
else:
assert len(m_axis_cq_tuser) == 88
assert len(m_axis_cq_tlast) == 1
assert len(m_axis_cq_tkeep) == self.dw/32
assert len(m_axis_cq_tvalid) == 1
assert len(m_axis_cq_tready) == 1
assert len(pcie_cq_np_req) == 2
assert len(pcie_cq_np_req_count) == 6
# Completer Completion Interface
assert len(s_axis_cc_tdata) == self.dw
if len(m_axis_cq_tdata) == 512:
assert len(s_axis_cc_tuser) == 81
else:
assert len(s_axis_cc_tuser) == 33
assert len(s_axis_cc_tlast) == 1
assert len(s_axis_cc_tkeep) == self.dw/32
assert len(s_axis_cc_tvalid) == 1
assert len(s_axis_cc_tready) == 1
# Requester reQuest Interface
assert len(s_axis_rq_tdata) == self.dw
if len(m_axis_cq_tdata) == 512:
assert len(s_axis_rq_tuser) == 137
else:
assert len(s_axis_rq_tuser) == 62
assert len(s_axis_rq_tlast) == 1
assert len(s_axis_rq_tkeep) == self.dw/32
assert len(s_axis_rq_tvalid) == 1
assert len(s_axis_rq_tready) == 1
assert len(pcie_rq_seq_num0) == 6
assert len(pcie_rq_seq_num_vld0) == 1
assert len(pcie_rq_seq_num1) == 6
assert len(pcie_rq_seq_num_vld1) == 1
assert len(pcie_rq_tag0) >= 8
assert len(pcie_rq_tag1) >= 8
assert len(pcie_rq_tag_av) == 4
assert len(pcie_rq_tag_vld0) == 1
assert len(pcie_rq_tag_vld1) == 1
# Requester Completion Interface
assert len(m_axis_rc_tdata) == self.dw
if len(m_axis_cq_tdata) == 512:
assert len(m_axis_rc_tuser) == 161
else:
assert len(m_axis_rc_tuser) == 75
assert len(m_axis_rc_tlast) == 1
assert len(m_axis_rc_tkeep) == self.dw/32
assert len(m_axis_rc_tvalid) == 1
assert len(m_axis_rc_tready) == 1
# Transmit Flow Control Interface
assert len(pcie_tfc_nph_av) == 4
assert len(pcie_tfc_npd_av) == 4
# Configuration Management Interface
assert len(cfg_mgmt_addr) == 10
assert len(cfg_mgmt_function_number) == 8
assert len(cfg_mgmt_write) == 1
assert len(cfg_mgmt_write_data) == 32
assert len(cfg_mgmt_byte_enable) == 4
assert len(cfg_mgmt_read) == 1
assert len(cfg_mgmt_read_data) == 32
assert len(cfg_mgmt_read_write_done) == 1
assert len(cfg_mgmt_debug_access) == 1
# Configuration Status Interface
assert len(cfg_phy_link_down) == 1
assert len(cfg_phy_link_status) == 2
assert len(cfg_negotiated_width) == 3
assert len(cfg_current_speed) == 2
assert len(cfg_max_payload) == 2
assert len(cfg_max_read_req) == 3
assert len(cfg_function_status) == 16
assert len(cfg_vf_status) == 504
assert len(cfg_function_power_state) == 12
assert len(cfg_vf_power_state) == 756
assert len(cfg_link_power_state) == 2
assert len(cfg_err_cor_out) == 1
assert len(cfg_err_nonfatal_out) == 1
assert len(cfg_err_fatal_out) == 1
assert len(cfg_local_err_out) == 5
assert len(cfg_local_err_valid) == 1
assert len(cfg_rx_pm_state) == 2
assert len(cfg_tx_pm_state) == 2
assert len(cfg_ltssm_state) == 6
assert len(cfg_rcb_status) == 4
assert len(cfg_obff_enable) == 2
assert len(cfg_pl_status_change) == 1
assert len(cfg_tph_requester_enable) == 4
assert len(cfg_tph_st_mode) == 12
assert len(cfg_vf_tph_requester_enable) == 252
assert len(cfg_vf_tph_st_mode) == 756
# Configuration Received Message Interface
assert len(cfg_msg_received) == 1
assert len(cfg_msg_received_data) == 8
assert len(cfg_msg_received_type) == 5
# Configuration Transmit Message Interface
assert len(cfg_msg_transmit) == 1
assert len(cfg_msg_transmit_type) == 3
assert len(cfg_msg_transmit_data) == 32
assert len(cfg_msg_transmit_done) == 1
# Configuration Flow Control Interface
assert len(cfg_fc_ph) == 8
assert len(cfg_fc_pd) == 12
assert len(cfg_fc_nph) == 8
assert len(cfg_fc_npd) == 12
assert len(cfg_fc_cplh) == 8
assert len(cfg_fc_cpld) == 12
assert len(cfg_fc_sel) == 3
# Configuration Control Interface
assert len(cfg_hot_reset_in) == 1
assert len(cfg_hot_reset_out) == 1
assert len(cfg_config_space_enable) == 1
assert len(cfg_dsn) == 64
assert len(cfg_ds_port_number) == 8
assert len(cfg_ds_bus_number) == 8
assert len(cfg_ds_device_number) == 5
assert len(cfg_ds_function_number) == 3
assert len(cfg_power_state_change_ack) == 1
assert len(cfg_power_state_change_interrupt) == 1
assert len(cfg_err_cor_in) == 1
assert len(cfg_err_uncor_in) == 1
assert len(cfg_flr_done) == 4
assert len(cfg_vf_flr_done) == 1
assert len(cfg_flr_in_process) == 4
assert len(cfg_vf_flr_in_process) == 252
assert len(cfg_req_pm_transition_l23_ready) == 1
assert len(cfg_link_training_enable) == 1
# Configuration Interrupt Controller Interface
assert len(cfg_interrupt_int) == 4
assert len(cfg_interrupt_sent) == 1
assert len(cfg_interrupt_pending) == 2
assert len(cfg_interrupt_msi_enable) == 4
assert len(cfg_interrupt_msi_mmenable) == 12
assert len(cfg_interrupt_msi_mask_update) == 1
assert len(cfg_interrupt_msi_data) == 32
assert len(cfg_interrupt_msi_select) == 2
assert len(cfg_interrupt_msi_int) == 32
assert len(cfg_interrupt_msi_pending_status) == 32
assert len(cfg_interrupt_msi_pending_status_data_enable) == 1
assert len(cfg_interrupt_msi_pending_status_function_num) == 2
assert len(cfg_interrupt_msi_sent) == 1
assert len(cfg_interrupt_msi_fail) == 1
assert len(cfg_interrupt_msix_enable) == 4
assert len(cfg_interrupt_msix_mask) == 4
assert len(cfg_interrupt_msix_vf_enable) == 252
assert len(cfg_interrupt_msix_vf_mask) == 252
assert len(cfg_interrupt_msix_address) == 64
assert len(cfg_interrupt_msix_data) == 32
assert len(cfg_interrupt_msix_vec_pending) == 2
assert len(cfg_interrupt_msix_vec_pending_status) == 1
assert len(cfg_interrupt_msix_int) == 1
assert len(cfg_interrupt_msi_attr) == 3
assert len(cfg_interrupt_msi_tph_present) == 1
assert len(cfg_interrupt_msi_tph_type) == 2
assert len(cfg_interrupt_msi_tph_st_tag) == 8
assert len(cfg_interrupt_msi_function_number) == 8
# Configuration Extend Interface
assert len(cfg_ext_read_received) == 1
assert len(cfg_ext_write_received) == 1
assert len(cfg_ext_register_number) == 10
assert len(cfg_ext_function_number) == 8
assert len(cfg_ext_write_data) == 32
assert len(cfg_ext_write_byte_enable) == 4
assert len(cfg_ext_read_data) == 32
assert len(cfg_ext_read_data_valid) == 1
# Clock and Reset Interface
assert len(user_clk) == 1
assert len(user_reset) == 1
assert len(user_lnk_up) == 1
assert len(sys_clk) == 1
assert len(sys_clk_gt) == 1
assert len(sys_reset) == 1
assert len(phy_rdy_out) == 1
assert not self.has_logic
self.has_logic = True
# sources and sinks
cq_source_logic = self.cq_source.create_logic(
user_clk,
user_reset,
tdata=m_axis_cq_tdata,
tuser=m_axis_cq_tuser,
tlast=m_axis_cq_tlast,
tkeep=m_axis_cq_tkeep,
tvalid=m_axis_cq_tvalid,
tready=m_axis_cq_tready,
name='cq_source',
pause=cq_pause
)
cc_sink_logic = self.cc_sink.create_logic(
user_clk,
user_reset,
tdata=s_axis_cc_tdata,
tuser=s_axis_cc_tuser,
tlast=s_axis_cc_tlast,
tkeep=s_axis_cc_tkeep,
tvalid=s_axis_cc_tvalid,
tready=s_axis_cc_tready,
name='cc_sink',
pause=cc_pause
)
rq_sink_logic = self.rq_sink.create_logic(
user_clk,
user_reset,
tdata=s_axis_rq_tdata,
tuser=s_axis_rq_tuser,
tlast=s_axis_rq_tlast,
tkeep=s_axis_rq_tkeep,
tvalid=s_axis_rq_tvalid,
tready=s_axis_rq_tready,
name='rq_sink',
pause=rq_pause
)
rc_source_logic = self.rc_source.create_logic(
user_clk,
user_reset,
tdata=m_axis_rc_tdata,
tuser=m_axis_rc_tuser,
tlast=m_axis_rc_tlast,
tkeep=m_axis_rc_tkeep,
tvalid=m_axis_rc_tvalid,
tready=m_axis_rc_tready,
name='rc_source',
pause=rc_pause
)
if self.user_clk_frequency == 62.5e6:
user_clk_period = 8
elif self.user_clk_frequency == 125e6:
user_clk_period = 4
else:
user_clk_period = 2
@always(delay(user_clk_period))
def clkgen():
user_clk.next = not user_clk
@instance
def reset_logic():
while True:
yield user_clk.posedge, sys_reset.negedge
if not sys_reset:
user_reset.next = 1
yield sys_reset.posedge
yield delay(20)
yield user_clk.posedge
user_reset.next = 0
@instance
def logic():
while True:
yield user_clk.posedge, sys_reset.negedge
if not sys_reset:
self.cq_np_req_count = 0
elif pcie_cq_np_req:
if self.cq_np_req_count < 32:
self.cq_np_req_count += 1
# handle completer requests
# send any queued non-posted requests first
while self.cq_np_queue and self.cq_np_req_count > 0:
tlp = self.cq_np_queue.pop(0)
self.cq_np_req_count -= 1
self.cq_source.send(tlp.pack_us_cq())
# handle new requests
while self.cq_queue:
tlp = self.cq_queue.pop(0)
if (tlp.fmt_type == TLP_IO_READ or tlp.fmt_type == TLP_IO_WRITE or
tlp.fmt_type == TLP_MEM_READ or tlp.fmt_type == TLP_MEM_READ_64):
# non-posted request
if self.cq_np_req_count > 0:
# have credit, can forward
self.cq_np_req_count -= 1
self.cq_source.send(tlp.pack_us_cq())
else:
# no credits, put it in the queue
self.cq_np_queue.append(tlp)
else:
# posted request
self.cq_source.send(tlp.pack_us_cq())
pcie_cq_np_req_count.next = self.cq_np_req_count
# handle completer completions
while not self.cc_sink.empty():
pkt = self.cc_sink.recv()
tlp = TLP_us().unpack_us_cc(pkt, self.enable_parity)
if not tlp.completer_id_enable:
tlp.completer_id = PcieId(self.bus_num, self.device_num, tlp.completer_id.function)
if not tlp.discontinue:
yield from self.send(TLP(tlp))
# handle requester requests
while not self.rq_sink.empty():
pkt = self.rq_sink.recv()
tlp = TLP_us().unpack_us_rq(pkt, self.enable_parity)
if not tlp.requester_id_enable:
tlp.requester_id = PcieId(self.bus_num, self.device_num, tlp.requester_id.function)
if not tlp.discontinue:
if self.functions[tlp.requester_id.function].bus_master_enable:
self.rq_seq_num.append(tlp.seq_num)
yield from self.send(TLP(tlp))
else:
print("Bus mastering disabled")
# TODO: internal response
# transmit sequence number
pcie_rq_seq_num_vld0.next = 0
if self.rq_seq_num:
pcie_rq_seq_num0.next = self.rq_seq_num.pop(0)
pcie_rq_seq_num_vld0.next = 1
pcie_rq_seq_num_vld1.next = 0
if self.rq_seq_num:
pcie_rq_seq_num1.next = self.rq_seq_num.pop(0)
pcie_rq_seq_num_vld1.next = 1
# TODO pcie_rq_tag
# handle requester completions
while self.rc_queue:
tlp = self.rc_queue.pop(0)
self.rc_source.send(tlp.pack_us_rc())
# transmit flow control
# TODO
pcie_tfc_nph_av.next = 0xf
pcie_tfc_npd_av.next = 0xf
# configuration management
# TODO four cycle delay
function = cfg_mgmt_function_number
reg_num = cfg_mgmt_addr
if cfg_mgmt_read_write_done:
cfg_mgmt_read_write_done.next = 0
elif cfg_mgmt_read:
cfg_mgmt_read_data.next = self.functions[function].read_config_register(reg_num)
cfg_mgmt_read_write_done.next = 1
elif cfg_mgmt_write:
self.functions[function].write_config_register(reg_num, cfg_mgmt_write_data, cfg_mgmt_byte_enable)
cfg_mgmt_read_write_done.next = 1
#cfg_mgmt_debug_access
# configuration status
if not sys_reset:
cfg_phy_link_down.next = 1
user_lnk_up.next = 0
else:
cfg_phy_link_down.next = 0 # TODO
user_lnk_up.next = 1 # TODO
#cfg_phy_link_status
cfg_negotiated_width.next = min(max((self.functions[0].negotiated_link_width).bit_length()-1, 0), 4)
cfg_current_speed.next = min(max(self.functions[0].current_link_speed-1, 0), 3)
cfg_max_payload.next = self.functions[0].max_payload_size & 3
cfg_max_read_req.next = self.functions[0].max_read_request_size
status = 0
for k in range(len(self.functions)):
if self.functions[k].bus_master_enable:
status |= 0x07 << k*4
if self.functions[k].interrupt_disable:
status |= 0x08 << k*4
cfg_function_status.next = status
#cfg_vf_status
#cfg_function_power_state
#cfg_vf_power_state
#cfg_link_power_state
#cfg_err_cor_out
#cfg_err_nonfatal_out
#cfg_err_fatal_out
#cfg_local_err_out
#cfg_local_err_valid
#cfg_rx_pm_state
#cfg_tx_pm_state
#cfg_ltssm_state
status = 0
for k in range(len(self.functions)):
if self.functions[k].read_completion_boundary:
status |= 1 << k
cfg_rcb_status.next = status
#cfg_obff_enable
#cfg_pl_status_change
#cfg_tph_requester_enable
#cfg_tph_st_mode
#cfg_vf_tph_requester_enable
#cfg_vf_tph_st_mode
# configuration received message
#cfg_msg_received
#cfg_msg_received_data
#cfg_msg_received_type
# configuration transmit message
#cfg_msg_transmit
#cfg_msg_transmit_type
#cfg_msg_transmit_data
#cfg_msg_transmit_done
# configuration flow control
if (cfg_fc_sel == 0b010):
# Receive credits consumed
# TODO
cfg_fc_ph.next = 0
cfg_fc_pd.next = 0
cfg_fc_nph.next = 0
cfg_fc_npd.next = 0
cfg_fc_cplh.next = 0
cfg_fc_cpld.next = 0
elif (cfg_fc_sel == 0b100):
# Transmit credits available
# TODO
cfg_fc_ph.next = 0x80
cfg_fc_pd.next = 0x800
cfg_fc_nph.next = 0x80
cfg_fc_npd.next = 0x800
cfg_fc_cplh.next = 0x80
cfg_fc_cpld.next = 0x800
elif (cfg_fc_sel == 0b101):
# Transmit credit limit
# TODO
cfg_fc_ph.next = 0x80
cfg_fc_pd.next = 0x800
cfg_fc_nph.next = 0x80
cfg_fc_npd.next = 0x800
cfg_fc_cplh.next = 0x80
cfg_fc_cpld.next = 0x800
elif (cfg_fc_sel == 0b110):
# Transmit credits consumed
# TODO
cfg_fc_ph.next = 0
cfg_fc_pd.next = 0
cfg_fc_nph.next = 0
cfg_fc_npd.next = 0
cfg_fc_cplh.next = 0
cfg_fc_cpld.next = 0
else:
# Reserved
cfg_fc_ph.next = 0
cfg_fc_pd.next = 0
cfg_fc_nph.next = 0
cfg_fc_npd.next = 0
cfg_fc_cplh.next = 0
cfg_fc_cpld.next = 0
# configuration control
#cfg_hot_reset_in
#cfg_hot_reset_out
if not sys_reset:
self.config_space_enable = False
else:
self.config_space_enable = bool(cfg_config_space_enable)
#cfg_dsn
#cfg_ds_port_number
#cfg_ds_bus_number
#cfg_ds_device_number
#cfg_ds_function_number
#cfg_power_state_change_ack
#cfg_power_state_change_interrupt
#cfg_err_cor_in
#cfg_err_uncor_in
#cfg_flr_done
#cfg_vf_flr_done
#cfg_flr_in_process
#cfg_vf_flr_in_process
#cfg_req_pm_transition_l23_ready
#cfg_link_training_enable
# configuration interrupt controller
# INTx
#cfg_interrupt_int
#cfg_interrupt_sent
#cfg_interrupt_pending
# MSI
val = 0
if self.functions[0].msi_enable:
val |= 1
if len(self.functions) > 1:
if self.functions[1].msi_enable:
val |= 2
cfg_interrupt_msi_enable.next = val
cfg_interrupt_msi_sent.next = 0
cfg_interrupt_msi_fail.next = 0
if (cfg_interrupt_msi_int):
n = int(cfg_interrupt_msi_int)
#bits = [i for i in range(n.bit_length()) if n >> i & 1]
bits = [i for i in range(32) if n >> i & 1]
if len(bits) == 1 and cfg_interrupt_msi_function_number < len(self.functions):
yield self.functions[cfg_interrupt_msi_function_number].issue_msi_interrupt(bits[0], attr=int(cfg_interrupt_msi_attr))
cfg_interrupt_msi_sent.next = 1
val = 0
val |= self.functions[0].msi_multiple_message_enable & 0x7
if len(self.functions) > 1:
val |= (self.functions[1].msi_multiple_message_enable & 0x7) << 3
cfg_interrupt_msi_mmenable.next = val
#cfg_interrupt_msi_mask_update
if cfg_interrupt_msi_select == 0b1111:
cfg_interrupt_msi_data.next = 0
else:
if cfg_interrupt_msi_select < len(self.functions):
cfg_interrupt_msi_data.next = self.functions[cfg_interrupt_msi_select].msi_mask_bits;
else:
cfg_interrupt_msi_data.next = 0
if cfg_interrupt_msi_pending_status_data_enable:
if cfg_interrupt_msi_pending_status_function_num < len(self.functions):
self.functions[cfg_interrupt_msi_pending_status_function_num].msi_pending_bits = int(cfg_interrupt_msi_pending_status)
# MSI-X
val = 0
if self.functions[0].msix_enable:
val |= 1
if len(self.functions) > 1:
if self.functions[1].msix_enable:
val |= 2
cfg_interrupt_msix_enable.next = val
val = 0
if self.functions[0].msix_function_mask:
val |= 1
if len(self.functions) > 1:
if self.functions[1].msix_function_mask:
val |= 2
cfg_interrupt_msix_mask.next = val
#cfg_interrupt_msix_vf_enable
#cfg_interrupt_msix_vf_mask
if cfg_interrupt_msix_int:
if cfg_interrupt_msi_function_number < len(self.functions):
yield self.functions[cfg_interrupt_msi_function_number].issue_msix_interrupt(int(cfg_interrupt_msix_address), int(cfg_interrupt_msix_data), attr=int(cfg_interrupt_msi_attr))
cfg_interrupt_msi_sent.next = 1
# MSI/MSI-X
#cfg_interrupt_msi_tph_present
#cfg_interrupt_msi_tph_type
#cfg_interrupt_msi_tph_st_tag
# configuration extend
#cfg_ext_read_received
#cfg_ext_write_received
#cfg_ext_register_number
#cfg_ext_function_number
#cfg_ext_write_data
#cfg_ext_write_byte_enable
#cfg_ext_read_data
#cfg_ext_read_data_valid
return instances()
|
StarcoderdataPython
|
11369120
|
<filename>MAC/mathapp/urls.py<gh_stars>1-10
from django.urls import path
from django.urls.conf import include
from mathapp import views as mathapp_views
urlpatterns = [
path('', mathapp_views.index, name='teste'),
path('special/<str:param>', mathapp_views.view_dinamica_str, name='dinamica_str'),
path('special/<int:param>', mathapp_views.view_dinamica_int, name='dinamica_int'),
path('analise/', mathapp_views.matheus, name='analise'),
]
|
StarcoderdataPython
|
1780876
|
"""First Migration
Revision ID: 523c20aa695
Revises:
Create Date: 2015-11-04 12:15:36.577201
"""
# revision identifiers, used by Alembic.
revision = '523c20aa695'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('companies',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=200), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('employees',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=200), nullable=True),
sa.Column('age', sa.Integer(), nullable=False),
sa.Column('ssn', sa.Unicode(length=30), nullable=False),
sa.Column(
'favourite_meal',
sa.Enum('meat', 'vegan', 'vegetarian'),
nullable=False
),
sa.Column('company_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['company_id'], ['companies.id'], name='fk_employees_companies'
),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('addresses',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('address', sa.Unicode(length=200), nullable=True),
sa.Column('zip_code', sa.Unicode(length=20), nullable=True),
sa.Column('city', sa.Unicode(length=100), nullable=True),
sa.Column('country', sa.Unicode(length=3), nullable=True),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['person_id'], ['employees.id'], name='fk_addresses_employees'
),
sa.PrimaryKeyConstraint('id')
)
op.create_table('phone_numbers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('number', sa.String(length=40), nullable=True),
sa.Column('owner', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['owner'], ['employees.id'], name='fk_phone_numbers_employees'
),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('phone_numbers')
op.drop_table('addresses')
op.drop_table('employees')
op.drop_table('companies')
|
StarcoderdataPython
|
303563
|
<filename>Codes/model_cal.py
import os
import glob
import pandas as pd
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
from datetime import timedelta
# import matplotlib.pyplot as plt
# import warnings
from keras.preprocessing import sequence
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# os.chdir('data')
##### get all the files that contain ".csv" #######
# all_path = glob.glob( '*/**.csv' )
#### read all the positive cases
crash_feature_label_300ms_500ms = pd.read_pickle('data/crash_feature_label_300ms_500ms_test')
### read all the negative cases
noncrash_feature_label_300ms_500ms = pd.read_pickle('data/noncrash_feature_label_300ms_500ms_test')
#### merge both positive and negative together
data_final = pd.concat([crash_feature_label_300ms_500ms, noncrash_feature_label_300ms_500ms])
data_final = data_final[['features_cal_vel','features_org_vel','label']]
#### split the data with calculated velocity and original velocity separately
X_cal = data_final.features_cal_vel
X_cal = np.array([np.vstack(i) for i in X_cal])
# X_org = data_final.features_org_vel
# X_org = np.array([np.vstack(i) for i in X_org])
y = np.array(data_final.label)
# y = to_categorical(y)
X_train_cal, X_test_cal, y_train_cal, y_test_cal = train_test_split(X_cal, y, test_size=0.2, random_state=42)
# X_train_org, X_test_org, y_train_org, y_test_org = train_test_split(X_org, y, test_size=0.2, random_state=42)
##### make data into sequence for training
X_train_cal = sequence.pad_sequences(X_train_cal, maxlen=50, padding='post', dtype='float', truncating='post')
y_train_cal = np.array(y_train_cal).reshape(len(y_train_cal),1)
X_test_cal = sequence.pad_sequences(X_test_cal, maxlen=50, padding='post', dtype='float', truncating='post')
y_test_cal = np.array(y_test_cal).reshape(len(y_test_cal),1)
# X_train_org = sequence.pad_sequences(X_train_org, maxlen=50, padding='post', dtype='float', truncating='post')
# y_train_org = np.array(y_train_org).reshape(len(y_train_org),1)
# X_test_org = sequence.pad_sequences(X_test_org, maxlen=50, padding='post', dtype='float', truncating='post')
# y_test_org = np.array(y_test_org).reshape(len(y_test_org),1)
#### onehotecoder
enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
enc = enc.fit(y_train_cal)
y_train_cal = enc.transform(y_train_cal)
y_test_cal = enc.transform(y_test_cal)
# enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
# enc = enc.fit(y_train_org)
# y_train_org = enc.transform(y_train_org)
# y_test_org = enc.transform(y_test_org)
# print('..........', y_train_cal.shape)
# enc_org = enc.fit(y_train_org)
# y_train_org = enc.transform(y_train_org)
# y_test_org = enc.transform(y_test_org)
### train model
import keras
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
keras.layers.LSTM(
units=128,
input_shape=[X_train_cal.shape[1], X_train_cal.shape[2]]
)
)
)
model.add(keras.layers.Dropout(rate=0.5))
model.add(keras.layers.Dense(units=128, activation='relu'))
model.add(keras.layers.Dense(y_train_cal.shape[1], activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['categorical_accuracy']
)
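# Class-weight configurations to compare, progressively up-weighting the
# minority (crash) class relative to the non-crash class.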
class_weights = [{
0:1,
1:1
},
{
0:1,
1:10
},
{
0:1,
1:50
},
{
0:1,
1:100
}]
for i in range(len(class_weights)):
print('------------------------', i)
history = model.fit(
X_train_cal, y_train_cal,
epochs=15,
batch_size=32,
validation_split=0.1,
shuffle=False,
class_weight = class_weights[i]
)
model.evaluate(X_test_cal, y_test_cal)
y_pred_cal = model.predict(X_test_cal)
predictions_cal = y_pred_cal[:,0]
predictions_cal[predictions_cal>=0.5] = 1
predictions_cal[predictions_cal<0.5] = 0
testing_cal = y_test_cal[:,0]
### confusion matrix
cf_array_cal = confusion_matrix(testing_cal, predictions_cal)
pd.DataFrame(cf_array_cal).to_csv(str(i)+'_calculated_velocity'+'.csv')
# print(predictions_cal.shape, testing_cal.shape)
|
StarcoderdataPython
|
4886857
|
<filename>examples/control.py
# pylint: disable=W0621
"""Asynchronous Python client for the Fumis WiRCU API."""
import asyncio
from fumis import Fumis
async def main(loop):
"""Show example on controlling your Fumis WiRCU device."""
async with Fumis(mac="AABBCCDDEEFF", password="<PASSWORD>", loop=loop) as fumis:
info = await fumis.update_info()
print(info)
await fumis.set_target_temperature(23.0)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
|
StarcoderdataPython
|
6452455
|
# coding=utf-8
class SpatialOrientation(object):
'''Spatial Orientation facade.
Computes the device's orientation based on the rotation matrix.
.. versionadded:: 1.3.1
'''
@property
def orientation(self):
'''Property that returns values of the current device orientation
as a (azimuth, pitch, roll) tuple.
Azimuth, angle of rotation about the -z axis. This value represents the
angle between the device's y axis and the magnetic north pole.
        The range of values is -π to π.
Pitch, angle of rotation about the x axis. This value represents the
angle between a plane parallel to the device's screen and a plane
parallel to the ground.
        The range of values is -π to π.
Roll, angle of rotation about the y axis. This value represents the
angle between a plane perpendicular to the device's screen and a plane
perpendicular to the ground.
        The range of values is -π/2 to π/2.
Returns (None, None, None) if no data is currently available.
Supported Platforms:: Android
'''
return self._get_orientation() or (None, None, None)
def _get_orientation(self):
raise NotImplementedError()
def enable_listener(self):
'''Enable the orientation sensor.
'''
self._enable_listener()
def _enable_listener(self, **kwargs):
raise NotImplementedError()
def disable_listener(self):
'''Disable the orientation sensor.
'''
self._disable_listener()
def _disable_listener(self, **kwargs):
raise NotImplementedError()
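# Minimal illustrative sketch (not part of the original facade): a concrete
# platform backend only needs to implement the three hook methods. The class
# name and the fixed return value below are hypothetical.
class _ExampleOrientation(SpatialOrientation):
    def __init__(self):
        self._enabled = False
    def _enable_listener(self, **kwargs):
        self._enabled = True
    def _disable_listener(self, **kwargs):
        self._enabled = False
    def _get_orientation(self):
        # A real backend would read the platform's rotation sensor here.
        return (0.0, 0.0, 0.0) if self._enabled else None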
|
StarcoderdataPython
|
11272782
|
<filename>exercicio7.py
filmes = ["forrest gump","fight club"]
jogos = ["mario","minecraft"]
livros = ["O senhor dos aneis","game of thrones"]
Esportes = ["basquete","futebol"]
#A
filmes.insert(1,"piratas do caribe")
filmes.insert(2,"velozes e furiosos")
jogos.insert(1,"sonic")
jogos.insert(2,"cs")
livros.insert(1,"the witcher")
livros.insert(2,"Diario de um banana")
Esportes.insert(1,"skate")
Esportes.insert(2,"volei")
#B
lista = filmes + jogos + livros + Esportes
#C
print(livros[1])
#D
del Esportes[0]
lista = filmes + jogos + livros + Esportes
#E
disciplinas = ["geografia", "historia"]
lista = lista + disciplinas
print(lista)
|
StarcoderdataPython
|
51863
|
<gh_stars>0
from scitwi.users.user_entities import UserEntities
from scitwi.users.user_profile import UserProfile
from scitwi.utils.attrs import bool_attr, datetime_attr, str_attr, obj_attr
from scitwi.utils.attrs import int_attr
from scitwi.utils.strs import obj_string
class User(object):
"""
Users can be anyone or anything. They tweet, follow, create lists,
have a home_timeline, can be mentioned, and can be looked up in bulk.
https://dev.twitter.com/overview/api/users
"""
def __init__(self, user: dict):
self.contributors_enabled = bool_attr(user, 'contributors_enabled')
self.created_at = datetime_attr(user, 'created_at')
self.default_profile = bool_attr(user, 'default_profile')
self.default_profile_image = bool_attr(user, 'default_profile_image')
self.description = str_attr(user, 'description')
self.entities = obj_attr(user, 'entities', UserEntities)
self.favourites_count = int_attr(user, 'favourites_count')
self.follow_request_sent = bool_attr(user, 'follow_request_sent')
self.following = bool_attr(user, 'following') # deprecated
self.followers_count = int_attr(user, 'followers_count')
self.friends_count = int_attr(user, 'friends_count')
self.geo_enabled = bool_attr(user, 'geo_enabled')
self.has_extended_profile = bool_attr(user, 'has_extended_profile')
self.id_ = int_attr(user, 'id')
self.is_translation_enabled = bool_attr(user, 'is_translation_enabled') # not in docs
self.is_translator = bool_attr(user, 'is_translator')
self.lang = str_attr(user, 'lang')
self.listed_count = int_attr(user, 'listed_count')
self.location = str_attr(user, 'location')
self.name = str_attr(user, 'name')
self.notifications = bool_attr(user, 'notifications') # deprecated
self.profile = UserProfile(user)
self.protected = bool_attr(user, 'protected')
self.screen_name = str_attr(user, 'screen_name')
self.show_all_inline_media = bool_attr(user, 'show_all_inline_media')
self.statuses_count = user['statuses_count']
self.time_zone = user['time_zone']
self.url = user['url']
self.utc_offset = user['utc_offset']
self.verified = user['verified']
def __str__(self):
str_out = ''
str_out += obj_string('Contributors Enabled', self.contributors_enabled)
str_out += obj_string('Created At', self.created_at)
str_out += obj_string('Default Profile', self.default_profile)
str_out += obj_string('Default Profile Image', self.default_profile_image)
str_out += obj_string('Description', self.description)
str_out += obj_string('Entities', self.entities)
str_out += obj_string('Favourites Count', self.favourites_count)
str_out += obj_string('Follow Request Sent', self.follow_request_sent)
str_out += obj_string('Following', self.following)
str_out += obj_string('Followers Count', self.followers_count)
str_out += obj_string('Friends Count', self.friends_count)
str_out += obj_string('Geo Enabled', self.geo_enabled)
str_out += obj_string('Has Extended Profile', self.has_extended_profile)
str_out += obj_string('Id', self.id_)
str_out += obj_string('Is Translation Enabled', self.is_translation_enabled)
str_out += obj_string('Is Translator', self.is_translator)
str_out += obj_string('Language', self.lang)
str_out += obj_string('Listed Count', self.listed_count)
str_out += obj_string('Location', self.location)
        str_out += obj_string('Name', self.name)
str_out += obj_string('Notifications', self.notifications)
str_out += obj_string('Profile', self.profile)
str_out += obj_string('Protected', self.protected)
str_out += obj_string('Screen Name', self.screen_name)
str_out += obj_string('Show All Inline Media', self.show_all_inline_media)
str_out += obj_string('Statuses Count', self.statuses_count)
str_out += obj_string('Time Zone', self.time_zone)
str_out += obj_string('Url', self.url)
str_out += obj_string('UTC Offset', self.utc_offset)
str_out += obj_string('Verified', self.verified)
return str_out
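# Illustrative usage sketch (commented out; `api_response_dict` is a
# placeholder for a full user object returned by the Twitter API, which
# carries the 'entities' and profile_* keys consumed by UserEntities and
# UserProfile):
# user = User(api_response_dict)
# print(user.screen_name, user.followers_count)
# print(user)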
|
StarcoderdataPython
|
5132018
|
# Copyright (c) 2013, Techlift and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import calendar
def execute(filters=None):
columns, data = [], []
data = prepare_data(filters)
columns = get_columns(filters)
return columns, data
def get_columns(filters=None):
return [
{
"label": "Type",
"fieldname": "TYPE",
"fieldtype": "Data",
"width": 60
},
{
"label": "DOCTOR",
"fieldname": "DOCTOR"
},
{
"label": "AMT COLLECTED",
"fieldname": "SUM OF AMT COLLECTED",
"fieldtype": "Currency",
"width": 150
},
{
"label": "CONSUMABLE COST",
"fieldname": "SUM OF CONSUMABLE COST",
"fieldtype": "Currency"
},
{
"label": "ADMIN FEES",
"fieldname": "SUM OF ADMIN FEES",
"fieldtype": "Currency"
},
{
"label": "DOCTOR SHARE",
"fieldname": "SUM OF DOCTOR SHARE",
"fieldtype": "Currency"
},
{
"label": "COMPANY SHARE",
"fieldname": "COMPANY SHARE",
"fieldtype": "Currency"
},
{
"label": "%",
"fieldname": "%",
"fieldtype": "Data",
"width": 60
},
]
def prepare_data(filters):
month_options = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
from_year = filters.from_year
to_year = filters.to_year
from_month = filters.from_month
to_month = filters.to_month
from_month_digit = "{0:0=2d}".format(month_options.index(from_month) + 1)
to_month_digit = "{0:0=2d}".format(month_options.index(to_month) + 1)
from_date = "%s-%s-%s"%(from_year, from_month_digit, "01")
to_date_last_date = calendar.monthrange(int(to_year), month_options.index(to_month) + 1)[1]
to_date = "%s-%s-%s"%(to_year, to_month_digit, to_date_last_date)
from_date_object = frappe.utils.datetime.datetime.strptime(from_date, "%Y-%m-%d")
to_date_object = frappe.utils.datetime.datetime.strptime(to_date, "%Y-%m-%d")
if(from_date_object > to_date_object):
frappe.msgprint("From Date Cannot be Greater Than To Date")
return []
doctor_condition = ""
doctor_type = filters.doctor_type
if(doctor_type != "All"):
doctor_condition = "and b.doctor_type = '%s'"%(doctor_type)
query = """SELECT *, (d.`SUM OF AMT COLLECTED` - d.`SUM OF CONSUMABLE COST` - d.`SUM OF DOCTOR SHARE` - d.`SUM OF ADMIN FEES`) AS "COMPANY SHARE",
ROUND(( 100 * (d.`SUM OF AMT COLLECTED` - d.`SUM OF CONSUMABLE COST` - d.`SUM OF DOCTOR SHARE` - d.`SUM OF ADMIN FEES`) / d.`SUM OF AMT COLLECTED`), 2) AS "%"
from (Select doctor_type as TYPE, dr as DOCTOR, amt as "SUM OF AMT COLLECTED", cons as "SUM OF CONSUMABLE COST", adm as "SUM OF ADMIN FEES",
CASE WHEN c.doctor_type = "Salaried" THEN (SELECT IFNULL(sum(net_pay),0) from `tabSalary Slip` where employee = c.employee and start_date BETWEEN "{0}" and "{1}" and docstatus = 1) ELSE c.`share/salary` END "SUM OF DOCTOR SHARE" from (select a.*, b.mobile_phone,b.employee,
CASE WHEN b.doctor_type = "Salaried" THEN "Salaried" WHEN b.doctor_type = "Fixed" THEN "Fixed" WHEN b.doctor_type = "Percent" THEN "percent"
ELSE "none" END as doctor_type from (select parent, IFNULL(SUM(amount),0) as amt, IFNULL(SUM(admin_fees),0) as adm, IFNULL(SUM(consumable_cost),0) as cons,
IFNULL(SUM(doctor_share),0) as "share/salary", CASE WHEN reference_dt = "Patient Appointment" THEN (select practitioner from `tabPatient Appointment`
where name = reference_dn) when reference_dt = "Clinical Procedure" then (select practitioner from `tabClinical Procedure` where name = reference_dn) END as dr
from `tabSales Invoice Item` GROUP BY dr HAVING dr is NOT NULL) as a LEFT JOIN `tabHealthcare Practitioner` as b ON a.dr = b.name LEFT JOIN `tabSales Invoice` as
si ON a.parent = si.name where si.posting_date BETWEEN "{0}" and "{1}" and si.docstatus = 1 {2}) as c) as d""".format(from_date, to_date, doctor_condition)
#frappe.msgprint(query)
data = frappe.db.sql(query)
return data
|
StarcoderdataPython
|
1956675
|
#!/usr/bin/env python
# ENCODE annot_enrich (fraction of reads in annotated regions) wrapper
# Author: <NAME>, <NAME> (<EMAIL>)
import sys
import os
import argparse
from encode_lib_common import (
run_shell_cmd, strip_ext_ta,
ls_l, get_num_lines, log)
import warnings
warnings.filterwarnings("ignore")
def parse_arguments():
parser = argparse.ArgumentParser(
prog='ENCODE annot_enrich (fraction of reads in annotated regions)')
parser.add_argument(
'--ta', type=str, help='TAG-ALIGN file (from task bam2ta).')
parser.add_argument('--dnase', type=str, help='DNase definition bed file.')
parser.add_argument('--blacklist', type=str, help='Blacklist bed file.')
parser.add_argument('--prom', type=str,
help='Promoter definition bed file.')
parser.add_argument('--enh', type=str,
help='Enhancer definition bed file.')
parser.add_argument('--out-dir', default='', type=str,
help='Output directory.')
parser.add_argument('--log-level', default='INFO', help='Log level',
                        choices=['NOTSET', 'DEBUG', 'INFO', 'WARNING',
                                 'CRITICAL', 'ERROR'])
args = parser.parse_args()
log.setLevel(args.log_level)
log.info(sys.argv)
return args
def get_fract_reads_in_regions(reads_bed, regions_bed):
"""Function that takes in bed file of reads and bed file of regions and
gets fraction of reads sitting in said regions
"""
# uses new run_shell_cmd
cmd = "bedtools sort -i {} | "
cmd += "bedtools merge -i stdin | "
cmd += "bedtools intersect -u -nonamecheck -a {} -b stdin | "
cmd += "wc -l"
cmd = cmd.format(regions_bed, reads_bed)
intersect_read_count = int(run_shell_cmd(cmd))
total_read_count = get_num_lines(reads_bed)
fract_reads = float(intersect_read_count) / total_read_count
return intersect_read_count, fract_reads
def main():
# read params
args = parse_arguments()
FINAL_BED = args.ta
OUTPUT_PREFIX = os.path.join(
args.out_dir,
os.path.basename(strip_ext_ta(FINAL_BED)))
DNASE = args.dnase if args.dnase and os.path.basename(
args.dnase) != 'null' else ''
BLACKLIST = args.blacklist if args.blacklist and os.path.basename(
args.blacklist) != 'null' else ''
PROM = args.prom if args.prom and os.path.basename(
args.prom) != 'null' else ''
ENH = args.enh if args.enh and os.path.basename(args.enh) != 'null' else ''
result = []
# Dnase regions
if DNASE:
reads_dnase, fract_dnase = get_fract_reads_in_regions(FINAL_BED, DNASE)
result.append(('fraction_of_reads_in_universal_DHS_regions',
str(reads_dnase), str(fract_dnase)))
# Blacklist regions
if BLACKLIST:
reads_blacklist, \
fract_blacklist = get_fract_reads_in_regions(FINAL_BED, BLACKLIST)
result.append(('fraction_of_reads_in_blacklist_regions',
str(reads_blacklist), str(fract_blacklist)))
# Prom regions
if PROM:
reads_prom, fract_prom = get_fract_reads_in_regions(FINAL_BED, PROM)
result.append(('fraction_of_reads_in_promoter_regions',
str(reads_prom), str(fract_prom)))
# Enh regions
if ENH:
reads_enh, fract_enh = get_fract_reads_in_regions(FINAL_BED, ENH)
result.append(('fraction_of_reads_in_enhancer_regions',
str(reads_enh), str(fract_enh)))
annot_enrich_qc = OUTPUT_PREFIX + '.annot_enrich.qc'
with open(annot_enrich_qc, 'w') as fp:
for line in result:
fp.write('\t'.join(line) + '\n')
log.info('List all files in output directory...')
ls_l(args.out_dir)
log.info('All done.')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
12851895
|
"""This program first reads in the sqlite database made by ParseAuthors.py.
Then, after just a little data cleaning, it undergoes PCA decomposition.
After being decomposed via PCA, the author data is then clustered by way of a
K-means clustering algorithm. The number of clusters can be set by changing
the value of n_clusters."""
import sqlite3
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
if __name__ == '__main__':
# Filepath of sqlite database made by ParseAuthors.py
db_path = '/media/sf_G_DRIVE/jita1407/authors.sqlite'
# Load this into a dataframe
conn = sqlite3.connect(db_path, detect_types=sqlite3.PARSE_DECLTYPES)
dataframe = pd.read_sql_query("SELECT * FROM Authors", conn)
conn.close()
# Get rid of some redundant data to make analysis cleaner and more straightforward
dataframe = dataframe.drop(['int_skew', 'unique_messages'], axis=1)
# Separate out our list of Authors from the data about them
    authors = dataframe.iloc[:, 1].copy()
    data = dataframe.iloc[:, 2:7].copy()
# Set up our PCA decomposition
pca = PCA()
    pca.fit(data.to_numpy())
# Transform our data into features calculated by PCA
    transformed = pca.transform(data.to_numpy())
# Cluster our data according to K-means
n_clusters = 2 # number of clusters to organize data into
n_init = 20 # number of times to replicate clustering
n_jobs = 1 # number of processors to use for clustering (-1 for all)
kmeans = KMeans(n_clusters=n_clusters, n_init=n_init, n_jobs=n_jobs).fit(transformed)
# Get the results of the clustering
centers = kmeans.cluster_centers_
labels = kmeans.labels_
# Make some plots
# Plot explained variance for each PCA component
#plt.bar(np.arange(len(pca.explained_variance_)), pca.explained_variance_)
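    # Scatter of the first two PCA components colored by cluster assignment
    # (illustrative addition, in the same spirit as the commented-out bar plot):
    # plt.scatter(transformed[:, 0], transformed[:, 1], c=labels, s=5)
    # plt.scatter(centers[:, 0], centers[:, 1], c='red', marker='x')
    # plt.show()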
|
StarcoderdataPython
|
1983476
|
<reponame>zishun/Poisson-EVA2019
import numpy as np
import time
import util
fn_input = './data/anom.training.npy'
fn_output = './data/X_min_flip.npy'
data = -np.load(fn_input)
neighbors = np.load('./data/neighbor.npy').astype(np.int32)
start = time.time()
X_min = util.X_min_A_compute(data, neighbors)
print('compute X_min: %.3fs' % (time.time()-start))
np.save(fn_output, X_min)
|
StarcoderdataPython
|
1713852
|
<reponame>RudSmith/netconf<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright (C) 2018 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Network Instance Protocols(BGP) subsubmodule
"""
from beluganos.yang.python import constants
from beluganos.yang.python import elements
# pylint: disable=too-few-public-methods
class BgpGlobal(elements.BaseElement):
"""
    Network instance protocol (BGP/global) element.
"""
_FIELDS = ("config",)
def __init__(self):
super(BgpGlobal, self).__init__(constants.BGP_GLOBAL)
self.config = BgpGlobalConfig()
class BgpGlobalConfig(elements.BaseElement):
"""
    Network instance protocol (BGP/global/config) element.
"""
_FIELDS = ("_as", "router_id")
def __init__(self):
super(BgpGlobalConfig, self).__init__(constants.CONFIG)
self._as = None
self.router_id = None
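# Illustrative sketch (commented out; the AS number and router-id below are
# made-up values): populate the BGP global subtree before serialization.
# bgp = BgpGlobal()
# bgp.config._as = 65001
# bgp.config.router_id = "10.0.0.1"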
|
StarcoderdataPython
|
5013981
|
<reponame>rgooler/comix
from flask import render_template
from app import app
import os
from flask import send_from_directory
from app.comicbook import comicbook
from natsort import natsorted
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/x-icon')
@app.route('/ios_icon.png')
def ios_icon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'ios_icon.png', mimetype='image/png')
@app.route('/swipepage.js')
def swipepage_js():
return send_from_directory(os.path.join(app.root_path, 'static'),
'swipepage.js', mimetype='text/javascript')
@app.route('/')
@app.route('/index')
def index():
    comics = natsorted(next(os.walk('res/'))[1])
return render_template("index.html", comics=comics)
@app.route('/<path:comic>/<f>.<ext>')
def comic_page(comic, f, ext):
filename = f + '.' + ext
cb = comicbook(comic, filename)
basepath = os.path.join(app.root_path, '..', 'res', comic)
page = os.path.join(basepath, filename)
return render_template("comic_index.html", comicbook=cb, page=page)
@app.route('/<path:comic>/<f>.<ext>/img')
def comic_page_img(comic, f, ext):
page = f + '.' + ext
basepath = os.path.join(app.root_path, '..', 'res', comic)
return send_from_directory(basepath, page)
@app.route('/<path:comic>/thumbnail')
def comic_thumbnail(comic):
cb = comicbook(comic)
if cb.thumbnail_path():
basepath = os.path.join(app.root_path, '..', 'res', comic)
return send_from_directory(basepath, cb.thumbnail_path())
else:
return send_from_directory(os.path.join(app.root_path, 'static'),
'blank_thumbnail.gif', mimetype='image/gif')
@app.route('/<path:comic>')
def comic(comic):
cb = comicbook(comic)
return render_template("comic_index.html", comicbook=cb)
|
StarcoderdataPython
|
1615291
|
<filename>fluxture/structures.py
import asyncio
import itertools
from abc import ABCMeta
from collections import OrderedDict
from typing import Generic, Iterator, KeysView
from typing import OrderedDict as OrderedDictType
from typing import Tuple, Type, TypeVar
from typing import ValuesView
from typing import ValuesView as ValuesViewType
from fluxture.serialization import (AbstractIntEnum, ByteOrder, FixedSize, P,
Packable, UnpackError)
F = TypeVar("F")
class StructMeta(ABCMeta, Generic[F]):
FIELDS: OrderedDictType[str, Type[F]]
def __init__(cls, name, bases, clsdict):
fields = OrderedDict()
field_sources = {}
for base in bases:
if isinstance(base, StructMeta):
# this will happen if a Struct is extending another Struct
# so inherit all of the superclass's fields
for field_name, field_type in base.FIELDS.items():
if field_name in fields:
raise TypeError(
f"{name} inherits field {field_name} from both {base.__name__} and "
f"{field_sources[field_name]}"
)
elif hasattr(base, "non_serialized") and field_name not in getattr(
base, "non_serialized"
):
field_sources[field_name] = base
fields[field_name] = field_type
if "non_serialized" in clsdict:
non_serialized = set(clsdict["non_serialized"])
else:
non_serialized = set()
non_serialized |= {"FIELDS", "non_serialized"}
if "__annotations__" in clsdict:
for field_name, field_type in clsdict["__annotations__"].items():
if field_name in field_sources:
raise TypeError(
f"{name} cannot redefine field {field_name} from {field_sources[field_name]}"
)
elif field_name not in non_serialized:
fields[field_name] = field_type
super().__init__(name, bases, clsdict)
cls.validate_fields(fields)
setattr(cls, "FIELDS", fields)
# are all fields fixed size? if so, we are fixed size, too!
if all(hasattr(field, "num_bytes") for field in fields.values()):
cls.num_bytes = sum(field.num_bytes for field in fields.values()) # type: ignore
assert isinstance(cls, FixedSize)
def validate_fields(cls, fields: OrderedDictType[str, Type[F]]):
pass
class Struct(Generic[F], metaclass=StructMeta[F]):
def __init__(self, *args, **kwargs):
unsatisfied_fields = [
name for name in self.__class__.FIELDS.keys() if name not in kwargs
]
if len(args) > len(unsatisfied_fields):
raise ValueError(
f"Unexpected positional argument: {args[len(unsatisfied_fields)]}"
)
elif len(args) < len(unsatisfied_fields):
# see if any of the unsatisfied fields have defaults:
for name in unsatisfied_fields[len(args) :]:
field_type = self.__class__.FIELDS[name]
if (
hasattr(field_type, "column_options")
and field_type.column_options.default is not None
):
kwargs[name] = field_type.column_options.default
elif issubclass(field_type, AbstractIntEnum):
kwargs[name] = field_type.DEFAULT
else:
raise ValueError(f"Missing argument for {name} in {self.__class__}")
unsatisfied_fields = unsatisfied_fields[: len(args)]
for name, value in itertools.chain(
kwargs.items(), zip(unsatisfied_fields, args)
):
if name not in self.__class__.FIELDS:
raise TypeError(
f"{self.__class__.__name__}.__init__() got an unexpected keyword argument '{name}'. "
f"Valid arguments are: {', '.join(self.__class__.FIELDS.keys())}"
)
elif isinstance(value, self.__class__.FIELDS[name]):
# the value was already passed as the correct type
setattr(self, name, value)
else:
# we need to construct the correct type
setattr(self, name, self.__class__.FIELDS[name](value))
super().__init__()
def __contains__(self, field_name: str):
return field_name in self.__class__.FIELDS
def __getitem__(self, field_name: str) -> Type[F]:
if field_name not in self:
raise KeyError(field_name)
return getattr(self, field_name)
def __len__(self) -> int:
return len(self.__class__.FIELDS)
def __iter__(self) -> Iterator[str]:
return iter(self.__class__.FIELDS.keys())
def items(self) -> Iterator[Tuple[str, Type[F]]]:
for field_name in self:
yield field_name, getattr(self, field_name)
def keys(self) -> KeysView[str]:
return self.__class__.FIELDS.keys()
def values(self) -> ValuesViewType[Type[F]]:
return ValuesView(self)
def __eq__(self, other):
return (
isinstance(other, Struct)
and len(self) == len(other)
and all(a == b for (_, a), (_, b) in zip(self.items(), other.items()))
)
def __ne__(self, other):
return not (self == other)
def __str__(self):
types = "".join(
f" {field_name} = {field_value!s};\n"
for field_name, field_value in self.items()
)
newline = "\n"
return f"typedef struct {{{['', newline][len(types) > 0]}{types}}} {self.__class__.__name__}"
def __repr__(self):
args = [
f"{name}={getattr(self, name)!r}" for name in self.__class__.FIELDS.keys()
]
return f"{self.__class__.__name__}({', '.join(args)})"
class PackableStruct(Generic[P], Struct[P]):
def pack(self, byte_order: ByteOrder = ByteOrder.NETWORK) -> bytes:
# TODO: Combine the formats and use a single struct.pack instead
return b"".join(
getattr(self, field_name).pack(byte_order)
for field_name in self.__class__.FIELDS.keys()
)
@classmethod
def validate_fields(cls, fields: OrderedDictType[str, Type[F]]):
for field_name, field_type in fields.items():
if not isinstance(field_type, Packable):
raise TypeError(
f"Field {field_name} of {cls.__name__} must be Packable, not {field_type}"
)
@classmethod
def unpack(
cls: Type[P], data: bytes, byte_order: ByteOrder = ByteOrder.NETWORK
) -> P:
ret, remaining = cls.unpack_partial(data, byte_order)
if remaining:
raise ValueError(f"Unexpected trailing bytes: {remaining!r}")
return ret
@classmethod
def unpack_partial(
cls: Type[P], data: bytes, byte_order: ByteOrder = ByteOrder.NETWORK
) -> Tuple[P, bytes]:
remaining_data = data
args = []
for field_name, field_type in cls.FIELDS.items():
try:
field, remaining_data = field_type.unpack_partial(
remaining_data, byte_order
)
errored = False
except UnpackError:
errored = True
if errored:
parsed_fields = [
f"{field_name} = {arg!r}"
for field_name, arg in zip(cls.FIELDS.keys(), args)
]
parsed_fields = ", ".join(parsed_fields)
raise UnpackError(
f"Error parsing field {cls.__name__}.{field_name} (field {len(args)+1}) of type "
f"{field_type.__name__} from bytes {remaining_data!r}. Prior parsed field values: "
f"{parsed_fields}"
)
args.append(field)
return cls(*args), remaining_data
@classmethod
async def read(
cls: Type[P],
reader: asyncio.StreamReader,
byte_order: ByteOrder = ByteOrder.NETWORK,
) -> P:
if hasattr(cls, "num_bytes"):
data = await reader.read(cls.num_bytes)
return cls.unpack(data, byte_order)
# we need to read it one field at a time
args = []
for field_name, field_type in cls.FIELDS.items():
try:
                field = await field_type.read(reader, byte_order)
errored = False
except UnpackError:
errored = True
if errored:
parsed_fields = [
f"{field_name} = {arg!r}"
for field_name, arg in zip(cls.FIELDS.keys(), args)
]
parsed_fields = ", ".join(parsed_fields)
raise UnpackError(
f"Error parsing field {cls.__name__}.{field_name} (field {len(args) + 1}) of type "
f"{field_type.__name__}. Prior parsed field values: {parsed_fields}"
)
args.append(field)
return cls(*args)
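# A minimal usage sketch (not part of the module). The integer field types are
# assumed to be fixed-size Packable types exported by fluxture.serialization;
# the names UInt16/UInt32 below are illustrative and may differ in that module.
#
#   class Version(PackableStruct):
#       major: UInt16
#       minor: UInt16
#       build: UInt32
#
#   v = Version(1, 2, 3)
#   data = v.pack()                      # fields are packed in declaration order
#   assert Version.unpack(data) == v     # and round-trip through unpack()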
|
StarcoderdataPython
|
12807534
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import List
import dataclasses
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.animation import FuncAnimation
from .draw_obj import DrawObj
@dataclasses.dataclass
class ShipObj3dof:
"""Ship 3DOF class just for drawing.
Attributes:
L (float):
ship length [m]
        B (float):
            ship breadth [m]
time (list[float]):
Time list of simulation result.
u (list[float]):
List of axial velocity [m/s] in simulation result.
v (list[float]):
List of lateral velocity [m/s] in simulation result.
r (list[float]):
List of rate of turn [rad/s] in simulation result.
x (list[float]):
List of position of X axis [m] in simulation result.
y (list[float]):
            List of position of Y axis [m] in simulation result.
psi (list[float]):
List of azimuth [rad] in simulation result.
        δ (list[float]):
rudder angle list of simulation.
npm (List[float]):
npm list of simulation.
"""
# Ship overview
L: float
B: float
# Simulation result
time: List[float] = dataclasses.field(default_factory=list)
u: List[float] = dataclasses.field(default_factory=list)
v: List[float] = dataclasses.field(default_factory=list)
r: List[float] = dataclasses.field(default_factory=list)
x: List[float] = dataclasses.field(default_factory=list)
y: List[float] = dataclasses.field(default_factory=list)
psi: List[float] = dataclasses.field(default_factory=list)
    δ: List[float] = dataclasses.field(default_factory=list)
npm: List[float] = dataclasses.field(default_factory=list)
def load_simulation_result(
self,
time: List[float],
u: List[float],
v: List[float],
r: List[float],
x0: float = 0.0,
y0: float = 0.0,
psi0: float = 0.0,
):
"""Load simulation result (time, u, v, r).
By running this, `x`, `y` and `psi` of this class are registered automatically.
Args:
time (list[float]):
Time list of simulation result.
u (list[float]):
List of axial velocity [m/s] in simulation result.
v (list[float]):
List of lateral velocity [m/s] in simulation result.
r (list[float]):
List of rate of turn [rad/s] in simulation result.
            x0 (float, optional):
                Initial position of X axis [m].
                Defaults to 0.0.
            y0 (float, optional):
                Initial position of Y axis [m].
                Defaults to 0.0.
            psi0 (float, optional):
                Initial azimuth [rad].
                Defaults to 0.0.
Examples:
>>> time_list = np.linspace(0.00, duration, num_of_sampling)
>>> delta_list = np.full(len(time_list), 10 * np.pi / 180)
>>> kt_params = KTParams(K=0.15, T=60.0)
>>> result = kt.simulate_kt(kt_params, time_list, delta_list)
>>> u_list = np.full(len(time_list), 20 * (1852.0 / 3600))
>>> v_list = np.zeros(len(time_list))
>>> r_list = result[0]
>>> ship = ShipObj3dof(L = 180, B = 20)
>>> ship.load_simulation_result(time_list, u_list, v_list, r_list)
>>> print(ship.x, ship.y, ship.psi)
"""
x = [x0]
y = [y0]
psi = [psi0]
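        # Dead-reckon the trajectory with a forward (explicit) Euler step:
        #   x_{i+1}   = x_i   + (u_i * cos(psi_i) - v_i * sin(psi_i)) * dt
        #   y_{i+1}   = y_i   + (u_i * sin(psi_i) + v_i * cos(psi_i)) * dt
        #   psi_{i+1} = psi_i + r_i * dt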
for i, (ut, vt, rt) in enumerate(zip(u, v, r)):
if i > 0:
dt = time[i] - time[i - 1]
x.append(x[-1] + (ut * np.cos(psi[-1]) - vt * np.sin(psi[-1])) * dt)
y.append(y[-1] + (ut * np.sin(psi[-1]) + vt * np.cos(psi[-1])) * dt)
psi.append(psi[-1] + rt * dt)
# Register
self.time = time
self.u = u
self.v = v
self.r = r
self.x = x
self.y = y
self.psi = psi
def draw_xy_trajectory(
self,
dimensionless: bool = False,
aspect_equal: bool = True,
num: int or str = None,
figsize: List[float] = [6.4, 4.8],
dpi: float = 100.0,
fmt: str = None,
facecolor: str = None,
edgecolor: str = None,
frameon: bool = True,
FigureClass: matplotlib.figure.Figure = matplotlib.figure.Figure,
clear: bool = False,
tight_layout: bool = False,
constrained_layout: bool = False,
save_fig_path: str = None,
**kwargs
) -> plt.Figure:
"""Draw trajectry(x,y).
Args:
dimensionless (bool, optional):
drawing with dimensionless by using L or not.
Defaults to False
aspect_equal (bool, optional):
Set equal of figure aspect or not.
Defaults to True.
num (int or str, optional):
A unique identifier for the figure.
If a figure with that identifier already exists, this figure is made active and returned.
An integer refers to the Figure.number attribute, a string refers to the figure label.
If there is no figure with the identifier or num is not given,
a new figure is created, made active and returned.
If num is an int, it will be used for the Figure.number attribute.
Otherwise, an auto-generated integer value is used (starting at 1 and incremented for each new figure).
If num is a string, the figure label and the window title is set to this value.
Default to None.
figsize ((float, float), optional):
Width, height in inches.
Default to [6.4, 4.8]
dpi (float, optional):
The resolution of the figure in dots-per-inch.
Default to 100.0.
figsize ((float, float), optional):
Width, height in inches.
Default to [6.4, 4.8]
dpi (float, optional):
The resolution of the figure in dots-per-inch.
Default to 100.0
facecolor (str, optional):
The background color.
edgecolor (str, optional):
The border color.
frameon (bool, optional):
If False, suppress drawing the figure frame.
Defaults to True.
FigureClass (subclass of matplotlib.figure.Figure, optional):
Optionally use a custom Figure instance.
Defaults to matplotlib.figure.Figure.
clear (bool, optional):
If True and the figure already exists, then it is cleared.
Defaults to False.
tight_layout (bool, optional):
If False use subplotpars.
If True adjust subplot parameters using tight_layout with default padding.
When providing a dict containing the keys pad, w_pad, h_pad, and rect,
the default tight_layout paddings will be overridden.
Defaults to False.
constrained_layout (bool, optional):
If True use constrained layout to adjust positioning of plot elements.
Like tight_layout, but designed to be more flexible.
See Constrained Layout Guide for examples.
(Note: does not work with add_subplot or subplot2grid.)
Defaults to False.
fmt (str, optional):
A format string, e.g. 'ro' for red circles.
See the Notes section for a full description of the format strings.
Format strings are just an abbreviation for quickly setting basic line properties.
All of these and more can also be controlled by keyword arguments.
This argument cannot be passed as keyword.
Defaults to None.
save_fig_path (str, optional):
Path of saving figure.
Defaults to None.
**kwargs (matplotlib.lines.Line2D properties, optional):
kwargs are used to specify properties
like a line label (for auto legends), linewidth, antialiasing, marker face color.
You can show the detailed information at `matplotlib.lines.Line2D
<https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_
Returns:
matplotlib.pyplot.Figure: Figure
Examples:
>>> ship.draw_xy_trajectory(save_fig_path="test.png")
"""
fig = plt.figure(
num=num,
figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
clear=clear,
tight_layout=tight_layout,
constrained_layout=constrained_layout,
)
if dimensionless:
if fmt is None:
plt.plot(np.array(self.x) / self.L, np.array(self.y) / self.L, **kwargs)
else:
plt.plot(
np.array(self.x) / self.L, np.array(self.y) / self.L, fmt, **kwargs
)
plt.xlabel(r"$x/L$")
plt.ylabel(r"$y/L$")
else:
plt.plot(self.x, self.y)
plt.xlabel(r"$x$")
plt.ylabel(r"$y$")
if aspect_equal:
plt.gca().set_aspect("equal")
if save_fig_path is not None:
plt.savefig(save_fig_path)
plt.close()
return fig
def draw_chart(
self,
x_index: str,
y_index: str,
xlabel: str = None,
ylabel: str = None,
num: int or str = None,
figsize: List[float] = [6.4, 4.8],
dpi: float = 100.0,
facecolor: str = None,
edgecolor: str = None,
frameon: bool = True,
FigureClass: matplotlib.figure.Figure = matplotlib.figure.Figure,
clear: bool = False,
tight_layout: bool = False,
constrained_layout: bool = False,
fmt: str = None,
save_fig_path: str = None,
**kwargs
) -> plt.Figure:
"""Draw chart.
Args:
x_index (string):
Index value of X axis.
y_index (string):
Index value of Y axis.
xlabel (string, optional):
Label of X axis.
Defaults to None.
ylabel (string, optional):
Label of Y axis.
Defaults to None.
num (int or str, optional):
A unique identifier for the figure.
If a figure with that identifier already exists, this figure is made active and returned.
An integer refers to the Figure.number attribute, a string refers to the figure label.
If there is no figure with the identifier or num is not given,
a new figure is created, made active and returned.
If num is an int, it will be used for the Figure.number attribute.
Otherwise, an auto-generated integer value is used (starting at 1 and incremented for each new figure).
If num is a string, the figure label and the window title is set to this value.
Default to None.
figsize ((float, float), optional):
Width, height in inches.
Default to [6.4, 4.8]
dpi (float, optional):
The resolution of the figure in dots-per-inch.
Default to 100.0.
facecolor (str, optional):
The background color.
edgecolor (str, optional):
The border color.
frameon (bool, optional):
If False, suppress drawing the figure frame.
Defaults to True.
FigureClass (subclass of matplotlib.figure.Figure, optional):
Optionally use a custom Figure instance.
Defaults to matplotlib.figure.Figure.
clear (bool, optional):
If True and the figure already exists, then it is cleared.
Defaults to False.
tight_layout (bool, optional):
If False use subplotpars.
If True adjust subplot parameters using tight_layout with default padding.
When providing a dict containing the keys pad, w_pad, h_pad, and rect,
the default tight_layout paddings will be overridden.
Defaults to False.
constrained_layout (bool, optional):
If True use constrained layout to adjust positioning of plot elements.
Like tight_layout, but designed to be more flexible.
See Constrained Layout Guide for examples.
(Note: does not work with add_subplot or subplot2grid.)
Defaults to False.
fmt (str, optional):
A format string, e.g. 'ro' for red circles.
See the Notes section for a full description of the format strings.
Format strings are just an abbreviation for quickly setting basic line properties.
All of these and more can also be controlled by keyword arguments.
This argument cannot be passed as keyword.
Defaults to None.
save_fig_path (str, optional):
Path of saving figure.
Defaults to None.
**kwargs (matplotlib.lines.Line2D properties, optional):
kwargs are used to specify properties
like a line label (for auto legends), linewidth, antialiasing, marker face color.
You can show the detailed information at `matplotlib.lines.Line2D
<https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_
Returns:
matplotlib.pyplot.Figure: Figure
Examples:
>>> ship.draw_chart("time", "r", xlabel="time [sec]", \
>>> ylabel=r"$u$" + " [rad/s]",save_fig_path='test.png')
"""
target_x = None
if x_index == "time":
target_x = self.time
elif x_index == "u":
target_x = self.u
elif x_index == "v":
target_x = self.v
elif x_index == "r":
target_x = self.r
elif x_index == "x":
target_x = self.x
elif x_index == "y":
target_x = self.y
elif x_index == "psi":
target_x = self.psi
elif x_index == "delta":
target_x = self.ฮด
elif x_index == "ฮด":
target_x = self.ฮด
elif x_index == "npm":
target_x = self.npm
if target_x is None:
            raise Exception(
                "`x_index` is not good. Please set `x_index` from "
                "[time, u, v, r, x, y, psi, delta, δ, npm]"
            )
target_y = None
if y_index == "time":
target_y = self.time
elif y_index == "u":
target_y = self.u
elif y_index == "v":
target_y = self.v
elif y_index == "r":
target_y = self.r
elif y_index == "x":
target_y = self.x
elif y_index == "y":
target_y = self.y
elif y_index == "psi":
target_y = self.psi
elif y_index == "delta":
target_y = self.ฮด
elif y_index == "ฮด":
target_y = self.ฮด
elif y_index == "npm":
target_y = self.npm
if target_y is None:
            raise Exception(
                "`y_index` is not good. Please set `y_index` from "
                "[time, u, v, r, x, y, psi, delta, δ, npm]"
            )
fig = plt.figure(
num=num,
figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
clear=clear,
tight_layout=tight_layout,
constrained_layout=constrained_layout,
)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if fmt is None:
plt.plot(target_x, target_y, **kwargs)
else:
plt.plot(target_x, target_y, fmt, **kwargs)
if save_fig_path is not None:
plt.savefig(save_fig_path)
plt.close()
return fig
def draw_multi_x_chart(
self,
x_index_list: List[str],
y_index: str,
xlabel: str = None,
ylabel: str = None,
num: int or str = None,
figsize: List[float] = [6.4, 4.8],
dpi: float = 100.0,
facecolor: str = None,
edgecolor: str = None,
frameon: bool = True,
FigureClass: matplotlib.figure.Figure = matplotlib.figure.Figure,
clear: bool = False,
tight_layout: bool = False,
constrained_layout: bool = False,
fmt: str = None,
save_fig_path: str = None,
**kwargs
) -> plt.Figure:
"""Draw chart of multiple Y variables.
Args:
x_index_list (List[string]):
List of index value of X axis.
y_index (string):
Index value of Y axis.
xlabel (string, optional):
Label of X axis.
Defaults to None.
ylabel (string, optional):
Label of Y axis.
Defaults to None.
num (int or str, optional):
A unique identifier for the figure.
If a figure with that identifier already exists, this figure is made active and returned.
An integer refers to the Figure.number attribute, a string refers to the figure label.
If there is no figure with the identifier or num is not given,
a new figure is created, made active and returned.
If num is an int, it will be used for the Figure.number attribute.
Otherwise, an auto-generated integer value is used (starting at 1 and incremented for each new figure).
If num is a string, the figure label and the window title is set to this value.
Default to None.
figsize ((float, float), optional):
Width, height in inches.
Default to [6.4, 4.8]
dpi (float, optional):
The resolution of the figure in dots-per-inch.
Default to 100.0.
facecolor (str, optional):
The background color.
edgecolor (str, optional):
The border color.
frameon (bool, optional):
If False, suppress drawing the figure frame.
Defaults to True.
FigureClass (subclass of matplotlib.figure.Figure, optional):
Optionally use a custom Figure instance.
Defaults to matplotlib.figure.Figure.
clear (bool, optional):
If True and the figure already exists, then it is cleared.
Defaults to False.
tight_layout (bool, optional):
If False use subplotpars.
If True adjust subplot parameters using tight_layout with default padding.
When providing a dict containing the keys pad, w_pad, h_pad, and rect,
the default tight_layout paddings will be overridden.
Defaults to False.
constrained_layout (bool, optional):
If True use constrained layout to adjust positioning of plot elements.
Like tight_layout, but designed to be more flexible.
See Constrained Layout Guide for examples.
(Note: does not work with add_subplot or subplot2grid.)
Defaults to False.
fmt (str, optional):
A format string, e.g. 'ro' for red circles.
See the Notes section for a full description of the format strings.
Format strings are just an abbreviation for quickly setting basic line properties.
All of these and more can also be controlled by keyword arguments.
This argument cannot be passed as keyword.
Defaults to None.
save_fig_path (str, optional):
Path of saving figure.
Defaults to None.
**kwargs (matplotlib.lines.Line2D properties, optional):
kwargs are used to specify properties
like a line label (for auto legends), linewidth, antialiasing, marker face color.
You can show the detailed information at `matplotlib.lines.Line2D
<https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_
Returns:
matplotlib.pyplot.Figure: Figure
Examples:
>>> ship.draw_chart("time", "r", xlabel="time [sec]", \
>>> ylabel=r"$u$" + " [rad/s]",save_fig_path='test.png')
"""
target_y = None
if y_index == "time":
target_y = self.time
elif y_index == "u":
target_y = self.u
elif y_index == "v":
target_y = self.v
elif y_index == "r":
target_y = self.r
elif y_index == "x":
target_y = self.x
elif y_index == "y":
target_y = self.y
elif y_index == "psi":
target_y = self.psi
elif y_index == "delta":
target_y = self.ฮด
elif y_index == "ฮด":
target_y = self.ฮด
elif y_index == "npm":
target_y = self.npm
if target_y is None:
            raise Exception(
                "`y_index` is not good. Please set `y_index` from "
                "[time, u, v, r, x, y, psi, delta, δ, npm]"
            )
target_x_list = []
for x_index in x_index_list:
if x_index == "time":
target_x_list.append(self.time)
elif x_index == "u":
target_x_list.append(self.u)
elif x_index == "v":
target_x_list.append(self.v)
elif x_index == "r":
target_x_list.append(self.r)
elif x_index == "x":
target_x_list.append(self.x)
elif x_index == "y":
target_x_list.append(self.y)
elif x_index == "psi":
target_x_list.append(self.psi)
elif x_index == "delta":
target_x_list.append(self.ฮด)
elif x_index == "ฮด":
target_x_list.append(self.ฮด)
elif x_index == "npm":
target_x_list.append(self.npm)
if len(target_x_list) == 0:
            raise Exception(
                "`x_index_list` is not good. Please set values of `x_index_list` from "
                "[time, u, v, r, x, y, psi, delta, δ, npm]"
            )
fig = plt.figure(
num=num,
figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
clear=clear,
tight_layout=tight_layout,
constrained_layout=constrained_layout,
)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if fmt is None:
for target_x in target_x_list:
plt.plot(target_x, target_y, **kwargs)
else:
for target_x in target_x_list:
plt.plot(target_x, target_y, fmt, **kwargs)
if save_fig_path is not None:
plt.savefig(save_fig_path)
plt.close()
return fig
def draw_multi_y_chart(
self,
x_index: str,
y_index_list: List[str],
xlabel: str = None,
ylabel: str = None,
num: int or str = None,
figsize: List[float] = [6.4, 4.8],
dpi: float = 100.0,
facecolor: str = None,
edgecolor: str = None,
frameon: bool = True,
FigureClass: matplotlib.figure.Figure = matplotlib.figure.Figure,
clear: bool = False,
tight_layout: bool = False,
constrained_layout: bool = False,
fmt: str = None,
save_fig_path: str = None,
**kwargs
) -> plt.Figure:
"""Draw chart of multiple Y variables.
Args:
x_index (string):
Index value of X axis.
y_index_list (List[string]):
List of index value of Y axis.
xlabel (string, optional):
Label of X axis.
Defaults to None.
ylabel (string, optional):
Label of Y axis.
Defaults to None.
num (int or str, optional):
A unique identifier for the figure.
If a figure with that identifier already exists, this figure is made active and returned.
An integer refers to the Figure.number attribute, a string refers to the figure label.
If there is no figure with the identifier or num is not given,
a new figure is created, made active and returned.
If num is an int, it will be used for the Figure.number attribute.
Otherwise, an auto-generated integer value is used (starting at 1 and incremented for each new figure).
If num is a string, the figure label and the window title is set to this value.
Default to None.
figsize ((float, float), optional):
Width, height in inches.
Default to [6.4, 4.8]
dpi (float, optional):
The resolution of the figure in dots-per-inch.
Default to 100.0.
facecolor (str, optional):
The background color.
edgecolor (str, optional):
The border color.
frameon (bool, optional):
If False, suppress drawing the figure frame.
Defaults to True.
FigureClass (subclass of matplotlib.figure.Figure, optional):
Optionally use a custom Figure instance.
Defaults to matplotlib.figure.Figure.
clear (bool, optional):
If True and the figure already exists, then it is cleared.
Defaults to False.
tight_layout (bool, optional):
If False use subplotpars.
If True adjust subplot parameters using tight_layout with default padding.
When providing a dict containing the keys pad, w_pad, h_pad, and rect,
the default tight_layout paddings will be overridden.
Defaults to False.
constrained_layout (bool, optional):
If True use constrained layout to adjust positioning of plot elements.
Like tight_layout, but designed to be more flexible.
See Constrained Layout Guide for examples.
(Note: does not work with add_subplot or subplot2grid.)
Defaults to False.
fmt (str, optional):
A format string, e.g. 'ro' for red circles.
See the Notes section for a full description of the format strings.
Format strings are just an abbreviation for quickly setting basic line properties.
All of these and more can also be controlled by keyword arguments.
This argument cannot be passed as keyword.
Defaults to None.
save_fig_path (str, optional):
Path of saving figure.
Defaults to None.
**kwargs (matplotlib.lines.Line2D properties, optional):
kwargs are used to specify properties
like a line label (for auto legends), linewidth, antialiasing, marker face color.
You can show the detailed information at `matplotlib.lines.Line2D
<https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_
Returns:
matplotlib.pyplot.Figure: Figure
Examples:
>>> ship.draw_chart("time", "r", xlabel="time [sec]", \
>>> ylabel=r"$u$" + " [rad/s]",save_fig_path='test.png')
"""
target_x = None
if x_index == "time":
target_x = self.time
elif x_index == "u":
target_x = self.u
elif x_index == "v":
target_x = self.v
elif x_index == "r":
target_x = self.r
elif x_index == "x":
target_x = self.x
elif x_index == "y":
target_x = self.y
elif x_index == "psi":
target_x = self.psi
elif x_index == "delta":
target_x = self.ฮด
elif x_index == "ฮด":
target_x = self.ฮด
elif x_index == "npm":
target_x = self.npm
if target_x is None:
            raise Exception(
                "`x_index` is not good. Please set `x_index` from "
                "[time, u, v, r, x, y, psi, delta, δ, npm]"
            )
target_y_list = []
for y_index in y_index_list:
if y_index == "time":
target_y_list.append(self.time)
elif y_index == "u":
target_y_list.append(self.u)
elif y_index == "v":
target_y_list.append(self.v)
elif y_index == "r":
target_y_list.append(self.r)
elif y_index == "x":
target_y_list.append(self.x)
elif y_index == "y":
target_y_list.append(self.y)
elif y_index == "psi":
target_y_list.append(self.psi)
elif y_index == "delta":
target_y_list.append(self.ฮด)
elif y_index == "ฮด":
target_y_list.append(self.ฮด)
elif y_index == "npm":
target_y_list.append(self.npm)
if len(target_y_list) == 0:
            raise Exception(
                "`y_index_list` is not good. Please set values of `y_index_list` from "
                "[time, u, v, r, x, y, psi, delta, δ, npm]"
            )
fig = plt.figure(
num=num,
figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
clear=clear,
tight_layout=tight_layout,
constrained_layout=constrained_layout,
)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if fmt is None:
for target_y in target_y_list:
plt.plot(target_x, target_y, **kwargs)
else:
for target_y in target_y_list:
plt.plot(target_x, target_y, fmt, **kwargs)
if save_fig_path is not None:
plt.savefig(save_fig_path)
plt.close()
return fig
def draw_gif(
self,
dimensionless: bool = False,
aspect_equal: bool = True,
frate: int = 10,
interval: int = 100,
num: int or str = None,
figsize: List[float] = [6.4, 4.8],
dpi: float = 100.0,
facecolor: str = None,
edgecolor: str = None,
frameon: bool = True,
FigureClass: matplotlib.figure.Figure = matplotlib.figure.Figure,
clear: bool = False,
tight_layout: bool = False,
constrained_layout: bool = False,
fmt: str = "--k",
save_fig_path: str = None,
**kwargs
) -> plt.Figure:
"""Draw GIF of ship trajectory
Args:
dimensionless (bool, optional):
drawing with dimensionless by using L or not.
Defaults to False
aspect_equal (bool, optional):
Set equal of figure aspect or not.
Defaults to True.
frate (int, optional):
One of the parameter of `frames` in matplotlib.FuncAnimation().
`frames` expresses source of data to pass func and each frame of the animation.
`frames = int (len(time) / frate)`
Defaults to 10.
interval (int, optional):
Delay between frames in milliseconds.
Defaults to 100.
num (int or str, optional):
A unique identifier for the figure.
If a figure with that identifier already exists, this figure is made active and returned.
An integer refers to the Figure.number attribute, a string refers to the figure label.
If there is no figure with the identifier or num is not given,
a new figure is created, made active and returned.
If num is an int, it will be used for the Figure.number attribute.
Otherwise, an auto-generated integer value is used (starting at 1 and incremented for each new figure).
If num is a string, the figure label and the window title is set to this value.
Default to None.
figsize ((float, float), optional):
Width, height in inches.
Default to [6.4, 4.8]
dpi (float, optional):
The resolution of the figure in dots-per-inch.
Default to 100.0.
facecolor (str, optional):
The background color.
edgecolor (str, optional):
The border color.
frameon (bool, optional):
If False, suppress drawing the figure frame.
Defaults to True.
FigureClass (subclass of matplotlib.figure.Figure, optional):
Optionally use a custom Figure instance.
Defaults to matplotlib.figure.Figure.
clear (bool, optional):
If True and the figure already exists, then it is cleared.
Defaults to False.
tight_layout (bool, optional):
If False use subplotpars.
If True adjust subplot parameters using tight_layout with default padding.
When providing a dict containing the keys pad, w_pad, h_pad, and rect,
the default tight_layout paddings will be overridden.
Defaults to False.
constrained_layout (bool, optional):
If True use constrained layout to adjust positioning of plot elements.
Like tight_layout, but designed to be more flexible.
See Constrained Layout Guide for examples.
(Note: does not work with add_subplot or subplot2grid.)
Defaults to False.
fmt (str, optional):
A format string, e.g. 'ro' for red circles.
See the Notes section for a full description of the format strings.
Format strings are just an abbreviation for quickly setting basic line properties.
All of these and more can also be controlled by keyword arguments.
This argument cannot be passed as keyword.
Defaults to "--k".
save_fig_path (str, optional):
Path of saving figure.
Defaults to None.
**kwargs (matplotlib.lines.Line2D properties, optional):
kwargs are used to specify properties
like a line label (for auto legends), linewidth, antialiasing, marker face color.
You can show the detailed information at `matplotlib.lines.Line2D
<https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_
Examples:
>>> ship.draw_gif(save_fig_path='test.gif')
"""
fig = plt.figure(
num=num,
figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
clear=clear,
tight_layout=tight_layout,
constrained_layout=constrained_layout,
)
ax = fig.add_subplot(111)
if dimensionless:
draw_x = np.array(self.x) / self.L
draw_y = np.array(self.y) / self.L
ax.set_xlabel(r"$x/L$")
ax.set_ylabel(r"$y/L$")
shape = (1 / 2, self.B / (2 * self.L))
else:
draw_x = np.array(self.x)
draw_y = np.array(self.y)
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
shape = (self.L / 2, self.B / 2)
if fmt is not None:
plt.plot(draw_x, draw_y, fmt, **kwargs)
else:
plt.plot(draw_x, draw_y, ls="--", color="k", **kwargs)
if aspect_equal:
ax.set_aspect("equal")
drawer = DrawObj(ax)
        def update_obj(i, x_list, y_list, shape_list, ψ_list, frate):
            j = int(frate * i)
            plt.title(r"$t$ = " + "{:.1f}".format(self.time[j]))
            xT = np.array(x_list).T
            _x_list_j = list(xT[j].T)
            yT = np.array(y_list).T
            _y_list_j = list(yT[j].T)
            ψT = np.array(ψ_list).T
            _ψ_list_j = list(ψT[j].T)
            return drawer.draw_obj_with_angle(
                _x_list_j, _y_list_j, shape_list, _ψ_list_j
            )
ani = FuncAnimation(
fig,
update_obj,
fargs=(
[draw_x],
[draw_y],
[shape],
[self.psi],
frate,
),
interval=interval,
frames=int(len(self.time) / frate),
)
gif = ani.save(save_fig_path, writer="pillow")
plt.close()
return gif
|
StarcoderdataPython
|
5030524
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.command_modules.appservice.custom import (
show_webapp,
_get_site_credential,
_get_scm_url)
logger = get_logger(__name__)
def start_scan(cmd, resource_group_name, name, timeout="", slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
start_scan_url = scm_url + '/api/scan/start?timeout=' + timeout
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
response = requests.get(start_scan_url, headers=authorization)
return response.json()
def get_scan_result(cmd, resource_group_name, name, scan_id, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
scan_result_url = scm_url + '/api/scan/' + scan_id + '/result'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
response = requests.get(scan_result_url, headers=authorization)
return response.json()
def track_scan(cmd, resource_group_name, name, scan_id, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
scan_result_url = scm_url + '/api/scan/' + scan_id + '/track'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
response = requests.get(scan_result_url, headers=authorization)
return response.json()
def get_all_scan_result(cmd, resource_group_name, name, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
scan_result_url = scm_url + '/api/scan/results'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
response = requests.get(scan_result_url, headers=authorization)
return response.json()
def stop_scan(cmd, resource_group_name, name, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
stop_scan_url = scm_url + '/api/scan/stop'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
requests.delete(stop_scan_url, headers=authorization)
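# A sketch of how these helpers chain together (not an official workflow; the
# "Id" key in the start-scan response is an assumption and may differ):
#
#   scan = start_scan(cmd, "my-rg", "my-linux-webapp", timeout="120")
#   track_scan(cmd, "my-rg", "my-linux-webapp", scan["Id"])
#   result = get_scan_result(cmd, "my-rg", "my-linux-webapp", scan["Id"])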
|
StarcoderdataPython
|
6557268
|
from typing import List
# In-house development modules
from app.infra.db.base_repo import BaseRepo
from app.models.pets import PetSummary
class PetsRepo():
def __init__(self):
self.base_repo = BaseRepo()
def get_pets(self) -> List[PetSummary]:
sql = """
SELECT pet_id, name, type FROM pets;
"""
try:
self.base_repo.execute(sql=sql)
rows = self.base_repo.fetchall()
pet_summaries = []
for row in rows:
pet_summaries.append(
PetSummary(pet_id=row['pet_id'],
name=row['name'],
type=row['type']))
return pet_summaries
except Exception as e:
self.base_repo.exception_handler(e)
raise e
finally:
self.base_repo.clean_up()
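# Example usage (a sketch; assumes BaseRepo is configured with a live database
# connection):
#
#   repo = PetsRepo()
#   for pet in repo.get_pets():
#       print(pet.pet_id, pet.name, pet.type)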
|
StarcoderdataPython
|
1849856
|
<filename>Python/067.py
# -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 67
Date: 16 Apr 2015
This code is the same as for problem 18, with a slight difference in the way
file is read.
Author: <NAME>
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
def run():
# Read data file
data = []
with open('Data/p067_triangle.txt') as f:
for line in f:
data.append([int(x) for x in line.split(' ')])
    # Starts from the bottom of the pyramid and works upwards to create
    # a cumulative sum pyramid, where the value at each point is the maximum
    # value of the possible sums below that point.
for n in range(len(data) - 2, -1, -1):
for i, num in enumerate(data[n]):
data[n][i] += max(data[n+1][i], data[n+1][i+1])
return data[0][0]
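# Worked example of the bottom-up pass on a 3-row triangle:
#   [[3], [7, 4], [2, 4, 6]]
#   row 1 becomes [7 + max(2, 4), 4 + max(4, 6)] = [11, 10]
#   row 0 becomes [3 + max(11, 10)]              = [14]
# so run() would return 14 for that input.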
if __name__ == '__main__':
print(run())
|
StarcoderdataPython
|
5045216
|
<reponame>proteneer/timemachine
# tests for parallel execution
import numpy as np
import random
from tempfile import NamedTemporaryFile
import parallel
from parallel import client, worker
from parallel.utils import get_gpu_count
import os
import unittest
from unittest.mock import patch
import grpc
import concurrent
import jax.numpy as jnp
def jax_fn(x):
return jnp.sqrt(x)
def square(a):
return a * a
def mult(x, y):
return x * y
class TestProcessPool(unittest.TestCase):
def setUp(self):
max_workers = 10
self.cli = client.ProcessPoolClient(max_workers)
def test_submit(self):
arr = np.linspace(0, 1.0, 5)
futures = []
for x in arr:
fut = self.cli.submit(square, x)
futures.append(fut)
test_res = []
for f in futures:
test_res.append(f.result())
np.testing.assert_array_equal(test_res, arr * arr)
def test_jax(self):
# (ytz): test that jax code can be launched via multiprocessing
# if we didn't set get_context('spawn') earlier then this will hang.
x = jnp.array([50.0, 2.0])
fut = self.cli.submit(jax_fn, x)
np.testing.assert_almost_equal(fut.result(), np.sqrt(x))
def environ_check():
return os.environ["CUDA_VISIBLE_DEVICES"]
class TestGPUCount(unittest.TestCase):
@patch("parallel.utils.check_output")
def test_get_gpu_count(self, mock_output):
mock_output.return_value = b"\n".join([f"GPU #{i}".encode() for i in range(5)])
assert parallel.utils.get_gpu_count() == 5
mock_output.return_value = b"\n".join([f"GPU #{i}".encode() for i in range(100)])
assert parallel.utils.get_gpu_count() == 100
mock_output.side_effect = FileNotFoundError("nvidia-smi missing")
with self.assertRaises(FileNotFoundError):
parallel.utils.get_gpu_count()
class TestCUDAPoolClient(unittest.TestCase):
def setUp(self):
self.max_workers = get_gpu_count()
self.cli = client.CUDAPoolClient(self.max_workers)
def test_submit(self):
operations = 10
futures = []
for _ in range(operations):
fut = self.cli.submit(environ_check)
futures.append(fut)
test_res = []
for f in futures:
test_res.append(f.result())
expected = [str(i % self.max_workers) for i in range(operations)]
np.testing.assert_array_equal(test_res, expected)
def test_too_many_workers(self):
# I look forward to the day that we have 814 GPUs
cli = client.CUDAPoolClient(814)
with self.assertRaises(AssertionError):
cli.verify()
class TestGRPCClient(unittest.TestCase):
def setUp(self):
starting_port = random.randint(2000, 5000)
# setup server, in reality max_workers is equal to number of gpus
self.ports = [starting_port + i for i in range(2)]
self.servers = []
for port in self.ports:
server = grpc.server(
concurrent.futures.ThreadPoolExecutor(max_workers=1),
options=[
("grpc.max_send_message_length", 50 * 1024 * 1024),
("grpc.max_receive_message_length", 50 * 1024 * 1024),
],
)
parallel.grpc.service_pb2_grpc.add_WorkerServicer_to_server(worker.Worker(), server)
server.add_insecure_port("[::]:" + str(port))
server.start()
self.servers.append(server)
# setup client
self.hosts = [f"0.0.0.0:{port}" for port in self.ports]
self.cli = client.GRPCClient(self.hosts)
@patch("parallel.worker.get_worker_status")
def test_checking_host_status(self, mock_status):
# All the workers return the same thing
mock_status.side_effect = [
parallel.grpc.service_pb2.StatusResponse(nvidia_driver="foo", git_sha="bar") for _ in self.servers
]
self.cli.verify()
mock_status.side_effect = [
parallel.grpc.service_pb2.StatusResponse(nvidia_driver=f"foo{i}", git_sha=f"bar{i}")
for i in range(len(self.servers))
]
with self.assertRaises(AssertionError):
self.cli.verify()
@patch("parallel.worker.get_worker_status")
def test_unavailable_host(self, mock_status):
hosts = self.hosts.copy()
bad_host = "192.168.127.12:8888"
        hosts.append(bad_host)  # Give it a bad connection, should fail
cli = client.GRPCClient(hosts)
# All the workers return the same thing
mock_status.side_effect = [
parallel.grpc.service_pb2.StatusResponse(nvidia_driver="foo", git_sha="bar") for _ in self.servers
]
with self.assertRaises(AssertionError) as e:
cli.verify()
self.assertIn(bad_host, str(e.exception))
def test_default_port(self):
host = "192.168.127.12"
cli = client.GRPCClient([host], default_port=9999)
self.assertEqual(cli.hosts[0], "192.168.127.12:9999")
def test_hosts_from_file(self):
with self.assertRaises(AssertionError):
cli = client.GRPCClient("nosuchfile", default_port=9999)
hosts = ["192.168.127.12", "127.127.127.127:8888"]
with NamedTemporaryFile(suffix=".txt") as temp:
for host in hosts:
temp.write(f"{host}\n".encode("utf-8"))
temp.flush()
cli = client.GRPCClient(temp.name, default_port=9999)
self.assertEqual(cli.hosts[0], "192.168.127.12:9999")
self.assertEqual(cli.hosts[1], hosts[1])
def test_foo_2_args(self):
xs = np.linspace(0, 1.0, 5)
ys = np.linspace(1.2, 2.2, 5)
futures = []
for x, y in zip(xs, ys):
fut = self.cli.submit(mult, x, y)
futures.append(fut)
test_res = []
for f in futures:
test_res.append(f.result())
np.testing.assert_array_equal(test_res, xs * ys)
def test_foo_1_arg(self):
xs = np.linspace(0, 1.0, 5)
futures = []
for x in xs:
fut = self.cli.submit(square, x)
futures.append(fut)
test_res = []
for f in futures:
test_res.append(f.result())
np.testing.assert_array_equal(test_res, xs * xs)
def tearDown(self):
for server in self.servers:
server.stop(5)
|
StarcoderdataPython
|
3323194
|
<gh_stars>10-100
# Generated by Django 2.2.13 on 2020-07-28 20:28
from django.db import migrations
UPDATE_INTENDED_WATER_USE_CODES = """
UPDATE well SET intended_water_use_code = 'NA'
WHERE well_class_code = 'MONITOR' and intended_water_use_code = 'UNK';
UPDATE well SET
intended_water_use_code = 'NA',
well_class_code = 'MONITOR',
well_subclass_guid = subquery.well_subclass_guid
FROM (select well_subclass_guid from well_subclass_code where well_subclass_code = 'PERMANENT' and well_class_code = 'MONITOR') as subquery
WHERE observation_well_number is not null and obs_well_status_code is not null;
UPDATE well SET intended_water_use_code = 'NA'
WHERE well_class_code = 'MONITOR' and intended_water_use_code = 'OBS'
and observation_well_number is null and obs_well_status_code is null;
"""
class Migration(migrations.Migration):
dependencies = [
('wells', '0120_update_water_supply_codes'),
]
operations = [
migrations.RunSQL([
UPDATE_INTENDED_WATER_USE_CODES
]),
]
|
StarcoderdataPython
|
4944991
|
<gh_stars>0
import time
import os
import requests
import json
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
from intruo import Intruo, IntruoConfiguration, IntruoModules
from flask import Flask, render_template, jsonify, request, send_file
from nanoid import generate
DEBUG_INTRUO = True
template_folder = os.path.join(os.getcwd(), 'web', 'templates')
static_folder = os.path.join(os.getcwd(), 'web', 'static')
app = Flask(
import_name='__name__',
template_folder=template_folder,
static_url_path='/static',
static_folder=static_folder,
)
app.config['SECRET_KEY'] = 'INTRUO_SECRET_KEY'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///intruo.db'
db = SQLAlchemy(app)
class IntruoDB(db.Model):
id = db.Column(db.Integer, primary_key = True)
public_id = db.Column(db.Text())
domain = db.Column(db.Text())
time_init = db.Column(db.Text())
time_end = db.Column(db.Text())
result = db.Column(db.Text())
def __init__(self, public_id, domain, time_init, time_end, result):
self.public_id = public_id
self.domain = domain
self.time_init = time_init
self.time_end = time_end
self.result = result
db.create_all()
# Routing Main
@app.route('/', methods=['GET'])
def main():
return render_template('main.html')
# Routing Result
@app.route('/resultado/<public_id>', methods=['GET'])
def result(public_id):
data = IntruoDB.query.filter_by(public_id=public_id).first()
result = json.loads(data.result)
return render_template('result.html', result=result)
# Routing API
@app.route('/api/check_configuration', methods=['GET'])
def api_configuration_check():
configuration = IntruoConfiguration.check_configuration()
for item in configuration:
if configuration[item]['result'] == False:
return jsonify(configuration), 400
return jsonify(configuration)
@app.route('/api/configuration/install/driver', methods=['POST'])
def api_configuration_install_driver():
data = request.files
if not 'driver' in data:
        return jsonify({'error': 'The driver could not be installed. Try reinstalling INTRUO.'}), 400
driver = request.files['driver']
driver.save(os.path.join(os.getcwd(), 'utils', 'chromedriver.exe'))
return jsonify(True)
@app.route('/api/modules', methods=['GET'])
def api_modules():
result = []
for module in IntruoModules:
result.append(module.value.split('__')[1].replace('_', ' '))
return jsonify(result)
@app.route('/api/module/page_online', methods=['POST'])
def api_module_is_up():
data = request.json
domain = data['domain']
try:
r = requests.get(domain)
except requests.exceptions.RequestException as e:
print(e)
        return jsonify('The page is not available.'), 400
return jsonify(True)
@app.route('/api/modules/run', methods=['POST'])
def api_module_run():
data = request.json
domain = data['domain']
modules = data['modules']
intruo = Intruo(domain=domain, debug=DEBUG_INTRUO)
intruo.module__https()
for module in modules:
module = module.replace(' ', '_')
module = f'module__{module.lower()}'
getattr(intruo, module)()
save_result = intruo.action_generate_results()
result = intruo.result
intruo_record = IntruoDB(
public_id=generate('1234567890abcdefghijkmnopqrstuvwxyz', 10),
domain=domain,
time_init=result['time_execution']['init'],
time_end=result['time_execution']['end'],
result=json.dumps(save_result)
)
db.session.add(intruo_record)
db.session.commit()
return jsonify(intruo_record.public_id)
@app.route('/api/module/screenshot', methods=['POST'])
def api_module_screenshot():
data = request.json
domain = data['domain']
intruo = Intruo(domain=domain, debug=DEBUG_INTRUO)
filename = intruo.action_get_screenshoot()
return jsonify(filename)
@app.route('/api/download/json/<public_id>', methods=['GET'])
def api_download_json_result(public_id):
data = IntruoDB.query.filter_by(public_id=public_id).first()
json_file = json.loads(data.result)['json']
return send_file(os.path.join(os.getcwd(), 'web', 'static', 'results', 'json', json_file), as_attachment=True, download_name=json_file)
@app.route('/api/download/html/<public_id>', methods=['GET'])
def api_download_html_result(public_id):
data = IntruoDB.query.filter_by(public_id=public_id).first()
html_file = json.loads(data.result)['html']
return send_file(os.path.join(os.getcwd(), 'web', 'static', 'results', 'html', html_file), as_attachment=True, download_name=html_file)
@app.route('/api/scan/history', methods=['GET'])
def api_scan_history():
history = IntruoDB.query.all()
result = []
for row in history:
result.append({
'public_id': row.public_id,
'domain': row.domain,
'time_init': row.time_init,
'time_end': row.time_end,
'result': json.loads(row.result)
})
return jsonify(result)
@app.route('/api/scan/delete/<public_id>', methods=['DELETE'])
def api_scan_delete(public_id):
data = IntruoDB.query.filter_by(public_id=public_id).first()
data_files = json.loads(data.result)
files = {
'screenshot': os.path.join(os.getcwd(), 'web', 'static', 'results', 'screenshot', data_files['screenshot']),
'json': os.path.join(os.getcwd(), 'web', 'static', 'results', 'json', data_files['json']),
'js': os.path.join(os.getcwd(), 'web', 'static', 'results', 'js', data_files['js']),
'html': os.path.join(os.getcwd(), 'web', 'static', 'results', 'html', data_files['html']),
}
for f in files:
if os.path.exists(files[f]):
os.remove(files[f])
db.session.delete(data)
db.session.commit()
return jsonify(True)
if __name__ == "__main__":
# openedIntruo = False
# if not openedIntruo:
# Intruo.action_open_browser('http://127.0.0.1:5000/')
# openedIntruo = True
# app.run(debug=True, use_reloader=False)
app.run(debug=True, use_reloader=True)
|
StarcoderdataPython
|
5029443
|
<filename>tensorflow_addons/losses/sparsemax_loss_test.py<gh_stars>0
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from tensorflow_addons.activations import sparsemax
from tensorflow_addons.losses import sparsemax_loss, SparsemaxLoss
from tensorflow_addons.utils import test_utils
test_obs = 17
def _np_sparsemax(z):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
# use argmax to get the index by row as .nonzero() doesn't
# take an axis argument. np.argmax return the first index, but the last
# index is required here, use np.flip to get the last index and
# `z.shape[axis]` to compensate for np.flip afterwards.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
def _np_sparsemax_loss(z, q):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# Calculate q^T * z
z_k = np.sum(q * z, axis=1)
# calculate sum over S(z)
p = _np_sparsemax(z)
s = p > 0
# z_i^2 - tau(z)^2 = p_i (2 * z_i - p_i) for i \in S(z)
S_sum = np.sum(s * p * (2 * z - p), axis=1)
# because q is binary, sum([q_1^2, q_2^2, ...]) is just sum(q)
q_norm = np.sum(q, axis=1)
return -z_k + 0.5 * S_sum + 0.5 * q_norm
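# Reference for _np_sparsemax_loss above (Martins & Astudillo, 2016): the
# sparsemax loss with a one-hot (or binary) target q is
#   L(z, q) = -q^T z + 1/2 * sum_{j in S(z)} (z_j^2 - tau(z)^2) + 1/2 * ||q||^2
# and the support-sum term uses the identity
#   z_j^2 - tau(z)^2 = p_j * (2 * z_j - p_j)  for j in S(z),
# which is exactly the S_sum computed above.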
@test_utils.run_all_with_types(['float32', 'float64'])
@test_utils.run_all_in_graph_and_eager_modes
class SparsemaxTest(tf.test.TestCase):
def _tf_sparsemax(self, z, dtype):
tf_sparsemax_op = sparsemax(z.astype(dtype))
tf_sparsemax_out = self.evaluate(tf_sparsemax_op)
return tf_sparsemax_op, tf_sparsemax_out
def _tf_sparsemax_loss(self, z, q, dtype):
z = z.astype(dtype)
q = q.astype(dtype)
tf_sparsemax_op = sparsemax(z)
tf_loss_op = sparsemax_loss(z, tf_sparsemax_op, q)
tf_loss_out = self.evaluate(tf_loss_op)
return tf_loss_op, tf_loss_out
    def test_sparsemax_loss_constructor_against_numpy(self, dtype=None):
        """check sparsemax-loss constructor against numpy."""
random = np.random.RandomState(1)
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
loss_object = SparsemaxLoss()
tf_loss_op = loss_object(q, z) # pylint: disable=not-callable
tf_loss_out = self.evaluate(tf_loss_op)
np_loss = np.mean(_np_sparsemax_loss(z, q).astype(dtype))
self.assertAllCloseAccordingToType(np_loss, tf_loss_out)
self.assertShapeEqual(np_loss, tf_loss_op)
def test_sparsemax_loss_constructor_not_from_logits(self, dtype=None):
"""check sparsemax-loss construcor throws when from_logits=True."""
self.assertRaises(ValueError, lambda: SparsemaxLoss(from_logits=False))
def test_sparsemax_loss_against_numpy(self, dtype=None):
"""check sparsemax-loss kernel against numpy."""
random = np.random.RandomState(1)
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype)
np_loss = _np_sparsemax_loss(z, q).astype(dtype)
self.assertAllCloseAccordingToType(np_loss, tf_loss_out)
self.assertShapeEqual(np_loss, tf_loss_op)
def test_sparsemax_loss_of_nan(self, dtype=None):
"""check sparsemax-loss transfers nan."""
random = np.random.RandomState(2)
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],
[np.nan, np.nan, np.nan]]).astype(dtype)
_, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype)
self.assertAllEqual([np.nan, np.nan, np.nan], tf_loss_nan)
def test_sparsemax_loss_of_inf(self, dtype=None):
"""check sparsemax-loss is infinity safe."""
random = np.random.RandomState(3)
q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])
z_neg = np.asarray([
[0, -np.inf, 0],
[0, -np.inf, -np.inf],
[-np.inf, -np.inf, 0],
[-np.inf, -np.inf, -np.inf],
]).astype(dtype)
z_pos = np.asarray([[0, np.inf, 0], [0, np.inf, np.inf],
[np.inf, np.inf, 0], [np.inf, np.inf,
np.inf]]).astype(dtype)
z_mix = np.asarray([[0, np.inf, 0], [0, np.inf, -np.inf],
[-np.inf, np.inf, 0], [-np.inf, np.inf,
-np.inf]]).astype(dtype)
_, tf_loss_neg = self._tf_sparsemax_loss(z_neg, q, dtype)
self.assertAllEqual([0.25, np.inf, 0, np.nan], tf_loss_neg)
_, tf_loss_pos = self._tf_sparsemax_loss(z_pos, q, dtype)
self.assertAllEqual([np.nan, np.nan, np.nan, np.nan], tf_loss_pos)
_, tf_loss_mix = self._tf_sparsemax_loss(z_mix, q, dtype)
self.assertAllEqual([np.nan, np.nan, np.nan, np.nan], tf_loss_mix)
def test_constant_add(self, dtype=None):
"""check sparsemax-loss proposition 3."""
random = np.random.RandomState(4)
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
c = random.uniform(low=-3, high=3, size=(test_obs, 1))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
_, tf_loss_zpc = self._tf_sparsemax_loss(z + c, q, dtype)
_, tf_loss_z = self._tf_sparsemax_loss(z, q, dtype)
self.assertAllCloseAccordingToType(
tf_loss_zpc, tf_loss_z, float_atol=5e-6, float_rtol=5e-6)
def test_sparsemax_loss_positive(self, dtype=None):
"""check sparsemax-loss proposition 4."""
random = np.random.RandomState(5)
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype)
self.assertAllCloseAccordingToType(np.abs(tf_loss_out), tf_loss_out)
self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)
def test_sparsemax_loss_zero(self, dtype=None):
"""check sparsemax-loss proposition 5."""
random = np.random.RandomState(6)
# construct z and q, such that z_k >= 1 + max_{j!=k} z_j holds for
# delta_0 = 1.
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
z[:, 0] = np.max(z, axis=1) + 1.05
q = np.zeros((test_obs, 10))
q[:, 0] = 1
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype)
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype)
self.assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out)
self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)
self.assertAllCloseAccordingToType(q, tf_sparsemax_out)
self.assertShapeEqual(q, tf_sparsemax_op)
def test_gradient_against_estimate(self, dtype=None):
"""check sparsemax-loss Rop, against estimated-loss Rop."""
random = np.random.RandomState(7)
# sparsemax is not a smooth function so gradient estimation is only
# possible for float64.
if dtype != 'float64':
return
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
q = np.zeros((test_obs, 10)).astype(dtype)
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
(jacob_sym,), (jacob_num,) = tf.test.compute_gradient(
lambda logits: sparsemax_loss(logits, sparsemax(logits), q), [z])
self.assertAllCloseAccordingToType(jacob_sym, jacob_num)
if __name__ == '__main__':
tf.test.main()
|
StarcoderdataPython
|
4804268
|
print("Hola xd")
|
StarcoderdataPython
|
5082240
|
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.x509.oid import ExtensionOID, NameOID
def is_ca(certificate):
# TODO: test self signed if no extensions found
extensions = certificate.extensions
try:
return extensions.get_extension_for_oid(ExtensionOID.BASIC_CONSTRAINTS).value.ca
except x509.ExtensionNotFound:
try:
return extensions.get_extension_for_oid(ExtensionOID.KEY_USAGE).value.key_cert_sign
except x509.ExtensionNotFound:
pass
return False
def build_name_attributes_update_dict_from_name(name):
update_dict = {}
for oid, ztl_attr, is_list in ((NameOID.COMMON_NAME, "common_name", False),
(NameOID.ORGANIZATION_NAME, "organization", False),
(NameOID.ORGANIZATIONAL_UNIT_NAME, "organizational_unit", False),
(NameOID.DOMAIN_COMPONENT, "domain", True)):
name_attributes = name.get_attributes_for_oid(oid)
if name_attributes:
if is_list:
value = ".".join(na.value for na in name_attributes[::-1])
else:
value = name_attributes[-1].value
update_dict[ztl_attr] = value
return update_dict
def build_cert_tree(certificate):
cert_tree = {
"valid_from": certificate.not_valid_before,
"valid_until": certificate.not_valid_after,
"signed_by": build_name_attributes_update_dict_from_name(certificate.issuer),
"sha_1": certificate.fingerprint(hashes.SHA1()).hex()
}
cert_tree.update(build_name_attributes_update_dict_from_name(certificate.subject))
return cert_tree
def iter_certificates(pem_certificates):
default_backend_instance = default_backend()
for pem_certificate in pem_certificates:
yield x509.load_pem_x509_certificate(pem_certificate.encode("utf-8"), default_backend_instance)
def prepare_ms_tree_certificates(ms_tree):
"""
filter and process the uploaded device pem certificates
"""
pem_certificates = ms_tree.pop("pem_certificates", [])
certificates = []
for certificate in iter_certificates(pem_certificates):
# filter out CA certificates
if is_ca(certificate):
continue
# build the cert tree
cert_tree = build_cert_tree(certificate)
if cert_tree not in certificates:
certificates.append(cert_tree)
# update the ms tree
if certificates:
ms_tree["certificates"] = certificates
|
StarcoderdataPython
|
12856526
|
<filename>apps/goods/migrations/0063_auto_20200108_1555.py
# Generated by Django 2.1.8 on 2020-01-08 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0062_auto_20191205_1656'),
]
operations = [
migrations.RenameField(
model_name='replgoodstype',
old_name='price',
new_name='credit',
),
migrations.AlterField(
model_name='replgoodstype',
name='credit',
field=models.PositiveIntegerField(default=0, verbose_name='Points'),
preserve_default=False,
),
migrations.AddField(
model_name='replgoodstype',
name='market_price',
field=models.DecimalField(decimal_places=2, default=0, max_digits=15, verbose_name='Market price'),
),
migrations.AddField(
model_name='replgoodstype',
name='price',
field=models.DecimalField(decimal_places=2, default=0, max_digits=15, verbose_name='Price'),
),
]
|
StarcoderdataPython
|
3461336
|
<filename>prometheus_adaptive_cards/config/logger.py
"""
Copyright 2020 <NAME>. Licensed under the Apache License 2.0
Configures Loguru by adding sinks and makes everything ready to be used for
logging with FastAPI and Uvicorn. Opinionated. Importing alone is not enough.
"""
import json
import logging
import sys
import uvicorn
from loguru import logger
from prometheus_adaptive_cards.config.settings import Logging
class _InterceptHandler(logging.Handler):
"""Handler that redirects all logs to Loguru.
**License and Attributions:** Unlicensed. All attributions go to Timothée
Mazzucotelli. Found [here](https://pawamoy.github.io/posts/unify-logging-for-a-gunicorn-uvicorn-app/).
"""
def emit(self, record):
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = record.levelno
# Find caller from where originated the logged message
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())
def _sink(message) -> None:
"""Serializes record given by Loguru.
Used instead of `serialize=True` to customize the contents of the
structured log. Simplifying it for later usage in log monitoring. Also
compare this function with
<https://github.com/Delgan/loguru/blob/a9a2438a1ad63f41741a3c50f2efdeff17745396/loguru/_handler.py#L222>.
"""
record = message.record
exception = record["exception"]
if exception is not None:
exception = {
"type": None if exception.type is None else exception.type.__name__,
"value": exception.value,
"traceback": bool(record["exception"].traceback),
}
print(
json.dumps(
{
"timestamp": record["time"].timestamp(),
"time": record["time"],
"level": record["level"].name,
"level_value": record["level"].no,
"message": record["message"],
"extra": record["extra"],
"exception": exception,
"line": record["line"],
"module": record["module"],
"name": record["name"],
"function": record["function"],
"file_name": record["file"].name,
"file_path": record["file"].path,
"process_id": record["process"].id,
"thread_id": record["thread"].id,
"elapsed_seconds": record["elapsed"].total_seconds(),
},
default=str,
)
+ "\n",
file=sys.stderr,
)
def _setup_sink(
format: str,
level: str,
structured_custom_serializer: bool,
unstructured_fmt: str,
unstructured_colorize: bool,
) -> int:
"""
Adds and configures the sink Loguru should use. The default sink is not removed.
Returns:
int: Handler / Sink id. Can be passed to `remove()`.
"""
if format == "structured":
if structured_custom_serializer:
return logger.add(_sink, format="{message}", level=level)
else:
return logger.add(sys.stderr, format="{message}", serialize=True, level=level)
else:
return logger.add(
sys.stderr,
colorize=unstructured_colorize,
format=unstructured_fmt,
level=level,
)
def setup_logging(logging_settings: Logging = Logging()):
"""Sets up logging.
Configures Loguru sink based on given settings and also python logging
module default handlers and prepares Uvicorn to log to these handlers.
Args:
logging_settings (Logging, optional): Settings. Defaults to Logging().
"""
_setup_sink(
logging_settings.format,
logging_settings.level,
logging_settings.structured.custom_serializer,
logging_settings.unstructured.fmt,
logging_settings.unstructured.colorize,
)
logging.basicConfig(
level=logging.getLevelName(logging_settings.level),
handlers=[_InterceptHandler()],
)
uvicorn.main.LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"loggers": {
"uvicorn": {"level": logging_settings.level},
"uvicorn.error": {"level": logging_settings.level},
"uvicorn.access": {"level": logging_settings.level},
},
}
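# Hedged usage sketch (not part of the original module): configure the sink with
# the default Logging() settings, then log through Loguru as usual.
#
# setup_logging()
# logger.bind(component="demo").info("logging configured")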
|
StarcoderdataPython
|
104000
|
class Solution:
def canConvertString(self, s: str, t: str, k: int) -> bool:
|
StarcoderdataPython
|
5081194
|
<gh_stars>10-100
#!/usr/bin/env python3
import sys
import platform
def getinfo(name):
if name == "OS":
val = platform.system().lower()
if "msys" in val or "mingw" in val:
return "windows"
return val
elif name == "ARCH":
is64bit = platform.architecture()[0] == "64bit"
val = platform.machine().lower()
if val.startswith("arm") or val == "aarch64" or val == "arm64":
return "aarch64" if is64bit else "arm"
elif val in ["i386", "i686", "amd64", "x86_64"]:
return "x64" if is64bit else "x86"
else:
sys.stderr.write("Unknown architecture: '%s'\n" % val)
return "unknown"
else:
return None
if __name__ == "__main__":
def main():
if len(sys.argv) != 2:
sys.stderr.write("Invalid number of arguments: %d\n" % (len(sys.argv) - 1))
sys.exit(1)
val = getinfo(sys.argv[1])
if val is None:
sys.stderr.write("Invalid argument '%s'\n" % sys.argv[1])
sys.exit(1)
print(val)
main()
|
StarcoderdataPython
|
6554679
|
<reponame>lethain/phabulous
"""
Inspect the status of a given project.
"""
import phabulous
phab = phabulous.Phabulous()
project = phab.project(id=481)
# print out some stuff
print "%s\b" % project
for attr in ('name', 'date_created', 'phid', 'id'):
print "%s: %s" % (attr.capitalize(), getattr(project, attr))
print "members:"
for user in project.members:
print "\t%s" % user.name
for task in user.tasks[:5]:
print "\t\t%s" % task.title
|
StarcoderdataPython
|
159251
|
<reponame>berleon/seqgan
import re
import numpy as np
from keras.utils.data_utils import get_file
class TextSequenceData:
def __init__(self, fname, origin, inlen=100, outlen=50,
step=10, strip_ws=True):
self.inlen = inlen
self.outlen = outlen
self.step = step
self.path = get_file(fname, origin=origin)
text = open(self.path, encoding="utf-8").read().lower()
if strip_ws:
text = re.sub(' +', ' ', text).strip()
text = re.sub('\n+', '\n', text).strip()
self.chars = sorted(list(set(text)))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
self.build_dataset(text)
def build_dataset(self, text):
insequences = []
outsequences = []
for i in range(0, len(text) - self.inlen - self.outlen, self.step):
iout = i + self.inlen
insequences.append(text[i:iout])
outsequences.append(text[iout:iout+self.outlen])
self.X = np.zeros((len(insequences), self.inlen, len(self.chars)),
dtype=np.bool)
self.Y = np.zeros((len(outsequences), self.outlen, len(self.chars)),
dtype=np.bool)
for i, seq in enumerate(insequences):
for t, char in enumerate(seq):
self.X[i, t, self.char_indices[char]] = True
for i, seq in enumerate(outsequences):
for t, char in enumerate(seq):
self.Y[i, t, self.char_indices[char]] = True
def seq_to_text(self, seq):
chars = []
for char_arr in seq:
ind = np.argmax(char_arr)
chars.append(self.indices_char[ind])
return ''.join(chars)
def batch_to_text(self, batch):
X, Y = batch
return zip([self.seq_to_text(s) for s in X],
[self.seq_to_text(s) for s in Y])
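# Hedged usage sketch (not part of the original module). The file name and origin
# URL are placeholders for any small public-domain text corpus.
#
# data = TextSequenceData("corpus.txt", origin="https://example.com/corpus.txt",
#                         inlen=40, outlen=20, step=3)
# print(data.X.shape, data.Y.shape)  # (n, 40, vocab_size), (n, 20, vocab_size)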
|
StarcoderdataPython
|
6632912
|
import sys
import weakref
import pydoc
from jfx_bridge import bridge
from .server.ghidra_bridge_port import DEFAULT_SERVER_PORT
from .server.ghidra_bridge_host import DEFAULT_SERVER_HOST
""" Use this list to exclude modules and names loaded by the remote ghidra_bridge side from being loaded into namespaces (they'll
still be present in the BridgedObject for the __main__ module. This prevents the ghidra_bridge imported by ghidra_bridge_server
being loaded over the local ghidra_bridge and causing issues. You probably only want this for stuff imported by the ghidra_bridge_server
script that might conflict on the local side (or which is totally unnecessary on the local side, like GhidraBridgeServer).
"""
EXCLUDED_REMOTE_IMPORTS = ["logging", "subprocess", "sys",
"ghidra_bridge", "bridge", "GhidraBridgeServer"]
GHIDRA_BRIDGE_NAMESPACE_TRACK = "__ghidra_bridge_namespace_track__"
def get_listing_panel(tool, ghidra):
""" Get the code listing UI element, so we can get up-to-date location/highlight/selection """
cvs = tool.getService(ghidra.app.services.CodeViewerService)
return cvs.getListingPanel()
class GhidraBridge():
def __init__(self, connect_to_host=bridge.DEFAULT_HOST, connect_to_port=DEFAULT_SERVER_PORT, loglevel=None, namespace=None, interactive_mode=None, response_timeout=bridge.DEFAULT_RESPONSE_TIMEOUT):
""" Set up a bridge. Default settings connect to the default ghidra bridge server,
If namespace is specified (e.g., locals() or globals()), automatically calls get_flat_api() with that namespace.
loglevel for what logging messages you want to capture
interactive_mode should auto-detect interactive environments (e.g., ipython or not in a script), but
you can force it to True or False if you need to. False is normal ghidra script behaviour
(currentAddress/getState() etc locked to the values when the script started. True is closer to the
behaviour in the Ghidra Jython shell - current*/getState() reflect the current values in the GUI
response_timeout is how long to wait for a response before throwing an exception, in seconds
"""
self.bridge = bridge.BridgeClient(
connect_to_host=connect_to_host, connect_to_port=connect_to_port, loglevel=loglevel, response_timeout=response_timeout)
if interactive_mode is None:
# from https://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode, sys.ps1 only present in interactive interpreters
interactive_mode = bool(getattr(sys, 'ps1', sys.flags.interactive))
self.interactive_mode = interactive_mode
self.interactive_listener = None
self.flat_api_modules_list = []
self.namespace_list = []
self.namespace = None
if namespace is not None:
if connect_to_host is None or connect_to_port is None:
raise Exception(
"Can't get_flat_api for the namespace if connect_to_host/port are none - need a server!")
# track the namespace we loaded with - if we're part of an __enter__/__exit__ setup, we'll use it to automatically unload the flat api
self.namespace = namespace
self.get_flat_api(namespace=self.namespace)
def get_flat_api(self, namespace=None):
""" Get the flat API (as well as the GhidraScript API). If a namespace is provided (e.g., locals() or globals()), load the methods and
fields from the APIs into that namespace (call unload_flat_api() to remove). Otherwise, just return the bridged module.
Note that the ghidra and java packages are always loaded into the remote script's side, so get_flat_api with namespace will get the
ghidra api and java namespace for you for free.
"""
remote_main = self.bridge.remote_import("__main__")
if namespace is not None:
# we're going to need the all of __main__, so get it all in one hit
remote_main._bridged_get_all()
if self.interactive_mode:
# if we're in headless mode (indicated by no state attribute for pythonRun or no tool for ghidra headless), we can't actually do interactive mode - we don't have access to a PluginTool
if not hasattr(remote_main, 'state') or remote_main.state.getTool() is None:
self.interactive_mode = False
self.bridge.logger.warning(
"Disabling interactive mode - not supported when running against a headless Ghidra")
else:
# first, manually update all the current* values (this allows us to get the latest values, instead of what they were when the server started
tool = remote_main.state.getTool() # note: tool shouldn't change
listing_panel = get_listing_panel(tool, remote_main.ghidra)
locn = listing_panel.getProgramLocation()
# set the values as overrides in the bridged object - this prevents them from being changed in the remote object
remote_main._bridge_set_override(
"currentAddress", locn.getAddress())
remote_main._bridge_set_override(
"currentProgram", listing_panel.getProgram())
remote_main._bridge_set_override("currentLocation", locn)
remote_main._bridge_set_override(
"currentSelection", listing_panel.getProgramSelection())
remote_main._bridge_set_override(
"currentHighlight", listing_panel.getProgramHighlight())
# next, keep a reference to this module for updating these addresses
self.flat_api_modules_list.append(weakref.ref(remote_main))
# next, overwrite getState with the getState_fix
def getState_fix():
""" Used when in interactive mode - instead of calling the remote getState,
relies on the fact that the current* variables are being updated and creates
a GhidraState based on them.
This avoids resetting the GUI to the original values in the remote getState
"""
return remote_main.ghidra.app.script.GhidraState(tool, tool.getProject(), remote_main.currentProgram, remote_main.currentLocation, remote_main.currentSelection, remote_main.currentHighlight)
remote_main._bridge_set_override("getState", getState_fix)
# finally, install a listener for updates from the GUI events
if self.interactive_listener is None:
def update_vars(currentProgram=None, currentLocation=None, currentSelection=None, currentHighlight=None):
""" For all the namespaces and modules we've returned, update the current* variables that have changed
"""
# clear out any dead references
self.flat_api_modules_list = [
module for module in self.flat_api_modules_list if module() is not None]
update_list = [
module() for module in self.flat_api_modules_list]
for update in update_list:
# possible that a module might have been removed between the clear out and preparing the update list
if update is not None:
if currentProgram is not None:
update.currentProgram = currentProgram
if currentLocation is not None:
# match the order of updates in GhidraScript - location before address
update.currentLocation = currentLocation
update.currentAddress = currentLocation.getAddress()
if currentSelection is not None:
update.currentSelection = currentSelection if not currentSelection.isEmpty() else None
if currentHighlight is not None:
update.currentHighlight = currentHighlight if not currentHighlight.isEmpty() else None
# repeat the same for the namespace dictionaries, but also make sure we update the tracker so we know what to remove later
for update_dict in self.namespace_list:
if currentProgram is not None:
update_dict["currentProgram"] = currentProgram
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentProgram"] = update_dict["currentProgram"]
if currentLocation is not None:
# match the order of updates in GhidraScript - location before address
update_dict["currentLocation"] = currentLocation
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentLocation"] = update_dict["currentLocation"]
update_dict["currentAddress"] = currentLocation.getAddress(
)
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentAddress"] = update_dict["currentAddress"]
if currentSelection is not None:
update_dict["currentSelection"] = currentSelection if not currentSelection.isEmpty(
) else None
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentSelection"] = update_dict["currentSelection"]
if currentHighlight is not None:
update_dict["currentHighlight"] = currentHighlight if not currentHighlight.isEmpty(
) else None
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentHighlight"] = update_dict["currentHighlight"]
# create the interactive listener to call our update_vars function (InteractiveListener defined in the GhidraBridgeServer class)
self.interactive_listener = remote_main.GhidraBridgeServer.InteractiveListener(
remote_main.state.getTool(), update_vars)
if namespace is not None:
# add a special var to the namespace to track what we add, so we can remove it easily later
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK] = dict()
# load in all the attrs from remote main, skipping the double underscores and avoiding overloading our own ghidra_bridge (and similar modules)
try:
for attr in set(remote_main._bridge_attrs + list(remote_main._bridge_overrides.keys())):
if not attr.startswith("__") and attr not in EXCLUDED_REMOTE_IMPORTS:
remote_attr = getattr(remote_main, attr)
namespace[attr] = remote_attr
# record what we added to the namespace
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK][attr] = remote_attr
# overload isinstance with bridged_isinstance, so checking bridged objects are of bridged types will just work
namespace["isinstance"] = bridge.bridged_isinstance
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK]["isinstance"] = bridge.bridged_isinstance
# overwrite help with our own function for using ghidra's help
def ghidra_help(param=None):
""" Used when in interactive mode - calls through the bridge to call ghidra's help and capture the output, then print it locally """
if param is not None and not bridge._is_bridged_object(param):
# asking for help on something that isn't bridged - just use the original help
# make sure we have the real help, just in case we've overridden it already
builtin_help = None
try:
from builtins import help as builtin_help # python3
except:
# try falling back to python2 syntax
from __builtin__ import help as builtin_help
builtin_help(param)
else:
# make a remote help call - either param is bridged, or no param (in which case, we'll get the default help for the GhidraScript API)
help_output = remote_main.GhidraBridgeServer.ghidra_help(
param)
pydoc.pager(help_output)
namespace["help"] = ghidra_help
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK]["help"] = ghidra_help
except Exception:
self.unload_flat_api(namespace)
raise
# if we're interactive, keep track of the namespace so we can update the current* values
if self.interactive_mode:
self.namespace_list.append(namespace)
return remote_main
def unload_flat_api(self, namespace=None):
""" If get_flat_api was called with a namespace and loaded methods/fields into it, unload_flat_api will remove them.
Note: if the values don't match what was loaded, we assume the caller has modified for their own reasons, and leave alone.
"""
if namespace is None:
if self.namespace is None:
raise Exception(
"Bridge wasn't initialized with a namespace - need to specify the namespace you want to unload from")
namespace = self.namespace
if self.interactive_mode and namespace in self.namespace_list:
self.namespace_list.remove(namespace)
if GHIDRA_BRIDGE_NAMESPACE_TRACK in namespace:
for key, value in namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK].items():
if key in namespace:
# we use "is", not ==, because we're checking it's the same object, not just that it matches
if namespace[key] is value:
del namespace[key]
else:
raise Exception(GHIDRA_BRIDGE_NAMESPACE_TRACK +
" not present in namespace - get_flat_api() didn't load into this namespace")
def get_ghidra_api(self):
""" get the ghidra api - `ghidra = bridge.get_ghidra_api()` equivalent to doing `import ghidra` in your script.
Note that the module returned from get_flat_api() will also contain the ghidra module, so you may not need to call this.
"""
return self.bridge.remote_import("ghidra")
def get_java_api(self):
""" get the java namespace - `java = bridge.get_java_api()` equivalent to doing `import java` in your script.
Note that the module returned from get_flat_api() will also contain the java module, so you may not need to call this.
"""
return self.bridge.remote_import("java")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.namespace is not None:
self.unload_flat_api(self.namespace)
if self.interactive_listener is not None:
self.interactive_listener.stop_listening()
|
StarcoderdataPython
|
5115915
|
import NodeDefender
fields = {'type' : 'value', 'readonly' : True, 'name' : 'Celsius', 'web_field'
: True}
info = {'number' : '1', 'name' : 'AirTemperature', 'commandclass' : 'msensor'}
def event(payload):
data = {'commandclass' : NodeDefender.icpe.zwave.commandclass.msensor.info,
'commandclasstype' : info, 'fields' : fields}
if payload['unit'] == '1': # Fahrenheit
return None
data['value'] = int(payload['data'], 0) / 10
data['state'] = True if data['value'] else False
data['icon'] = 'fa fa-thermometer-half'
return data
def icon(value):
return 'fa fa-thermometer-half'
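# Hedged example (payload layout inferred from the code above, not from
# NodeDefender documentation): a raw reading of 0xE6 in unit '0' yields 23.0
# degrees Celsius.
#
# event({'unit': '0', 'data': '0xE6'})
# # -> {..., 'value': 23.0, 'state': True, 'icon': 'fa fa-thermometer-half'}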
|
StarcoderdataPython
|
1673697
|
<filename>Azure Microservices/CosmosDBTest/__init__.py
import logging
import azure.functions as func
import pymongo
from bson.json_util import dumps
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
name = req.params.get('name')
if not name:
try:
url = ""
client = pymongo.MongoClient(url)
database = client['notes_db']
collection = database['notes']
result = collection.find({})
result = dumps(result)
return func.HttpResponse(result,mimetype="application/json",charset="utf-8", status_code=200)
except:
return func.HttpResponse("Bad request", status_code=400)
else:
try:
url = ""
client = pymongo.MongoClient(url)
database = client['notes_db']
collection = database['notes']
result = collection.find_one({'title': name})
result = dumps(result)
print(result)
return func.HttpResponse(result,mimetype="application/json",charset="utf-8", status_code=200)
except:
return func.HttpResponse("Bad request", status_code=400)
|
StarcoderdataPython
|
11305222
|
<gh_stars>1-10
# Read out DSMR P1
# (c) 10-2012 - GJ - free to copy and paste
versie = "1.0"
import sys
import serial
##############################################################################
#Main program
##############################################################################
print ("DSMR P1 uitlezen", versie)
print ("Control-C om te stoppen")
print ("Pas eventueel de waarde ser.port aan in het python script")
#Set COM port config
ser = serial.Serial()
ser.baudrate = 9600
ser.bytesize=serial.SEVENBITS
ser.parity=serial.PARITY_EVEN
ser.stopbits=serial.STOPBITS_ONE
ser.xonxoff=0
ser.rtscts=0
ser.timeout=20
ser.port="/dev/ttyUSB0"
#Open COM port
try:
ser.open()
except:
sys.exit ("Fout bij het openen van %s. Aaaaarch." % ser.name)
#Initialize
#p1_teller is mijn tellertje voor van 0 tot 20 te tellen
p1_teller=0
while p1_teller < 20:
p1_line=''
#Read 1 line from the serial port
try:
p1_raw = ser.readline()
except:
sys.exit ("Seriele poort %s kan niet gelezen worden. Aaaaaaaaarch." % ser.name )
p1_str=str(p1_raw)
p1_line=p1_str.strip()
# als je alles wil zien moet je de volgende line uncommenten
print (p1_line)
p1_teller = p1_teller +1
#Close port and show status
try:
ser.close()
except:
sys.exit ("Oops %s. Programma afgebroken. Kon de seriele poort niet sluiten." % ser.name )
|
StarcoderdataPython
|
9799796
|
<filename>packages/core/minos-microservice-aggregate/minos/aggregate/transactions/repositories/memory.py<gh_stars>100-1000
from datetime import (
datetime,
)
from typing import (
AsyncIterator,
Optional,
)
from uuid import (
UUID,
)
from minos.common import (
current_datetime,
)
from ...exceptions import (
TransactionRepositoryConflictException,
)
from ..entries import (
TransactionEntry,
)
from ..entries import TransactionStatus as s
from .abc import (
TransactionRepository,
)
class InMemoryTransactionRepository(TransactionRepository):
"""In Memory Transaction Repository class."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._storage = dict()
async def _submit(self, transaction: TransactionEntry) -> TransactionEntry:
transaction.updated_at = current_datetime()
if transaction.uuid in self._storage:
status = self._storage[transaction.uuid].status
if (
(status == s.PENDING and transaction.status not in (s.PENDING, s.RESERVING, s.REJECTED))
or (status == s.RESERVING and transaction.status not in (s.RESERVED, s.REJECTED))
or (status == s.RESERVED and transaction.status not in (s.COMMITTING, s.REJECTED))
or (status == s.COMMITTING and transaction.status not in (s.COMMITTED,))
or (status == s.COMMITTED)
or (status == s.REJECTED)
):
raise TransactionRepositoryConflictException(
f"{transaction!r} status is invalid respect to the previous one."
)
self._storage[transaction.uuid] = TransactionEntry(
uuid=transaction.uuid,
destination_uuid=transaction.destination_uuid,
status=transaction.status,
event_offset=transaction.event_offset,
updated_at=transaction.updated_at,
event_repository=transaction._event_repository,
transaction_repository=transaction._transaction_repository,
)
return transaction
async def _select(
self,
uuid: Optional[UUID] = None,
uuid_ne: Optional[UUID] = None,
uuid_in: Optional[tuple[UUID, ...]] = None,
destination_uuid: Optional[UUID] = None,
status: Optional[s] = None,
status_in: Optional[tuple[str, ...]] = None,
event_offset: Optional[int] = None,
event_offset_lt: Optional[int] = None,
event_offset_gt: Optional[int] = None,
event_offset_le: Optional[int] = None,
event_offset_ge: Optional[int] = None,
updated_at: Optional[datetime] = None,
updated_at_lt: Optional[datetime] = None,
updated_at_gt: Optional[datetime] = None,
updated_at_le: Optional[datetime] = None,
updated_at_ge: Optional[datetime] = None,
**kwargs,
) -> AsyncIterator[TransactionEntry]:
# noinspection DuplicatedCode
def _fn_filter(transaction: TransactionEntry) -> bool:
if uuid is not None and uuid != transaction.uuid:
return False
if uuid_ne is not None and uuid_ne == transaction.uuid:
return False
if uuid_in is not None and transaction.uuid not in uuid_in:
return False
if destination_uuid is not None and destination_uuid != transaction.destination_uuid:
return False
if status is not None and status != transaction.status:
return False
if status_in is not None and transaction.status not in status_in:
return False
if event_offset is not None and event_offset != transaction.event_offset:
return False
if event_offset_lt is not None and event_offset_lt <= transaction.event_offset:
return False
if event_offset_gt is not None and event_offset_gt >= transaction.event_offset:
return False
if event_offset_le is not None and event_offset_le < transaction.event_offset:
return False
if event_offset_ge is not None and event_offset_ge > transaction.event_offset:
return False
if updated_at is not None and updated_at != transaction.updated_at:
return False
if updated_at_lt is not None and updated_at_lt <= transaction.updated_at:
return False
if updated_at_gt is not None and updated_at_gt >= transaction.updated_at:
return False
if updated_at_le is not None and updated_at_le < transaction.updated_at:
return False
if updated_at_ge is not None and updated_at_ge > transaction.updated_at:
return False
return True
iterable = iter(self._storage.values())
iterable = filter(_fn_filter, iterable)
for item in iterable:
yield item
|
StarcoderdataPython
|
175287
|
<filename>src/modules/ArpFilter.py
from impacket import ImpactDecoder
import inspect
def dump(obj):
for name, data in inspect.getmembers(obj):
if name == '__builtins__':
continue
print '%s :' % name, repr(data)
class ArpFilter():
attributes = None
myIpAddresses = None
logger = None
def __init__(self, attributes, logger, myIpAddresses):
self.attributes = attributes
self.logger = logger
self.myIpAddresses = myIpAddresses
def rule(self):
rule = "arp"
return rule
def run(self, header, payload):
self.logger.debug("run")
self.logger.debug('Setting filter')
rip = ImpactDecoder.EthDecoder().decode(payload)
print rip
proto = -1
try:
proto = rip.child().get_ip_p()
except AttributeError:
pass
etherType = rip.get_ether_type()
if etherType != 2054:
self.logger.warn("doesnt seem to be ARP..")
return None
arp = rip.child()
print ("op name:"+str(arp.get_op_name(arp.get_ar_op())))
print ("src mac:"+str(arp.as_hrd(arp.get_ar_sha())))
print ("src ip:"+str(arp.as_pro(arp.get_ar_spa())))
print ("queried ip:"+str(arp.as_pro(arp.get_ar_tpa())))
# never send messages
return None
|
StarcoderdataPython
|
5183031
|
<reponame>ningyixue/AIPI530_Final_Project
from abc import ABCMeta, abstractmethod
from typing import Optional
import torch
from ..encoders import Encoder, EncoderWithAction
class QFunction(metaclass=ABCMeta):
@abstractmethod
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
reduction: str = "mean",
) -> torch.Tensor:
pass
@property
def action_size(self) -> int:
pass
class DiscreteQFunction(QFunction):
@abstractmethod
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
@abstractmethod
def compute_target(
self, x: torch.Tensor, action: Optional[torch.Tensor]
) -> torch.Tensor:
pass
def __call__(self, x: torch.Tensor) -> torch.Tensor:
return self.forward(x)
@property
def encoder(self) -> Encoder:
pass
class ContinuousQFunction(QFunction):
@abstractmethod
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
pass
@abstractmethod
def compute_target(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
pass
def __call__(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
return self.forward(x, action)
@property
def encoder(self) -> EncoderWithAction:
pass
|
StarcoderdataPython
|
1884549
|
import bokego.go as go
import os
from functools import reduce
from random import randint
from math import sqrt
from tqdm import trange
import numpy as np
import pandas as pd
import torch
from torch.distributions.categorical import Categorical
from torch.utils.data import Dataset, DataLoader
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
SOFT = nn.Softmax(dim = 1)
#v0.3 Policy Net
class PolicyNet(nn.Module):
'''(27,9,9) torch.Tensor --> (81) torch.Tensor
Takes input from features(game: go.Game).
The softmax of the output is the prior distribution over moves (0--80)
Layers:
1 5x5 convolution: 9x9 -> 9x9
6 3x3 convolution: 9x9 -> 9x9
1 1x1 convolution with untied bias: 9x9 -> 9x9
'''
def __init__(self):
super(PolicyNet, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(27,128,5, padding = 2),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding =1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
Conv2dUntiedBias(9,9,128,1,1))
def forward(self, x):
x = self.conv(x)
x = x.view(-1, 81)
return x
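# Hedged shape check (illustrative only): batched 27-channel features such as
# those produced by features(game) below map to one logit per board coordinate.
#
# net = PolicyNet().eval()
# logits = net(torch.randn(1, 27, 9, 9))  # logits.shape == (1, 81)
# probs = SOFT(logits)                    # prior distribution over moves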
class ValueNet(nn.Module):
'''(27,9,9) torch.Tensor --> (1) torch.Tensor
Takes input from features(game: go.Game).
The output is the expected value of the game from current player's perspective;
win = 1, lose = -1
Layers:
1 5x5 convolution: 9x9 -> 9x9
6 3x3 convolutions: 9x9 -> 9x9
1 convolution with untied bias: 9x9 -> 9x9
2 fully connected layers 81 -> 64 -> 1
'''
def __init__(self):
super(ValueNet, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(27,128,5, padding = 2),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding =1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.BatchNorm2d(128),
nn.ReLU(),
Conv2dUntiedBias(9,9,128,1,1))
self.lin1 = nn.Linear(81,64)
self.lin2 = nn.Linear(64,1)
self.bn = nn.BatchNorm2d(1)
self.lin_bn = nn.BatchNorm1d(64)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
def load_policy_dict(self, policy_dict):
'''load convolution weights from a PolicyNet state dict'''
new_dict = self.state_dict()
new_dict.update(policy_dict)
self.load_state_dict(new_dict)
def forward(self, x):
x = self.relu(self.bn(self.conv(x)))
x = x.view(-1, 81)
x = self.relu(self.lin_bn(self.lin1(x)))
return self.tanh(self.lin2(x))
#v0.2 Policy Net
class PolicyNet_v2(nn.Module):
def __init__(self):
super(PolicyNet_v2, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(27,64,5, padding = 2),
nn.ReLU(),
nn.Conv2d(64,128,3, padding =1),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.ReLU(),
nn.Conv2d(128,128,3, padding = 1),
nn.ReLU(),
Conv2dUntiedBias(9,9,128,1,1))
def forward(self, x):
x = self.conv(x)
x = x.view(-1, 81)
return x
class Conv2dUntiedBias(nn.Module):
def __init__(self, height, width,
in_channels, out_channels,
kernel_size, stride=1,
padding=0, dilation=1,
groups=1):
super(Conv2dUntiedBias, self).__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.weight = Parameter(torch.Tensor(
out_channels, in_channels // groups, *kernel_size))
self.bias = Parameter(torch.Tensor(out_channels, height, width))
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
output = F.conv2d(input, self.weight, None, self.stride,
self.padding, self.dilation, self.groups)
# add untied bias
output += self.bias.unsqueeze(0).repeat(input.size(0), 1, 1, 1)
return output
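# Hedged shape check (illustrative only): with the arguments used by PolicyNet
# above, the layer maps (N, 128, 9, 9) activations to (N, 1, 9, 9) and learns a
# separate bias for every one of the 81 board coordinates.
#
# layer = Conv2dUntiedBias(9, 9, 128, 1, 1)
# out = layer(torch.randn(2, 128, 9, 9))  # out.shape == (2, 1, 9, 9)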
def features(game: go.Game):
''' go.Game --> (27,9,9) torch.Tensor
Compute the input features from the board state
9x9 layer index: feature
------------------------
0: player stones
1 if coord has player's stones, else 0
1: opponent stones
1 if coord has opponent's stone, else 0
2: empty
1 if coord is empty, else 0
3: turn
all 1's if it is B's turn, all 0's if it is W's turn
4: last move
1 if coord was last move, else 0
5: legal
1 if coord is legal move for player, else 0
6-12: liberties
n if stone at coord has n liberties, else 0
layer 5 has coords with 1 liberty
layer 6 has coords with 2 liberties
...
layer 11 has coords with >6 liberties
13-19: liberties after playing
n if coord is a legal move and player's stone has n liberties after playing, else 0
liberties are separated the same way as 5-11
20-26: number of captures
n if playing at coord would capture n opponent stones, else 0
number of captures are separated the same way as 5-11
'''
plyr = np.expand_dims(game.to_numpy(), 0)
oppt = np.copy(plyr)
turn_num = (1 if game.turn%2 == 0 else -1)
color = (go.BLACK if turn_num == 1 else go.WHITE)
plyr[plyr != turn_num] = 0
oppt[oppt == turn_num] = 0
plyr *= turn_num
oppt *= -turn_num
empty = np.invert((plyr + oppt).astype(bool)).astype(float)
if color == go.BLACK:
turn = np.ones((1,9,9), dtype = float)
else:
turn = np.zeros((1,9,9), dtype = float)
last_mv = np.zeros(81, dtype = float)
if isinstance(game.last_move, int) and game.last_move >= 0:
last_mv[game.last_move] = 1.0
last_mv = last_mv.reshape(1,9,9)
legal_list = game.get_legal_moves()
legal = np.zeros(81)
legal[legal_list] = 1
libs = np.array(game.get_liberties()).reshape(9,9)
libs_after = np.zeros(81)
caps = np.zeros(81)
for sq_c in legal_list:
new_board, opp_captured = go.get_caps(go.place_stone(color, game.board,sq_c), sq_c, color)
if opp_captured:
libs_after[sq_c] = go.get_stone_lib(new_board, sq_c)
caps[sq_c] = len(opp_captured)
else:
libs_after[sq_c] = go.get_stone_lib(go.place_stone(color, game.board, sq_c), sq_c)
libs_after = libs_after.reshape(9,9)
caps = caps.reshape(9,9)
legal = legal.reshape(1,9,9)
def separate(arr):
out = np.zeros((7,9,9), dtype = float)
for i in range(6):
out[i, arr == i+1] = i+1
out[6, arr >6] = 7
return out
fts = np.vstack( [plyr, oppt, empty, turn, last_mv, legal,\
separate(libs) , separate(libs_after) , separate(caps)])
return torch.from_numpy(fts).float()
def policy_dist(policy: PolicyNet,
game: go.Game,
device = torch.device("cpu"),
fts: torch.Tensor=None):
'''Return torch.distribution.Categorial distribution over coordinates'''
if fts is None:
fts = features(game)
fts = fts.unsqueeze(0).to(device)
probs = SOFT(policy(fts)).squeeze(0)
dist = Categorical(probs)
return dist
def value(v: ValueNet,
game: go.Game,
device = torch.device("cpu"),
fts: torch.Tensor=None):
if fts is None:
fts = features(game)
fts = fts.unsqueeze(0).to(device)
return v(fts).item()
def policy_sample(policy: PolicyNet,
game: go.Game,
device = torch.device("cpu"),
fts: torch.Tensor=None):
'''sample a move from policy distribution. Use policy_dist
for multiple samplings'''
if fts is None:
fts = features(game)
fts = fts.unsqueeze(0).to(device)
probs = SOFT(policy(fts)).squeeze(0)
m = Categorical(probs)
return m.sample()
class NinebyNineGames(Dataset):
def __init__(self, path, **kwargs):
'''Load and process data for training policy or value net.
args:
path: path to csv or numpy zipped file (.npz)
csv must have columns (board, ko, turn, move) or (board, ko, turn, val).
npz must have "features" with size (n,27,9,9)
and either "vals" or "moves" with size (n, 1)
kwargs:
out_path: path to save input/target tensors (default "./data.npz")
transform: callable transformation to apply to the data
'''
self.out_path = kwargs.get("out_path", os.path.join(os.getcwd(), "data"))
self.vals = False
if path.endswith(".csv"):
self.inputs, self.targets = process_csv(path, self.out_path)
elif path.endswith(".npz"):
load = np.load(path)
self.inputs = torch.from_numpy(load["features"]).float()
if "vals" in load.files:
self.targets = load["vals"]
self.vals = True
else:
self.targets = load["moves"]
self.targets = torch.from_numpy(self.targets).float()
assert self.inputs[0,].shape == (27,9,9)
assert self.targets[0,].shape == (1,)
def __len__(self):
return len(self.inputs)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if self.transform:
if self.vals:
return self.transform(self.inputs[idx]), self.targets[idx]
else:
return self.transform(self.inputs[idx], self.targets[idx])
return self.inputs[idx], self.targets[idx]
def rand_refl(features, moves = None):
do_refl = randint(0, 1)
if do_refl:
features = torch.transpose(features,2,3)
if moves != None:
x,y = moves//9, moves%9
moves = 9*y + x
if moves is None:
return features
return features, moves
def rand_rot(features, moves = None):
do_rot = randint(0, 3)
if do_rot:
features = torch.rot90(features, do_rot,[3,2])
if moves != None:
moves = (moves*9+8-moves//9)%81
if moves is None:
return features
return features, moves
def compose(*args):
def compose_two(f,g):
return lambda x: f(g(x))
return reduce(compose_two, args, lambda x: x)
def process_csv(path, npz_name):
cols = pd.read_csv(path, nrows = 0).columns
convert = {col: lambda x: eval(x) for col in cols}
convert["board"] = lambda x: x
boards = pd.read_csv(path, converters = convert, low_memory = False)
print(f"Processing features from {path}...")
fts = np.zeros(shape = (len(boards),27,9,9), dtype = np.int8)
targets = np.zeros(shape = (len(boards),1), dtype = np.int8)
for i in trange(len(boards)):
board, ko, last, target = boards.iloc[i]
g = go.Game(board, ko, last)
g.turn = 1 if g.board[last] == go.BLACK else 0
fts[i] = features(g)
if cols[-1] == "val":
targets[i] = -1 if target else 1
elif cols[-1] == "move":
targets[i] = target
key = "vals" if cols[-1] == "val" else "moves"
np.savez_compressed(npz_name, **{"features": fts, key: targets})
return torch.from_numpy(fts).float(), torch.from_numpy(targets).float()
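# Hedged usage sketch (not part of the original module): the archive name is a
# placeholder for a .npz file produced by process_csv above.
#
# ds = NinebyNineGames("selfplay.npz", transform=rand_refl)
# loader = DataLoader(ds, batch_size=64, shuffle=True)
# fts, targets = next(iter(loader))  # fts: (64, 27, 9, 9)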
|
StarcoderdataPython
|
193384
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import time
import click
from oslo_config import cfg
from oslo_log import log as logging
import rq
from tabulate import tabulate
from act.engine import config
from act.engine import metrics
from act.engine import utils
LOG = logging.getLogger(__name__)
LOGO = """
_____ ____ _____
(____ |/ ___|_ _)
/ ___ ( (___ | |
\_____|\____) |_|
"""
LOGO2 = """
____ ____ _____
____| | |
|____| |____ |
"""
green = functools.partial(click.style, fg='green')
red = functools.partial(click.style, fg='red')
def get_scale(x):
"""Finds the lowest scale where x <= scale."""
scales = [20, 50, 100, 200, 400, 600, 800, 1000]
for scale in scales:
if x <= scale:
return scale
return x
def make_canvas(width, height):
return [[' ' for x in range(width)] for y in range(height)]
def place(canvas, block, x, y):
lines = block.split('\n')
for i, line in enumerate(lines):
if y + i >= len(canvas):
break
for j, ch in enumerate(line):
if j + x >= len(canvas[y + i]):
break
canvas[y + i][j + x] = ch
def render(canvas):
for line in canvas:
click.echo(''.join(line))
def show():
click.clear()
term_width, term_height = click.get_terminal_size()
canvas = make_canvas(term_width, term_height - 1)
chart_width = min(20, term_width - 20)
m = metrics.get_all_metrics()
max_count = max(metric.value for metric in m.values())
scale = get_scale(max_count)
ratio = chart_width * 1.0 / scale
t = []
headers = ['param', 'value', 'chart']
keys = sorted(m.keys())
for key in keys:
metric = m[key]
count = metric.value
color = green if metric.mood == metrics.MOOD_HAPPY else red
chart = color('|' + u'█' * int(ratio * count))
t.append([key, count, chart])
place(canvas, LOGO2, 0, 0)
s = tabulate(t, headers=headers, tablefmt='simple')
place(canvas, s, 25, 0)
render(canvas)
time.sleep(cfg.CONF.interval)
def run():
utils.init_config_and_logging(config.MONITOR_OPTS)
redis_connection = utils.make_redis_connection(host=cfg.CONF.redis_host,
port=cfg.CONF.redis_port)
with rq.Connection(redis_connection):
try:
while True:
show()
except KeyboardInterrupt:
LOG.info('Shutdown')
if __name__ == '__main__':
run()
|
StarcoderdataPython
|
1820089
|
# Copyright 2019 the ProGraML authors.
#
# Contact <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for labelling program graphs with alias sets."""
import random
import typing
import networkx as nx
import numpy as np
from compilers.llvm import opt_util
from labm8.py import app
from labm8.py import decorators
FLAGS = app.FLAGS
app.DEFINE_integer(
"alias_set_min_size",
2,
"The minimum number of pointers in an alias set to be used as a labelled "
"example.",
)
@decorators.timeout(seconds=120)
def AnnotateAliasSet(
g: nx.MultiDiGraph,
root_identifier: str,
identifiers_in_set: typing.List[str],
x_label: str = "x",
y_label: str = "y",
false=False,
true=True,
) -> int:
"""
Args:
g: The graph.
root_identifier: A name of a node in the alias set.
identifiers_in_set: The names of the nodes in the alias set.
x_label: The graph 'x' attribute property attribute name.
y_label: The graph 'y' attribute property attribute name.
false: The value to set for nodes not in the alias set.
true: The value to set for nodes in the alias set.
Returns:
The number of identifiers in the alias set.
"""
# Set all of the nodes as not the root identifier and not part of the alias
# set. X labels are a list which concatenates the original graph 'x'
# embedding indices with a [0,1] value for false/true, respectively.
for _, data in g.nodes(data=True):
data[x_label] = [data[x_label], 0]
data[y_label] = false
g.nodes[root_identifier][x_label] = [g.nodes[root_identifier][x_label][0], 1]
# Mark the nodes in the alias set.
for pointer in identifiers_in_set:
if pointer not in g.nodes:
identifier_nodes = [
node for node, type_ in g.nodes(data="type") if type_ == "identifier"
]
raise ValueError(
f"Pointer `{pointer}` not in function with identifiers "
f"{identifier_nodes}"
)
g.nodes[pointer][y_label] = true
return len(identifiers_in_set)
def MakeAliasSetGraphs(
g: nx.MultiDiGraph,
bytecode: str,
n: typing.Optional[int] = None,
false=False,
true=True,
) -> typing.Iterable[nx.MultiDiGraph]:
"""Produce up to `n` alias set graphs.
Args:
g: The unlabelled input graph.
bytecode: The bytecode which produced the input graph.
n: The maximum number of graphs to produce. Multiple graphs are produced by
selecting different root pointers for alias sets. If `n` is provided,
the number of graphs generated will be in the range
1 <= x <= min(num_alias_sets, n), where num_alias_sets is the number of
alias sets larger than --alias_set_min_size. If n is None, num_alias_sets
graphs will be produced.
false: TODO(github.com/ChrisCummins/ProGraML/issues/2): Unused. This method
is hardcoded to use 3-class 1-hots.
true: TODO(github.com/ChrisCummins/ProGraML/issues/2): Unused. This method
is hardcoded to use 3-class 1-hots.
Returns:
A generator of annotated graphs, where each graph has 'x' and 'y' labels on
the statement nodes, and additionally a 'data_flow_max_steps_required'
attribute which is set to the number of pointers in the alias set.
"""
# TODO(github.com/ChrisCummins/ProGraML/issues/2): Replace true/false args
# with a list of class values for all graph annotator functions.
del false
del true
# Build the alias sets for the given bytecode.
alias_sets_by_function = opt_util.GetAliasSetsByFunction(bytecode)
functions = {
function
for node, function in g.nodes(data="function")
# Not all nodes have a 'function' attribute, e.g. the magic root node.
if function
}
# Silently drop alias sets for functions which don't exist in the graph.
alias_sets_to_delete = []
for function in alias_sets_by_function:
if function not in functions:
alias_sets_to_delete.append(function)
if alias_sets_to_delete:
for function in alias_sets_to_delete:
del alias_sets_by_function[function]
app.Log(
2,
"Removed %d alias sets generated from bytecode but not found in "
"graph: %s",
len(alias_sets_to_delete),
alias_sets_to_delete,
)
function_alias_set_pairs: typing.List[
typing.Tuple[str, opt_util.AliasSet]
] = []
# Flatten the alias set dictionary and ignore any alias sets that are smaller
# than the threshold size.
for function, alias_sets in alias_sets_by_function.items():
function_alias_set_pairs += [
(function, alias_set)
for alias_set in alias_sets
if len(alias_set.pointers) >= FLAGS.alias_set_min_size
]
# Select `n` random alias sets to generate labelled graphs for.
if n and len(function_alias_set_pairs) > n:
random.shuffle(function_alias_set_pairs)
function_alias_set_pairs = function_alias_set_pairs[:n]
for function, alias_set in function_alias_set_pairs:
# Translate the must/may alias property into 3-class 1-hot labels.
if alias_set.type == "may alias":
false = np.array([1, 0, 0], np.int64)
true = np.array([0, 1, 0], np.int64)
elif alias_set.type == "must alias":
false = np.array([1, 0, 0], np.int64)
true = np.array([0, 0, 1], np.int64)
else:
raise ValueError(f"Unknown alias set type `{alias_set.type}`")
# Transform pointer name into the node names produced by the ComposeGraphs()
# method in the graph builder. When we compose multiple graphs, we add the
# function name as a prefix, and `_operand` suffix to identifier nodes.
pointers = [
f"{function}_{p.identifier}_operand" for p in alias_set.pointers
]
root_pointer = random.choice(pointers)
labelled = g.copy()
labelled.data_flow_max_steps_required = AnnotateAliasSet(
labelled, root_pointer, pointers, false=false, true=true
)
yield labelled
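# Hedged usage sketch (not part of the original module): `graph` and `bytecode`
# are assumed to come from the ProGraML graph builder and the LLVM pipeline used
# elsewhere in this project.
#
# for labelled in MakeAliasSetGraphs(graph, bytecode, n=5):
#     print(labelled.data_flow_max_steps_required)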
|
StarcoderdataPython
|
96063
|
import os
from catalog import app
app.run(debug=True, host=os.environ.get('CATALOG_HOST'), port=os.environ.get('CATALOG_PORT'))
|
StarcoderdataPython
|
9603122
|
<reponame>kevin0120/onesphere
# -*- coding: utf-8 -*-
{
'name': "onesphere_spc",
'summary': """
้็จSPCๆจกๅ""",
'description': """
้็จSPCๆจกๅ
""",
'author': "ไธๆตทๆไบซๆฐๆฎ็งๆๆ้ๅ
ฌๅธ",
'website': "http://www.oneshare.com.cn",
'category': 'Manufacturing/Manufacturing',
'version': '192.168.127.12',
# any module necessary for this one to work correctly
'depends': ['mrp'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/spc_menu_views.xml',
'views/assets.xml',
],
'qweb': [
"static/xml/spc_view_template.xml",
],
'application': True,
}
|
StarcoderdataPython
|
6512527
|
<filename>examples/example_13.py<gh_stars>1-10
# SymBeam examples suit
# ==========================================================================================
# <NAME> <<EMAIL>> 2020
# Features: 1. Symbolic length
# 2. Fixed
# 3. Symbolic point moment
# 4. Classical clamped beam problem
import matplotlib.pyplot as plt
from symbeam import beam
test_beam = beam("L", x0=0)
test_beam.add_support(0, "fixed")
test_beam.add_point_moment("L", "M")
test_beam.solve()
fig, ax = test_beam.plot()
plt.savefig(__file__.split(".py")[0] + ".svg")
|
StarcoderdataPython
|
1726953
|
<gh_stars>1-10
"""
Make graphs (lifecycles, ...)
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import json
from collections import OrderedDict
try:
import pygraphviz as pgv
except ImportError:
print('(optional) install pygraphviz to generate graphs')
sys.exit(0)
from iceprod.server import get_pkgdata_filename
def main():
table_filename = get_pkgdata_filename('iceprod.server','data/etc/db_config.json')
db_tables = json.load(open(table_filename),object_pairs_hook=OrderedDict)
for k in db_tables['status_graphs']:
outfile_name = os.path.join('static','lifecycle_'+k+'.png')
if os.path.exists(outfile_name) and os.path.getmtime(outfile_name) > os.path.getmtime(table_filename):
print('graph',outfile_name,'already exists. skipping')
continue
G = pgv.AGraph(strict=False,directed=True)
G.add_nodes_from(db_tables['status_options'][k])
for row in db_tables['status_graphs'][k]:
if row[-1] == 'std':
c = 'cornflowerblue'
elif row[-1] == 'auto':
c = 'cyan2'
elif row[-1] == 'debug':
c = 'chartreuse2'
elif row[-1] == 'manual':
c = 'firebrick2'
G.add_edge(row[0],row[1],color=c)
G.draw(outfile_name, prog='dot')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
8138662
|
<gh_stars>1-10
from django.contrib import admin
from pinax.apps.account.models import Account, PasswordReset
class PasswordResetAdmin(admin.ModelAdmin):
list_display = ["user", "temp_key", "timestamp", "reset"]
admin.site.register(Account)
admin.site.register(PasswordReset, PasswordResetAdmin)
|
StarcoderdataPython
|
5017990
|
# -*- coding: utf-8 -*-
def break_text(sentence, k):
words = sentence.split()
broken_text = []
char_count = -1
current_words = []
idx = 0
while idx < len(words):
word = words[idx]
if len(word) > k:
return None
if char_count + len(word) + 1 <= k:
char_count += len(word) + 1
current_words.append(word)
idx += 1
else:
broken_text.append(' '.join(current_words))
char_count = -1
current_words = []
if current_words:
broken_text.append(' '.join(current_words))
return broken_text
if __name__ == '__main__':
sentence = 'the quick brown fox jumps over the lazy dog'
k = 10
print(' Input:', sentence)
print(' k:', k)
texts = break_text(sentence, k)
print(' Output:')
print('\n'.join(texts))
|
StarcoderdataPython
|
3580856
|
<gh_stars>0
from __future__ import print_function
import cv2 as cv
import numpy as np
import time
from rect_selector import RectSelector
from processor import Processor
from play import Play
class App:
def __init__(self, camera):
self.cap = cv.VideoCapture(camera)
# run `ffmpeg -f v4l2 -list_formats all -i /dev/video0` to check
# list of available video modes
resolutions = "1280x720"
resolutions = [int(i) for i in "1280x720".split('x')]
self.cap.set(cv.CAP_PROP_FRAME_WIDTH, resolutions[0])
self.cap.set(cv.CAP_PROP_FRAME_HEIGHT, resolutions[1])
_, self.frame = self.cap.read()
self.processor = Processor(self.frame, "camera.yml")
self.player = Play()
cv.namedWindow('processed')
self.rect_sel = RectSelector('processed', self.onrect)
self.the_rect = 0, 0, self.processor.w, self.processor.h
self.color = (128,255,255)
self.start_play = False
self.paused = False
self.store = False
self.end = False
self.winner = None
self.store_points = []
self.store_radius = []
self.location = tuple()
def reset_store(self):
self.store_points = []
self.store_radius = []
def onrect(self, rect):
self.the_rect = rect
print("select rect:", self.the_rect)
self.reset_store()
self.store = True
def read_frame(self, timeout):
start_time = time.time()
while True:
_, self.frame = self.cap.read()
self.frame, tsps,tsrs = self.processor.centers_detect(self.frame.copy(),
self.the_rect, self.color, self.store)
self.store_points.extend(tsps)
self.store_radius.extend(tsrs)
if (time.time() - start_time) > timeout:
break
def ai_play(self):
self.read_frame(0.5)
self.location, self.end, self.winner = self.player.game.play()
print("AI move:", self.location)
self.processor.store_coors.append(tuple(self.location))
self.processor.grid(self.frame, self.store_points, self.store_radius, self.paused)
def run(self):
while True:
if not self.start_play:
self.read_frame(0)
self.rect_sel.draw(self.frame)
elif not self.paused:
self.ai_play()
cv.imshow("processed", self.frame)
k = cv.waitKey(5) & 0xFF
if k == 27:
break
if k == ord('p'):
print(len(self.store_points))
if k == ord('c'):
print("clean store coordinates!")
self.processor.store_coors = []
if k == ord('s'):
cv.imwrite('frame.png',self.frame)
print("frame saved")
if k == ord(' ') and self.store:
self.start_play = True
self.paused = not self.paused
if self.paused:
self.ai_play()
else:
durations = 1.4
while True:
self.read_frame(durations)
ai_loc = self.processor.store_coors[-1]
self.processor.grid(self.frame, self.store_points, self.store_radius, self.paused)
location = self.processor.store_coors[-1]
if ai_loc != location:
                            location, self.end, self.winner = self.player.game.play(location)
print("Human move:", location)
break
print("Human not found,trying..")
durations += 0.3
self.reset_store()
if self.end:
print("game end")
print("the winner is:", winner)
break
cv.destroyAllWindows()
if __name__ == '__main__':
try:
camera = 0
except:
camera = 1
App(camera).run()
|
StarcoderdataPython
|
6407277
|
"""
biped
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from mcedit2.rendering.chunkmeshes.entity.modelrenderer import ModelRenderer
log = logging.getLogger(__name__)
class ModelBiped(object):
textureWidth = 64
textureHeight = 32
def __init__(self, expandOffset=0.0, headOffset=0.0):
self.bipedHead = ModelRenderer(self, 0, 0)
self.bipedHead.addBox(-4.0, -8.0, -4.0, 8, 8, 8, expandOffset)
self.bipedHead.setCenterPoint(0.0, 0.0 + headOffset, 0.0)
self.bipedHeadwear = ModelRenderer(self, 32, 0)
self.bipedHeadwear.addBox(-4.0, -8.0, -4.0, 8, 8, 8, expandOffset + 0.5)
self.bipedHeadwear.setCenterPoint(0.0, 0.0 + headOffset, 0.0)
self.bipedBody = ModelRenderer(self, 16, 16)
self.bipedBody.addBox(-4.0, 0.0, -2.0, 8, 12, 4, expandOffset)
self.bipedBody.setCenterPoint(0.0, 0.0 + headOffset, 0.0)
self.bipedRightArm = ModelRenderer(self, 40, 16)
self.bipedRightArm.addBox(-3.0, -2.0, -2.0, 4, 12, 4, expandOffset)
self.bipedRightArm.setCenterPoint(-5.0, 2.0 + headOffset, 0.0)
self.bipedLeftArm = ModelRenderer(self, 40, 16)
self.bipedLeftArm.mirror = True
self.bipedLeftArm.addBox(-1.0, -2.0, -2.0, 4, 12, 4, expandOffset)
self.bipedLeftArm.setCenterPoint(5.0, 2.0 + headOffset, 0.0)
self.bipedRightLeg = ModelRenderer(self, 0, 16)
self.bipedRightLeg.addBox(-2.0, 0.0, -2.0, 4, 12, 4, expandOffset)
self.bipedRightLeg.setCenterPoint(-1.9, 12.0 + headOffset, 0.0)
self.bipedLeftLeg = ModelRenderer(self, 0, 16)
self.bipedLeftLeg.mirror = True
self.bipedLeftLeg.addBox(-2.0, 0.0, -2.0, 4, 12, 4, expandOffset)
self.bipedLeftLeg.setCenterPoint(1.9, 12.0 + headOffset, 0.0)
@property
def parts(self):
return [
self.bipedHead,
self.bipedHeadwear,
self.bipedBody,
self.bipedRightArm,
self.bipedLeftArm,
self.bipedRightLeg,
self.bipedLeftLeg
]
class ModelZombie(ModelBiped):
textureWidth = 64
textureHeight = 64
modelTexture = "assets/minecraft/textures/entity/zombie/zombie.png"
id = "Zombie"
class ModelPigZombie(ModelBiped):
textureWidth = 64
textureHeight = 64
modelTexture = "assets/minecraft/textures/entity/zombie_pigman.png"
id = "PigZombie"
class ModelSkeleton(ModelBiped):
modelTexture = "assets/minecraft/textures/entity/skeleton/skeleton.png"
id = "Skeleton"
def __init__(self, expandOffset=1.0, headOffset=0.0):
super(ModelSkeleton, self).__init__(expandOffset, headOffset)
self.bipedRightArm = ModelRenderer(self, 40, 16)
self.bipedRightArm.addBox(-1.0, -2.0, -1.0, 2, 12, 2, expandOffset)
self.bipedRightArm.setCenterPoint(-5.0, 2.0, 0.0)
self.bipedLeftArm = ModelRenderer(self, 40, 16)
self.bipedLeftArm.mirror = True
self.bipedLeftArm.addBox(-1.0, -2.0, -1.0, 2, 12, 2, expandOffset)
self.bipedLeftArm.setCenterPoint(5.0, 2.0, 0.0)
self.bipedRightLeg = ModelRenderer(self, 0, 16)
self.bipedRightLeg.addBox(-1.0, 0.0, -1.0, 2, 12, 2, expandOffset)
self.bipedRightLeg.setCenterPoint(-2.0, 12.0, 0.0)
self.bipedLeftLeg = ModelRenderer(self, 0, 16)
self.bipedLeftLeg.mirror = True
self.bipedLeftLeg.addBox(-1.0, 0.0, -1.0, 2, 12, 2, expandOffset)
self.bipedLeftLeg.setCenterPoint(2.0, 12.0, 0.0)
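# Hedged usage sketch (illustrative, not part of the original module): instantiating one
# of the biped models and inspecting its parts; assumes mcedit2's ModelRenderer can be
# constructed outside of an active rendering context.
if __name__ == "__main__":
    skeleton = ModelSkeleton()
    print(skeleton.id, "uses texture", skeleton.modelTexture)
    print("renderable parts:", len(skeleton.parts))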
|
StarcoderdataPython
|
8113896
|
from __future__ import print_function, division
import signal, importlib, sys, logging, os
import click
from .version import __version__
logLevels = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']
@click.group()
@click.version_option(version=__version__)
def cli():
# to make this script/module behave nicely with unix pipes
# http://newbebweb.blogspot.com/2012/02/python-head-ioerror-errno-32-broken.html
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
@cli.command()
@click.option('--workspace', required=True, type=click.Path(),
help='A directory to place outputs into')
@click.option('--job-db', default=None, type=click.Path(),
help="Path to LSF job sqlite DB [default='<workspace>/job_queue.db']")
@click.option('--ruffus-history', default=None, type=click.Path(),
help="Path to LSF job sqlite DB [default='<workspace>/job_queue.db']")
@click.option('--log', default=sys.stderr, type=click.File('w'),
help="Path to write log details to [default=stdout]")
@click.option('--log-level', default='INFO', type=click.Choice(logLevels),
help='Log Level -- [default=INFO]')
@click.option('--input-vcfs', required=True, type=click.Path(exists=True),
help='A file of chromosomal VCFs to process')
@click.option('--project-name', default='yaps.default', type=click.STRING,
help='A prefix used to name batch jobs')
@click.option('--email', default=None, type=click.STRING,
help='An email used to notify about batch jobs [default=<EMAIL>]')
@click.option('--timeout', default=43200, type=click.INT,
help='Seconds to timeout for LSF job polling [default=43200 {12 hours}]')
@click.option('--config', default=None, type=click.Path(exists=True),
help='An alternative configuration file to test')
def postvqsr(job_db, ruffus_history, log, log_level, input_vcfs, project_name, email, workspace, timeout, config):
conf = importlib.import_module('yaps.configs.postvqsr')
conf.initialize(input_vcfs, project_name, email, workspace, timeout, config)
conf.dump_config()
logLevel = getattr(logging, log_level.upper())
if job_db is None:
job_db = os.path.join(
os.path.abspath(conf.config['workspace']),
'.job_queue.db'
)
if ruffus_history is None:
ruffus_history = os.path.join(
os.path.abspath(conf.config['workspace']),
'.ruffus_history.sqlite'
)
pipeline = importlib.import_module('yaps.pipelines.postvqsr')
pipeline.initialize(job_db, ruffus_history, log, logLevel, input_vcfs)
pipeline.log.info("LSF Job DB : {}".format(job_db))
pipeline.log.info("Ruffus History DB : {}".format(ruffus_history))
pipeline.run()
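# Hedged usage sketch (hypothetical paths and project names, not from the original
# source); assuming the package exposes this click group as a `yaps` console script,
# the postvqsr pipeline would be launched roughly like this:
#
#   yaps postvqsr \
#       --workspace /path/to/workspace \
#       --input-vcfs chromosomal_vcfs.txt \
#       --project-name myproject.postvqsr \
#       --log-level DEBUG
#
# --job-db and --ruffus-history fall back to hidden files inside the workspace.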
|
StarcoderdataPython
|
9721995
|
<reponame>ViolaBuddy/EscapeFromPlegia
import logging
import math
from app.data.database import DB
from app.engine import (action, combat_calcs, engine, equations, evaluate,
item_funcs, item_system, line_of_sight, pathfinding,
skill_system, target_system)
from app.engine.combat import interaction
from app.engine.game_state import game
from app.engine.movement import MovementManager
from app.utilities import utils
class AIController():
def __init__(self):
# Controls whether we should be skipping through the AI's turns
self.do_skip: bool = False
self.reset()
def skip(self):
self.do_skip = True
def end_skip(self):
self.do_skip = False
def reset(self):
self.unit = None
self.state = "Init"
self.behaviour_idx = 0
self.behaviour = None
self.inner_ai = None
self.did_something = False
self.move_ai_complete = False
self.attack_ai_complete = False
self.canto_ai_complete = False
def load_unit(self, unit):
self.reset()
self.unit = unit
def is_done(self):
return self.move_ai_complete and \
self.attack_ai_complete and self.canto_ai_complete
def clean_up(self):
self.goal_position = None
self.goal_item = None
self.goal_target = None
def set_next_behaviour(self):
behaviours = DB.ai.get(self.unit.ai).behaviours
if self.behaviour_idx < len(behaviours):
self.behaviour = behaviours[self.behaviour_idx]
self.behaviour_idx += 1
else:
self.behaviour = None
self.behaviour_idx = 0
def get_behaviour(self):
return self.behaviour
def act(self):
logging.info("AI Act!")
change = False
if not self.move_ai_complete:
if self.think():
change = self.move()
self.move_ai_complete = True
elif not self.attack_ai_complete:
change = self.attack()
self.attack_ai_complete = True
elif not self.canto_ai_complete:
if self.unit.has_attacked and skill_system.has_canto(self.unit, None):
self.canto_retreat()
change = self.move()
self.canto_ai_complete = True
return self.did_something, change
def move(self):
if self.goal_position and self.goal_position != self.unit.position:
path = target_system.get_path(self.unit, self.goal_position)
# if self.unit.has_attacked:
# self.unit.wait()
game.state.change('movement')
action.do(action.Move(self.unit, self.goal_position, path))
return True
else:
return False
def attack(self):
# Attacking or supporting
if self.goal_target: # Target is a position tuple
if self.goal_item and self.goal_item in item_funcs.get_all_items(self.unit):
self.unit.equip(self.goal_item)
# Highlights
if item_system.is_weapon(self.unit, self.goal_item):
game.highlight.remove_highlights()
splash_positions = item_system.splash_positions(self.unit, self.goal_item, self.goal_target)
game.highlight.display_possible_attacks({self.goal_target})
game.highlight.display_possible_attacks(splash_positions, light=True)
elif item_system.is_spell(self.unit, self.goal_item):
game.highlight.remove_highlights()
splash_positions = item_system.splash_positions(self.unit, self.goal_item, self.goal_target)
game.highlight.display_possible_spell_attacks({self.goal_target})
game.highlight.display_possible_spell_attacks(splash_positions, light=True)
# Used for steal
if item_system.targets_items(self.unit, self.goal_item):
# Choose most expensive item that is legal
target = game.board.get_unit(self.goal_target)
legal_items = [item for item in target.items if item_system.item_restrict(self.unit, self.goal_item, target, item)]
items = sorted(legal_items, key=lambda x: item_system.sell_price(self.unit, x) or 0)
self.goal_item.data['target_item'] = items[-1]
# Combat
interaction.start_combat(self.unit, self.goal_target, self.goal_item, ai_combat=True, skip=self.do_skip)
return True
# Interacting with regions
elif self.goal_position and self.behaviour and self.behaviour.action == 'Interact':
# Get region
region = None
for r in game.level.regions:
if r.contains(self.goal_position) and r.region_type == 'event' and r.sub_nid == self.behaviour.target_spec:
try:
if not r.condition or evaluate.evaluate(r.condition, self.unit, position=self.goal_position):
region = r
break
except:
logging.warning("Could not evaluate region conditional %s" % r.condition)
if region:
did_trigger = game.events.trigger(region.sub_nid, self.unit, position=self.unit.position, region=region)
if did_trigger and region.only_once:
action.do(action.RemoveRegion(region))
if did_trigger:
action.do(action.HasAttacked(self.unit))
return True
return False
def canto_retreat(self):
valid_positions = self.get_true_valid_moves()
enemy_positions = {u.position for u in game.units if u.position and skill_system.check_enemy(self.unit, u)}
self.goal_position = utils.farthest_away_pos(self.unit.position, valid_positions, enemy_positions)
def smart_retreat(self) -> bool:
valid_positions = self.get_true_valid_moves()
target_positions = get_targets(self.unit, self.behaviour)
zero_move = max(target_system.find_potential_range(self.unit, True, True), default=0)
single_move = zero_move + equations.parser.movement(self.unit)
double_move = single_move + equations.parser.movement(self.unit)
target_positions = {(pos, utils.calculate_distance(self.unit.position, pos)) for pos in target_positions}
if self.behaviour.view_range == -4:
pass
elif self.behaviour.view_range == -3:
target_positions = {(pos, mag) for pos, mag in target_positions if mag < double_move}
elif self.behaviour.view_range == -2:
target_positions = {(pos, mag) for pos, mag in target_positions if mag < single_move}
elif self.behaviour.view_range == -1:
target_positions = {(pos, mag) for pos, mag in target_positions if mag < zero_move}
else:
            target_positions = {(pos, mag) for pos, mag in target_positions if mag < self.behaviour.view_range}
if target_positions and len(valid_positions) > 1:
self.goal_position = utils.smart_farthest_away_pos(self.unit.position, valid_positions, target_positions)
return True
else:
return False
def get_true_valid_moves(self) -> set:
valid_moves = target_system.get_valid_moves(self.unit)
other_unit_positions = {unit.position for unit in game.units if unit.position and unit is not self.unit}
valid_moves -= other_unit_positions
return valid_moves
def think(self):
time = engine.get_time()
success = False
self.did_something = False
orig_pos = self.unit.position
logging.info("*** AI Thinking... ***")
while True:
# Can spend up to half a frame thinking
over_time = engine.get_true_time() - time >= 8
logging.info("Current State: %s", self.state)
if self.state == 'Init':
self.start_time = engine.get_time()
logging.info("Starting AI with nid: %s, position: %s, class: %s, AI: %s", self.unit.nid, self.unit.position, self.unit.klass, self.unit.ai)
self.clean_up()
# Get next behaviour
self.set_next_behaviour()
if self.behaviour:
logging.info(self.behaviour.action)
if self.behaviour.action == "None":
pass # Try again
elif self.behaviour.action == "Attack":
self.inner_ai = self.build_primary()
self.state = "Primary"
elif self.behaviour.action == "Support":
self.inner_ai = self.build_primary()
self.state = "Primary"
elif self.behaviour.action == 'Steal':
self.inner_ai = self.build_primary()
self.state = "Primary"
elif self.behaviour.action == 'Interact':
self.inner_ai = self.build_secondary()
self.state = "Secondary"
elif self.behaviour.action == 'Move_to':
self.inner_ai = self.build_secondary()
self.state = "Secondary"
elif self.behaviour.action == "Move_away_from":
success = self.smart_retreat()
if success:
self.state = "Done"
else:
self.state = "Init" # Try another behaviour
else:
self.state = 'Done'
elif self.state == 'Primary':
done, self.goal_target, self.goal_position, self.goal_item = self.inner_ai.run()
if done:
if self.goal_target:
self.ai_group_ping()
success = True
self.state = "Done"
else:
self.inner_ai = self.build_secondary()
self.state = "Secondary" # Try secondary
elif over_time:
# Make sure to quick move back so that the in-between frames aren't flickering around
self.inner_ai.quick_move(self.inner_ai.orig_pos)
elif self.state == 'Secondary':
done, self.goal_position = self.inner_ai.run()
if done:
if self.goal_position:
if self.goal_position != self.unit.position:
self.ai_group_ping()
success = True
self.state = "Done"
else:
self.state = "Init" # Try another behaviour
if self.state == 'Done':
self.did_something = success
self.state = 'Init'
return True
if over_time:
break
return False
def ai_group_ping(self):
ai_group = self.unit.ai_group
if not ai_group:
return
for unit in game.units:
if unit.team == self.unit.team and unit.ai_group == ai_group:
if not unit._has_moved and not unit._has_attacked:
unit.has_run_ai = False # So it can be run through the AI state again
if not unit.ai_group_active:
action.do(action.AIGroupPing(unit))
def build_primary(self):
if self.behaviour.view_range == -1: # Guard AI
valid_moves = {self.unit.position}
else:
valid_moves = self.get_true_valid_moves()
return PrimaryAI(self.unit, valid_moves, self.behaviour)
def build_secondary(self):
return SecondaryAI(self.unit, self.behaviour)
class PrimaryAI():
def __init__(self, unit, valid_moves, behaviour):
self.max_tp = 0
self.unit = unit
self.orig_pos = self.unit.position
self.orig_item = self.unit.items[0] if self.unit.items else None
self.behaviour = behaviour
if self.behaviour.action == "Attack":
self.items = [item for item in item_funcs.get_all_items(self.unit) if
item_funcs.available(self.unit, item)]
self.extra_abilities = skill_system.get_extra_abilities(self.unit)
for ability in self.extra_abilities.values():
self.items.append(ability)
elif self.behaviour.action == 'Support':
self.items = [item for item in item_funcs.get_all_items(self.unit) if
item_funcs.available(self.unit, item)]
self.extra_abilities = skill_system.get_extra_abilities(self.unit)
for ability in self.extra_abilities.values():
self.items.append(ability)
elif self.behaviour.action == 'Steal':
self.items = []
self.extra_abilities = skill_system.get_extra_abilities(self.unit)
for ability in self.extra_abilities.values():
if ability.name == 'Steal':
self.items.append(ability)
self.behaviour_targets = get_targets(self.unit, self.behaviour)
logging.info("Testing Items: %s", self.items)
self.item_index = 0
self.move_index = 0
self.target_index = 0
self.valid_moves = list(valid_moves)
self.best_target = None
self.best_position = None
self.best_item = None
self.item_setup()
def item_setup(self):
if self.item_index < len(self.items):
logging.info("Testing %s" % self.items[self.item_index])
self.unit.equip(self.items[self.item_index])
self.get_all_valid_targets()
self.possible_moves = self.get_possible_moves()
logging.info(self.possible_moves)
def get_valid_targets(self, unit, item, valid_moves) -> list:
item_range = item_funcs.get_range(unit, item)
ai_targets = item_system.ai_targets(unit, item)
if len(ai_targets) < 20:
logging.info("AI Targets: %s", ai_targets)
filtered_targets = set()
for pos in ai_targets:
for valid_move in valid_moves:
# Determine if we can hit this unit at one of our moves
if (utils.calculate_distance(pos, valid_move) in item_range) and \
(not DB.constants.value('ai_fog_of_war') or game.board.in_vision(pos, self.unit.team)):
filtered_targets.add(pos)
break
return list(filtered_targets)
def get_all_valid_targets(self):
item = self.items[self.item_index]
logging.info("Determining targets for item: %s", item)
self.valid_targets = self.get_valid_targets(self.unit, item, self.valid_moves)
# Only if we already have some legal targets (ie, ourself)
if self.valid_targets and 0 in item_funcs.get_range(self.unit, item):
self.valid_targets += self.valid_moves # Hack to target self in all valid positions
self.valid_targets = list(set(self.valid_targets)) # Only uniques
logging.info("Valid Targets: %s", self.valid_targets)
def get_possible_moves(self) -> list:
if self.target_index < len(self.valid_targets) and self.item_index < len(self.items):
# Given an item and a target, find all positions in valid_moves that I can strike the target at.
item = self.items[self.item_index]
target = self.valid_targets[self.target_index]
a = target_system.find_manhattan_spheres(item_funcs.get_range(self.unit, item), *target)
b = set(self.valid_moves)
return list(a & b)
else:
return []
def quick_move(self, move):
game.leave(self.unit, test=True)
self.unit.position = move
game.arrive(self.unit, test=True)
def run(self):
if self.item_index >= len(self.items):
self.quick_move(self.orig_pos)
if self.orig_item:
self.unit.equip(self.orig_item)
return (True, self.best_target, self.best_position, self.best_item)
elif self.target_index >= len(self.valid_targets):
self.target_index = 0
self.item_index += 1
self.item_setup()
elif self.move_index >= len(self.possible_moves):
self.move_index = 0
self.target_index += 1
self.possible_moves = self.get_possible_moves()
else:
target = self.valid_targets[self.target_index]
item = self.items[self.item_index]
# If too many legal targets, just try for the best move first
# Otherwise it spends way too long trying every possible position to strike from
if len(self.valid_targets) > 10:
enemy_positions = {u.position for u in game.units if u.position and skill_system.check_enemy(self.unit, u)}
move = utils.farthest_away_pos(self.orig_pos, self.possible_moves, enemy_positions)
else:
move = self.possible_moves[self.move_index]
if self.unit.position != move:
self.quick_move(move)
# Check line of sight
line_of_sight_flag = True
if DB.constants.value('line_of_sight'):
max_item_range = max(item_funcs.get_range(self.unit, item))
valid_targets = line_of_sight.line_of_sight([move], [target], max_item_range)
if not valid_targets:
line_of_sight_flag = False
if line_of_sight_flag:
self.determine_utility(move, target, item)
self.move_index += 1
# If too many legal targets, do not bother with every possible move
if len(self.valid_targets) > 10:
self.move_index = len(self.possible_moves)
# Not done yet
return (False, self.best_target, self.best_position, self.best_item)
def determine_utility(self, move, target, item):
tp = 0
main_target_pos, splash = item_system.splash(self.unit, item, target)
if item_system.target_restrict(self.unit, item, main_target_pos, splash):
tp = self.compute_priority(main_target_pos, splash, move, item)
unit = game.board.get_unit(target)
# Don't target self if I've already moved and I'm not targeting my new position
if unit is self.unit and target != self.unit.position:
return
if unit:
name = unit.nid
else:
name = '--'
logging.info("Choice %.5f - Weapon: %s, Position: %s, Target: %s, Target Position: %s", tp, item, move, name, target)
if tp > self.max_tp:
self.best_target = target
self.best_position = move
self.best_item = item
self.max_tp = tp
def compute_priority(self, main_target_pos, splash, move, item) -> float:
tp = 0
main_target = game.board.get_unit(main_target_pos)
# Only count main target if it's one of the legal targets
if main_target and main_target_pos in self.behaviour_targets:
ai_priority = item_system.ai_priority(self.unit, item, main_target, move)
# If no ai priority hook defined
if ai_priority is None:
pass
else:
tp += ai_priority
if item_system.damage(self.unit, item) is not None and \
skill_system.check_enemy(self.unit, main_target):
ai_priority = self.default_priority(main_target, item, move)
tp += ai_priority
for splash_pos in splash:
target = game.board.get_unit(splash_pos)
# Only count splash target if it's one of the legal targets
if not target or splash_pos not in self.behaviour_targets:
continue
ai_priority = item_system.ai_priority(self.unit, item, main_target, move)
if ai_priority is None:
pass
else:
tp += ai_priority
if item_system.damage(self.unit, item):
accuracy = utils.clamp(combat_calcs.compute_hit(self.unit, target, item, target.get_weapon(), "attack")/100., 0, 1)
raw_damage = combat_calcs.compute_damage(self.unit, target, item, target.get_weapon(), "attack")
lethality = utils.clamp(raw_damage / float(target.get_hp()), 0, 1)
ai_priority = 3 if lethality * accuracy >= 1 else lethality * accuracy
if skill_system.check_enemy(self.unit, target):
tp += ai_priority
elif skill_system.check_ally(self.unit, target):
tp -= ai_priority
return tp
def default_priority(self, main_target, item, move):
# Default method
terms = []
offense_term = 0
defense_term = 1
raw_damage = combat_calcs.compute_damage(self.unit, main_target, item, main_target.get_weapon(), "attack")
crit_damage = combat_calcs.compute_damage(self.unit, main_target, item, main_target.get_weapon(), "attack", crit=True)
# Damage I do compared to target's current hp
lethality = utils.clamp(raw_damage / float(main_target.get_hp()), 0, 1)
crit_lethality = utils.clamp(crit_damage / float(main_target.get_hp()), 0, 1)
# Accuracy
hit_comp = combat_calcs.compute_hit(self.unit, main_target, item, main_target.get_weapon(), "attack")
if hit_comp:
accuracy = utils.clamp(hit_comp/100., 0, 1)
else:
accuracy = 0
crit_comp = combat_calcs.compute_crit(self.unit, main_target, item, main_target.get_weapon(), "attack")
if crit_comp:
crit_accuracy = utils.clamp(crit_comp/100., 0, 1)
else:
crit_accuracy = 0
# Determine if I would get countered
# Even if I wouldn't get countered, check anyway how much damage I would take
target_weapon = main_target.get_weapon()
target_damage = combat_calcs.compute_damage(main_target, self.unit, target_weapon, item, "defense")
if not target_damage:
target_damage = 0
target_damage = utils.clamp(target_damage/main_target.get_hp(), 0, 1)
target_accuracy = combat_calcs.compute_hit(main_target, self.unit, target_weapon, item, "defense")
if not target_accuracy:
target_accuracy = 0
target_accuracy = utils.clamp(target_accuracy/100., 0, 1)
# If I wouldn't get counterattacked, much less important, so multiply by 10 %
if not combat_calcs.can_counterattack(self.unit, item, main_target, target_weapon):
target_damage *= 0.3
target_accuracy *= 0.3
num_attacks = combat_calcs.outspeed(self.unit, main_target, item, target_weapon, "attack")
first_strike = lethality * accuracy if lethality >= 1 else 0
if num_attacks > 1 and target_damage >= 1:
# Calculate chance I actually get to strike more than once
num_attacks -= (target_accuracy * (1 - first_strike))
offense_term += 3 if lethality * accuracy >= 1 else lethality * accuracy * num_attacks
crit_term = (crit_lethality - lethality) * crit_accuracy * accuracy * num_attacks
offense_term += crit_term
defense_term -= target_damage * target_accuracy * (1 - first_strike)
if offense_term <= 0:
if lethality > 0 and DB.constants.value('attack_zero_hit'):
logging.info("Accuracy is bad, but continuing with stupid AI")
elif accuracy > 0 and DB.constants.value('attack_zero_dam'):
logging.info("Zero Damage, but continuing with stupid AI")
else:
logging.info("Offense: %.2f, Defense: %.2f", offense_term, defense_term)
return 0
# Only here to break ties
# Tries to minimize how far the unit should move
max_distance = equations.parser.movement(self.unit)
if max_distance > 0:
distance_term = (max_distance - utils.calculate_distance(move, self.orig_pos)) / float(max_distance)
else:
distance_term = 1
logging.info("Damage: %.2f, Accuracy: %.2f, Crit Accuracy: %.2f", lethality, accuracy, crit_accuracy)
logging.info("Offense: %.2f, Defense: %.2f, Distance: %.2f", offense_term, defense_term, distance_term)
ai_prefab = DB.ai.get(self.unit.ai)
offense_bias = ai_prefab.offense_bias
offense_weight = offense_bias * (1 / (offense_bias + 1))
defense_weight = 1 - offense_weight
terms.append((offense_term, offense_weight))
terms.append((defense_term, defense_weight))
terms.append((distance_term, .0001))
return utils.process_terms(terms)
def handle_unit_spec(all_targets, behaviour):
target_spec = behaviour.target_spec
if not target_spec:
return all_targets
invert = bool(behaviour.invert_targeting)
# Uses ^ (which is XOR) to handle inverting the targeting
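    # Worked example: with target_spec = ("Tag", "Boss") and invert False, a unit tagged
    # "Boss" gives True ^ False == True and is kept; with invert True it gives
    # True ^ True == False, so only units *without* the tag are kept.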
if target_spec[0] == "Tag":
all_targets = [pos for pos in all_targets if bool(target_spec[1] in game.board.get_unit(pos).tags) ^ invert]
elif target_spec[0] == "Class":
all_targets = [pos for pos in all_targets if bool(game.board.get_unit(pos).klass == target_spec[1]) ^ invert]
elif target_spec[0] == "Name":
all_targets = [pos for pos in all_targets if bool(game.board.get_unit(pos).name == target_spec[1]) ^ invert]
elif target_spec[0] == 'Faction':
all_targets = [pos for pos in all_targets if bool(game.board.get_unit(pos).faction == target_spec[1]) ^ invert]
elif target_spec[0] == 'Party':
all_targets = [pos for pos in all_targets if bool(game.board.get_unit(pos).party == target_spec[1]) ^ invert]
elif target_spec[0] == 'ID':
all_targets = [pos for pos in all_targets if bool(game.board.get_unit(pos).nid == target_spec[1]) ^ invert]
return all_targets
def get_targets(unit, behaviour):
all_targets = []
if behaviour.target == 'Unit':
all_targets = [u.position for u in game.units if u.position]
elif behaviour.target == 'Enemy':
all_targets = [u.position for u in game.units if u.position and skill_system.check_enemy(unit, u)]
elif behaviour.target == 'Ally':
all_targets = [u.position for u in game.units if u.position and skill_system.check_ally(unit, u)]
elif behaviour.target == 'Event':
target_spec = behaviour.target_spec
all_targets = []
for region in game.level.regions:
try:
if region.region_type == 'event' and region.sub_nid == target_spec and (not region.condition or evaluate.evaluate(region.condition, unit)):
all_targets += region.get_all_positions()
except:
logging.warning("Region Condition: Could not parse %s" % region.condition)
all_targets = list(set(all_targets)) # Remove duplicates
elif behaviour.target == 'Position':
if behaviour.target_spec == "Starting":
if unit.starting_position:
all_targets = [unit.starting_position]
else:
all_targets = []
else:
all_targets = [tuple(behaviour.target_spec)]
if behaviour.target in ('Unit', 'Enemy', 'Ally'):
all_targets = handle_unit_spec(all_targets, behaviour)
if behaviour.target != 'Position':
if DB.constants.value('ai_fog_of_war'):
all_targets = [pos for pos in all_targets if game.board.in_vision(pos, unit.team)]
return all_targets
class SecondaryAI():
def __init__(self, unit, behaviour):
self.unit = unit
self.behaviour = behaviour
self.view_range = self.behaviour.view_range
if self.view_range == -4 or self.unit.ai_group_active:
self.view_range = -3 # Try this first
self.available_targets = []
# Determine all targets
self.all_targets = get_targets(self.unit, behaviour)
self.zero_move = max(target_system.find_potential_range(self.unit, True, True), default=0)
self.single_move = self.zero_move + equations.parser.movement(self.unit)
self.double_move = self.single_move + equations.parser.movement(self.unit)
movement_group = MovementManager.get_movement_group(self.unit)
self.grid = game.board.get_grid(movement_group)
self.pathfinder = \
pathfinding.AStar(self.unit.position, None, self.grid,
game.tilemap.width, game.tilemap.height,
self.unit.team, skill_system.pass_through(self.unit),
DB.constants.value('ai_fog_of_war'))
self.widen_flag = False # Determines if we've widened our search
self.reset()
def reset(self):
self.max_tp = 0
self.best_target = 0
self.best_path = None
limit = self.get_limit()
self.available_targets = [t for t in self.all_targets if utils.calculate_distance(self.unit.position, t) <= limit]
self.best_position = None
def get_limit(self) -> int:
# Make sure we don't exceed double move
if self.widen_flag or self.view_range == -4:
limit = 99
elif self.view_range == -3:
limit = self.double_move
elif self.view_range == -2:
if self.behaviour.action in ('Attack', 'Support', 'Steal'):
limit = -1 # Because the primary AI should have already taken care of this...
else:
limit = self.single_move
elif self.view_range == -1:
limit = -1
else:
limit = self.view_range
return limit
def run(self):
if self.available_targets:
target = self.available_targets.pop()
# Find a path to the target
path = self.get_path(target)
if not path:
logging.info("No valid path to %s.", target)
return False, None
# We found a path
tp = self.compute_priority(target, len(path))
logging.info("Path to %s. -- %s", target, tp)
if tp > self.max_tp:
self.max_tp = tp
self.best_target = target
self.best_path = path
elif self.best_target:
self.best_position = target_system.travel_algorithm(self.best_path, self.unit.movement_left, self.unit, self.grid)
logging.info("Best Target: %s", self.best_target)
logging.info("Best Position: %s", self.best_position)
return True, self.best_position
else:
if (self.behaviour.view_range == -4 or self.unit.ai_group_active) and not self.widen_flag:
logging.info("Widening search!")
self.widen_flag = True
self.view_range = -4
self.available_targets = [t for t in self.all_targets if t not in self.available_targets]
else:
return True, None
return False, None
def get_path(self, goal_pos):
self.pathfinder.set_goal_pos(goal_pos)
if self.behaviour.target == 'Event':
adj_good_enough = False
else:
adj_good_enough = True
limit = self.get_limit()
path = self.pathfinder.process(game.board, adj_good_enough=adj_good_enough, ally_block=False, limit=limit)
self.pathfinder.reset()
return path
def default_priority(self, enemy):
hp_max = equations.parser.hitpoints(enemy)
weakness_term = float(hp_max - enemy.get_hp()) / hp_max
items = [item for item in item_funcs.get_all_items(self.unit) if
item_funcs.available(self.unit, item)]
terms = []
tp, highest_damage_term, highest_status_term = 0, 0, 0
for item in items:
status_term = 1 if item.status_on_hit else 0
true_damage = 0
if item_system.is_weapon(self.unit, item) or item_system.is_spell(self.unit, item):
raw_damage = combat_calcs.compute_damage(self.unit, enemy, item, enemy.get_weapon(), 'attack')
hit = utils.clamp(combat_calcs.compute_hit(self.unit, enemy, item, enemy.get_weapon(), 'attack')/100., 0, 1)
if raw_damage:
true_damage = raw_damage * hit
else:
true_damage = 0
if true_damage <= 0 and status_term <= 0:
continue # If no damage could be dealt, ignore
damage_term = min(float(true_damage / hp_max), 1.)
new_tp = damage_term + status_term/2
if new_tp > tp:
tp = new_tp
highest_damage_term = damage_term
highest_status_term = status_term
if highest_status_term == 0 and highest_damage_term == 0:
# Just don't include any of this
return terms
terms.append((highest_damage_term, 15))
terms.append((highest_status_term, 10))
terms.append((weakness_term, 15))
return terms
def compute_priority(self, target, distance=0):
terms = []
if distance:
distance_term = 1 - math.log(distance)/4.
else:
target_distance = utils.calculate_distance(self.unit.position, target)
distance_term = 1 - math.log(target_distance)/4.
terms.append((distance_term, 60))
enemy = game.board.get_unit(target)
if self.behaviour.action == "Attack" and enemy:
new_terms = self.default_priority(enemy)
if new_terms:
terms += new_terms
else:
return 0
elif self.behaviour.action == "Steal" and enemy:
return 0 # TODO: For now, Steal just won't work with secondary AI
else:
pass
return utils.process_terms(terms)
|
StarcoderdataPython
|
3381413
|
<reponame>JIABI/GhostShiftAddNet
import torch
try:
import unoptimized_cuda
except:
print("Unable to import CUDA unoptimized kernels")
def linear(input, weight, bias):
out = torch.zeros([input.size(0), weight.size(0)], dtype=torch.float, device=torch.device('cuda:0'))
if bias is not None:
unoptimized_cuda.UNOPTIMIZED_LINEAR(input, weight, bias, out)
else:
temp = torch.zeros([weight.size(0)], dtype=torch.float, device=torch.device('cuda:0'))
unoptimized_cuda.UNOPTIMIZED_LINEAR(input, weight, temp, out)
return out
def conv2d(input, weight, bias, stride, padding):
if len(stride) == 1:
strides_h = stride[0]
strides_w = stride[0]
else:
strides_h = stride[0]
strides_w = stride[1]
out_height = int((input.size(2) - weight.size(2)) / strides_h +1)
out_width = int((input.size(3) - weight.size(3)) / strides_w +1)
out = torch.zeros([input.size(0), weight.size(0), out_height, out_width], dtype=torch.float, device=torch.device('cuda:0'))
if bias is not None:
unoptimized_cuda.UNOPTIMIZED_CONV(input, weight, bias, out, stride, padding )
else:
temp = torch.zeros([weight.size(0)], dtype=torch.float, device=torch.device('cuda:0'))
unoptimized_cuda.UNOPTIMIZED_CONV(input, weight, temp, out, stride, padding )
return out
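# Hedged usage sketch (illustrative): exercising the wrappers on small CUDA tensors;
# assumes the unoptimized_cuda extension was built and a CUDA device is available.
if __name__ == "__main__":
    x = torch.randn(8, 16, device="cuda:0")
    w = torch.randn(4, 16, device="cuda:0")
    b = torch.randn(4, device="cuda:0")
    y = linear(x, w, b)
    print(y.shape)  # torch.Size([8, 4]) -- one row per input, one column per output unit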
|
StarcoderdataPython
|
1978335
|
# -*- coding: utf-8 -*-
import glob, os, json, pickle
import pandas as pd
import numpy as np
from numpy import floor
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import precision_score, classification_report
from sklearn.pipeline import Pipeline
from manifesto_data import get_manifesto_texts
# manifestoproject codes for left/right orientation
label2rightleft = {
'right': [104,201,203,305,401,402,407,414,505,601,603,605,606],
'left': [103,105,106,107,403,404,406,412,413,504,506,701,202]
}
# manifestoproject codes (integer divided by 100) for political domain
label2domain = {
'External Relations':1,
'Freedom and Democracy':2,
'Political System':3,
'Economy':4,
'Welfare and Quality of Life':5,
'Fabric of Society':6
}
# pd.concat([df,clf.predictBatch(df.message.fillna(''))])
def manifestolabels(folder = "data/manifesto"):
lines = open(folder+"/manifestolabels.txt").readlines()
return dict(map(lambda x: (int(x[3:6]), x[8:-2]),lines))
class Classifier:
def __init__(self,train=False):
'''
Creates a classifier object
if no model is found, or train is set True, a new classifier is learned
INPUT
folder the root folder with the raw text data, where the model is stored
train set True if you want to train
'''
# if there is no classifier file or training is invoked
if (not os.path.isfile('classifier.pickle')) or train:
print('Training classifier')
self.train()
print('Loading classifier')
self.clf = pickle.load(open('classifier.pickle','rb'))
def predict(self,text):
'''
Uses scikit-learn Bag-of-Word extractor and classifier and
applies it to some text.
INPUT
text a string to assign to a manifestoproject label
'''
if (not type(text) is list) & (len(text)<3):
return nullPrediction()
# make it a list, if it is a string
if not type(text) is list: text = [text]
        # map manifesto codes to human-readable labels (used for the output below)
        mc = manifestolabels()
        # predict probabilities
        text = ["".join([x for x in t if not x.isdigit()]) for t in text]
probabilities = self.clf.predict_proba(text).flatten()
predictionsManifestocode = dict(zip(self.clf.classes_, probabilities.tolist()))
predictionsDomain = {l:sum(probabilities[np.floor(self.clf.classes_/100) == idx]) for l,idx in label2domain.items()}
predictionsRight = sum([p for l,p in predictionsManifestocode.items() if l in label2rightleft['right']])
predictionsLeft = sum([p for l,p in predictionsManifestocode.items() if l in label2rightleft['left']])
# transform the predictions into json output
return {
'leftright':{'right':predictionsRight,'left':predictionsLeft},
'domain':predictionsDomain,
'manifestocode':{mc[x[0]]:x[1] for x in predictionsManifestocode.items()}
}
def predictBatch(self,texts):
'''
Uses scikit-learn Bag-of-Word extractor and classifier and
applies it to some text.
INPUT
text a string to assign to a manifestoproject label
'''
mc = manifestolabels()
df = pd.DataFrame(self.clf.predict_proba(texts),columns=self.clf.classes_)
mcCols = df.columns
valid_right = list(set(label2rightleft['right']).intersection(set(mcCols)))
valid_left = list(set(label2rightleft['left']).intersection(set(mcCols)))
df['right'] = df[valid_right].sum(axis=1)
df['left'] = df[valid_left].sum(axis=1)
for dom,domIdx in label2domain.items():
df[dom] = df[mcCols[floor(mcCols/100)==domIdx]].sum(axis=1)
return df.rename(index=str,columns=mc)
def train(self,folds = 2, validation_ratio = 0.5, precision_threshold = 0.1):
'''
trains a classifier on the bag of word vectors
INPUT
folds number of cross-validation folds for model selection
'''
try:
# load the data
data,labels = get_manifesto_texts()
except:
print('Could not load text data file in\n')
raise
# the manifesto codes
mc = manifestolabels()
# set some data aside for cross-validation
train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=validation_ratio)
# the scikit learn pipeline for vectorizing, normalizing and classifying text
text_clf = Pipeline([('vect', TfidfVectorizer()),
('clf',SGDClassifier(loss="log",n_jobs=-1,n_iter=5))])
# tried many more hyperparameters, these worked best
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__max_df': [.2],
'clf__alpha': (np.logspace(-6, -4, 4)).tolist()
}
# perform gridsearch to get the best regularizer
gs_clf = GridSearchCV(text_clf, parameters, cv=folds, n_jobs=-1,verbose=4)
gs_clf.fit(train_data,train_labels)
test_predictions = gs_clf.predict(test_data)
with open("classification_report.txt",'w') as fh:
fh.write(classification_report(test_predictions, test_labels))
unique_labels = np.unique(labels)
# compute precisions for each manifesto label
precisions = dict(zip(unique_labels, precision_score(test_predictions, test_labels, labels=unique_labels, average=None)))
too_bad = [l for l,s in precisions.items() if s < precision_threshold]
print("Discarding %d labels with precisions below %f: %s"%(len(too_bad), precision_threshold, "\n".join([mc[l] for l in too_bad])))
# if manifesto code cannot be predicted with sufficient precision,
# don't try to predict it - so we're discarding the respective data points
data, labels = zip(*[(t,l) for t,l in zip(data,labels) if precisions[l] > precision_threshold])
# fit again on all data points but only with best params
# gs_clf = GridSearchCV(text_clf, params_, 'precision_weighted', cv=folds, n_jobs=-1,verbose=4)
gs_clf.best_estimator_.fit(data,labels)
# dump classifier to pickle
pickle.dump(gs_clf.best_estimator_,open('classifier.pickle','wb'))
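# Hedged usage sketch (illustrative sentence, not from the original source): requires the
# manifesto data files plus either an existing classifier.pickle or train=True.
if __name__ == "__main__":
    clf = Classifier(train=False)
    scores = clf.predict("We will invest in renewable energy and expand public healthcare.")
    print(scores["leftright"])
    print(max(scores["domain"], key=scores["domain"].get))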
|
StarcoderdataPython
|
188169
|
<filename>models/search_result.py
# coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from . import util
class SearchResult(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, full_name=None): # noqa: E501
"""SearchResult - a model defined in Swagger
:param name: The name of this SearchResult. # noqa: E501
:type name: str
:param full_name: The full_name of this SearchResult. # noqa: E501
:type full_name: str
"""
self.swagger_types = {
'name': str,
'full_name': str
}
self.attribute_map = {
'name': 'name',
'full_name': 'fullName'
}
self._name = name
self._full_name = full_name
@classmethod
def from_dict(cls, dikt):
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The SearchResult of this SearchResult. # noqa: E501
:rtype: SearchResult
"""
return util.deserialize_model(dikt, cls)
@property
def name(self):
"""Gets the name of this SearchResult.
:return: The name of this SearchResult.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SearchResult.
:param name: The name of this SearchResult.
:type name: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def full_name(self):
"""Gets the full_name of this SearchResult.
A fully-describing name of the package # noqa: E501
:return: The full_name of this SearchResult.
:rtype: str
"""
return self._full_name
@full_name.setter
def full_name(self, full_name):
"""Sets the full_name of this SearchResult.
A fully-describing name of the package # noqa: E501
:param full_name: The full_name of this SearchResult.
:type full_name: str
"""
if full_name is None:
raise ValueError("Invalid value for `full_name`, must not be `None`") # noqa: E501
self._full_name = full_name
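# Hedged usage sketch (illustrative values): deserializing a swagger payload into the
# generated model; util.deserialize_model maps the camelCase `fullName` key onto the
# snake_case `full_name` attribute via `attribute_map`.
if __name__ == "__main__":
    payload = {"name": "requests", "fullName": "python-requests/requests"}
    result = SearchResult.from_dict(payload)
    print(result.name, result.full_name)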
|
StarcoderdataPython
|
3316719
|
import re
subst = re.compile(r"(%\((\w+)\))")
def substitute_str(text, vars):
out = []
i0 = 0
for m in subst.finditer(text):
name = m.group(2)
if name in vars:
out.append(text[i0:m.start(1)])
out.append(str(vars[name]))
i0 = m.end(1)
out.append(text[i0:])
return "".join(out)
def substitute_list(lst, vars):
return [substitute_in(item, vars) for item in lst]
def substitute_dict(d, outer):
vars = {}
vars.update(outer)
# substitute top level strings only
    out = {k: (substitute_str(v, outer) if isinstance(v, str) else v)
           for k, v in d.items() if isinstance(v, (str, int))}
# use this as the substitution dictionary
vars.update(out)
    out.update({k: substitute_in(v, vars) for k, v in d.items()})
return out
def substitute_in(item, vars):
if isinstance(item, str):
item = substitute_str(item, vars)
elif isinstance(item, dict):
item = substitute_dict(item, vars)
elif isinstance(item, list):
item = substitute_list(item, vars)
return item
def preprocess(s, vars={}):
return substitute_in(s, vars)
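# Hedged usage sketch (illustrative): %(name) placeholders are expanded from sibling keys
# of the same dict and from an optional outer `vars` mapping, one nesting level at a time.
if __name__ == "__main__":
    cfg = {"user": "alice", "greeting": "hello %(user)"}
    print(preprocess(cfg))                   # {'user': 'alice', 'greeting': 'hello alice'}
    print(preprocess("x=%(x)", {"x": 1}))    # x=1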
|
StarcoderdataPython
|
3296111
|
import time
from .utils import make_graph, preprocess_features, run_pic
class PIC:
"""Class to perform Power Iteration Clustering on a graph of nearest neighbors.
Args:
args: for consistency with k-means init
        sigma (float): bandwidth of the Gaussian kernel (default 0.2)
nnn (int): number of nearest neighbors (default 5)
alpha (float): parameter in PIC (default 0.001)
        distribute_singletons (bool): If True, reassign each singleton to the
            cluster of its closest non-singleton nearest neighbor (searching up
            to nnn nearest neighbors).
Attributes:
images_lists (list of list): for each cluster, the list of image indexes
belonging to this cluster
"""
def __init__(
self, args=None, sigma=0.2, nnn=5, alpha=0.001, distribute_singletons=True
):
self.sigma = sigma
self.alpha = alpha
self.nnn = nnn
self.distribute_singletons = distribute_singletons
def cluster(self, data, verbose=False):
start = time.time()
# preprocess the data
xb = preprocess_features(data)
# construct nnn graph
I, D = make_graph(xb, self.nnn)
# run PIC
clust = run_pic(I, D, self.sigma, self.alpha)
images_lists = {}
for h in set(clust):
images_lists[h] = []
for data, c in enumerate(clust):
images_lists[c].append(data)
# allocate singletons to clusters of their closest NN not singleton
if self.distribute_singletons:
clust_NN = {}
for i in images_lists:
# if singleton
if len(images_lists[i]) == 1:
s = images_lists[i][0]
# for NN
for n in I[s, 1:]:
# if NN is not a singleton
if not len(images_lists[clust[n]]) == 1:
clust_NN[s] = n
break
for s in clust_NN:
del images_lists[clust[s]]
clust[s] = clust[clust_NN[s]]
images_lists[clust[s]].append(s)
self.images_lists = []
for c in images_lists:
self.images_lists.append(images_lists[c])
# if verbose:
# print('pic time: {0:.0f} s'.format(time.time() - start))
return 0
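# Hedged usage sketch (illustrative): PIC expects an (n_samples, dim) float32 feature
# matrix; preprocess_features/make_graph/run_pic live in the package's utils module
# (typically backed by faiss), so this only runs where those dependencies are installed.
if __name__ == "__main__":
    import numpy as np
    features = np.random.rand(200, 64).astype("float32")
    pic = PIC(sigma=0.2, nnn=5)
    pic.cluster(features, verbose=True)
    print("number of clusters:", len(pic.images_lists))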
|
StarcoderdataPython
|
11325239
|
from django.conf import settings
from django.shortcuts import render
def home(request):
context = {
'google_street_view_api_key': settings.GOOGLE_STREET_VIEW_API_KEY
}
return render(request, 'layers/home.html', context=context)
|
StarcoderdataPython
|
164374
|
# this file is required to get the pytest working with relative imports
|
StarcoderdataPython
|
1633682
|
import asyncio
import logging
import sys
import asynctnt
logging.basicConfig(level=logging.DEBUG)
async def main():
c = asynctnt.Connection(
host='localhost',
port=3305,
connect_timeout=5,
request_timeout=5,
reconnect_timeout=1/3,
)
async with c:
while True:
res = await c.eval('local t ={}; for i=1,1000000 do t[i] = {i + 0.03} end; return t')
print(sys.getrefcount(res.body[0][-1]))
asyncio.run(main())
|
StarcoderdataPython
|
3304066
|
import abc
import asyncio
import logging
from types import MethodType
from typing import List, Optional
from pydantic import BaseSettings
from arrlio.models import Message, TaskInstance, TaskResult
from arrlio.serializer.base import Serializer
from arrlio.tp import AsyncCallableT, SerializerT, TimeoutT
logger = logging.getLogger("arrlio")
class BackendConfig(BaseSettings):
name: Optional[str]
serializer: SerializerT
timeout: Optional[TimeoutT]
class Backend(abc.ABC):
def __init__(self, config: BackendConfig):
self.config: BackendConfig = config
self.serializer: Serializer = config.serializer()
self._closed: asyncio.Future = asyncio.Future()
self._tasks: set = set()
def __repr__(self):
return self.__str__()
def _cancel_tasks(self):
for task in self._tasks:
task.cancel()
def task(method: MethodType):
async def wrap(self, *args, **kwds):
if self._closed.done():
raise Exception(f"Call {method} on closed {self}")
task = asyncio.create_task(method(self, *args, **kwds))
self._tasks.add(task)
try:
return await task
finally:
self._tasks.discard(task)
return wrap
@property
def is_closed(self):
return self._closed.done()
async def close(self):
if self.is_closed:
return
self._closed.set_result(None)
self._cancel_tasks()
await self.stop_consume_tasks()
await self.stop_consume_messages()
@abc.abstractmethod
async def send_task(self, task_instance: TaskInstance, **kwds):
pass
@abc.abstractmethod
async def consume_tasks(self, queues: List[str], on_task: AsyncCallableT):
pass
@abc.abstractmethod
async def stop_consume_tasks(self):
pass
@abc.abstractmethod
async def push_task_result(self, task_instance: TaskInstance, task_result: TaskResult):
pass
@abc.abstractmethod
async def pop_task_result(self, task_instance: TaskInstance) -> TaskResult:
pass
@abc.abstractmethod
async def send_message(self, message: Message, encrypt: bool = None, **kwds):
pass
@abc.abstractmethod
async def consume_messages(self, queues: List[str], on_message: AsyncCallableT):
pass
@abc.abstractmethod
async def stop_consume_messages(self):
pass
|
StarcoderdataPython
|
115703
|
<gh_stars>1-10
import numpy as np
import pandas as pd
import random
import pickle
import os
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from pipelitools.models import metrics as m
def test_models():
""" """
print('test_models: ok')
class Model:
"""Runs a model, plots confusion matrix, calculates the metrics and outputs the reports in a folder.
Parameters
----------
X_train : pd.DataFrame
Features used in training.
y_train : pd.Series
Labels for training (1D vector).
X_test : pd.DataFrame
Features used in testing.
y_test : pd.Series
Labels for testing (1D vector).
"""
def __init__(self, X_train, y_train, X_test, y_test):
self.X_train = X_train
self.y_train = y_train
self.X_test = X_test
self.y_test = y_test
def checkmodel(self,
name,
model,
steps=[],
parameters={},
average='binary',
multiclass=False,
metric='accuracy',
randomized_search=False,
nfolds=5,
n_jobs=None,
save_pickle=True,
verbose=0
):
""" Calculates the model based on the pipeline and hyperparameter grid.
Then, evaluates metrics (f1-score, accuracy, precision, recall) and plots a confusion matrix.
Can save the final fitted model with pickle to load later.
Parameters
----------
name : str
Name of the model.
model : abc.ABCMeta
Machine learning model.
steps : list, optional (default = [])
Steps of the preprocessing pipeline.
parameters : dict, optional (default = {})
Parameters of the model.
average : str, optional (default = 'binary')
This parameter is required for multiclass/multilabel targets. If None, the scores for each class are
returned. Otherwise, this determines the type of averaging performed on the data
multiclass : bool, optional (default = False)
True if the classification is multiclass.
metric : str, optional (default = 'accuracy')
Metric which should be used to select the best model.
randomized_search : bool, optional (default = False)
True if randomized search.
nfolds : int, optional (default = 5)
Number of folds in CV.
n_jobs : int, optional (default = None)
The number of parallel jobs to run.
save_pickle : bool, optional (default=True)
Save the best fitted model with pickle.
To load do:
loaded_model = pickle.load(open('./pickle_models/model.sav', 'rb'))
verbose : int, optional (default = 0)
Verbose CV.
Returns
-------
cv : sklearn.model_selection._search.GridSearchCV
The fitted model.
y_pred : np.ndarray
predicted values.
Figures are saved in a separate folder.
"""
assert ' ' not in name, "Parameter 'name' must be specified without space inside."
assert isinstance(self.y_train, pd.Series), "y_train must be of type pd.Series."
assert isinstance(self.y_test, pd.Series), "y_test must be of type pd.Series."
if len(parameters) != 0:
random_parameter = random.choice(list(parameters.keys()))
assert '__' in random_parameter and name in random_parameter, \
f"Parameters should be presented in a dictionary in the following way: \n\
'{name}__parameter': [parameter_value]"
steps_model = steps[:]
# Create the pipeline
if multiclass:
from imblearn.pipeline import Pipeline
else:
from sklearn.pipeline import Pipeline
steps_model.append((name, model))
pipeline = Pipeline(steps_model)
if multiclass:
cv_metric = metric + '_'+average
else:
cv_metric = metric
if randomized_search:
cv = RandomizedSearchCV(estimator=pipeline,
param_distributions=parameters,
cv=nfolds,
# refit=cv_metric',
scoring=cv_metric,
# n_iter=10,
verbose=verbose,
n_jobs=n_jobs,
random_state=42)
else:
cv = GridSearchCV(estimator=pipeline,
param_grid=parameters,
cv=nfolds,
# refit=cv_metric',
scoring=cv_metric,
verbose=verbose,
n_jobs=n_jobs)
# Fit to the training set
cv.fit(self.X_train, self.y_train)
# Mean cross-validated score of the best_estimator
print(f"Mean cross-validated score of the best_estimator: {round(cv.best_score_, 4)}")
# Parameter setting that gave the best results on the validation data
if len(parameters) != 0:
df_tuned = pd.DataFrame(cv.best_params_, index=[0]).transpose().reset_index().rename(
columns={'index': 'Parameter', 0: 'Tuned value'})
df_tuned['Parameter'] = df_tuned.Parameter.str.partition('__').iloc[:, -1]
print(df_tuned, '\n')
# Predict the labels of the test set
y_pred = cv.predict(self.X_test)
# METRICS
m.metrics_report(cv, name, self.X_test, self.y_test, self.y_train, data='validation')
# SAVE MODEL USING PICKLE
if save_pickle:
if os.path.exists("./temp_pickle_models/") is False:
os.mkdir("./temp_pickle_models/")
pickle.dump(cv, open(f"./temp_pickle_models/{name}.sav", 'wb'))
return cv, y_pred
def evaluate(self, model, name, X_test, y_test, y_train):
m.metrics_report(model, name, X_test, y_test, y_train, data='test')
if __name__ == '__main__':
test_models()
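# Hedged usage sketch (illustrative, synthetic splits): the intended call pattern is roughly
#
#   from sklearn.svm import SVC
#   m = Model(X_train, y_train, X_test, y_test)
#   cv, y_pred = m.checkmodel('svc', SVC(probability=True),
#                             parameters={'svc__C': [0.1, 1, 10]},
#                             metric='f1', average='macro', multiclass=True,
#                             nfolds=5, save_pickle=False)
#   m.evaluate(cv, 'svc', X_holdout, y_holdout, y_train)
#
# Reports and confusion-matrix figures are written by pipelitools' metrics module.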
|
StarcoderdataPython
|
1666479
|
<reponame>VlachosGroup/PythonGroupAdditivity
import os
from warnings import warn
from collections.abc import Mapping
from .. import yaml_io
import numpy as np
from .. Error import GroupMissingDataError
from . Group import Group, Descriptor
from . Scheme import GroupAdditivityScheme
from . DataDir import get_data_dir
class GroupLibrary(Mapping):
"""Represent library of contributing properties organized by group.
The set of properties that may be represented in a :class:`GroupLibrary` is
extensible. Contributing properties are represented by *property sets*
organized by group. See the manual for a list of available property sets.
.. note::
Because the *property set* system is extensible, the module within
which a particular *property set* is defined and registered must be
imported before loading a group library that contains data for that
type of *property set*.
    To estimate properties, call :meth:`GroupLibrary.Estimate()` with the set of
    groups contained in the chemical structure of interest and the *property set*
    `name`. The property estimate is returned as an object whose type depends on
    the particular *property set*.
    To determine which groups are present in a particular chemical structure,
    use :meth:`GroupLibrary.GetDescriptors()`.
Data in multiple group libraries can be combined so long as the groups
they contain are defined within compatible schemes. See
:meth:`GroupLibrary.update()`.
"""
_property_set_estimator_types = {}
_property_set_group_yaml_types = {}
@classmethod
def register_property_set_type(cls, name, group_yaml_type, estimator_type):
"""(class method) Register new property set type.
Parameters
----------
name : str
Name of new property set type.
group_yaml_type : str
Name of property set type in the YAML type namespace.
estimator_type : class
The provided class is instantiated when an estimate is to be made
for a particular set of groups. The constructor should accept the
following parameters:
library : :class:`GroupLibrary`
library which is to be used to estimate these properties.
groups : mapping
Map from :class:`Group` to int or float specifying counts
of each group in the chemical structure.
"""
if name in cls._property_set_group_yaml_types:
raise KeyError('Property set %r already registered.' % name)
cls._property_set_group_yaml_types[name] = group_yaml_type
cls._property_set_estimator_types[name] = estimator_type
def __init__(self, scheme, contents={}, uq_contents={}, path=None):
"""Initialize library of contributing properties organized by group.
Parameters
----------
scheme : :class:`GroupScheme`
Specify group-additivity scheme to use.
contents : mapping or list
Define initial contents of the library either as mapping or list of
(`key`, `value`) pairs. See the last paragraph of the class
documentation for information on the format.
Other Parameters
----------------
path : str
File-system path library was loaded from or should be saved to by
default.
"""
self.scheme = scheme
self.path = path
if isinstance(contents, Mapping):
contents = list(contents.items())
self.contents = dict((group, property_sets)
for (group, property_sets) in contents)
self.uq_contents = uq_contents
def GetDescriptors(self, mol):
"""Determine groups appearing in chemical structure `chem`.
Parameters
----------
mol : :class:`rdkit.mol`
Specify chemical structure to match groups for.
manual_descriptors : mapping, optional
Specify value(s)/degree(s) of influence of additional descriptors
to include.
Returns
-------
groups : mapping
Map from :class:`Group` to int or float identifying groups and
their number of occurence in the structure.
"""
self.name = mol
return self.scheme.GetDescriptors(mol)
def Estimate(self, groups, property_set_name):
"""Estimate set of properties for chemical.
Parameters
----------
groups : mapping (dictionary)
Map from :class:`Group` to int or float specifying counts of each
group in the chemical structure.
property_set_name : str
Name of property set to estimate.
Returns
-------
estimated_properties : (varies)
The estimated properties, an object whose type depends on the
particular property set.
"""
if property_set_name not in self._property_set_estimator_types:
raise KeyError('Invalid property_set name: %r' % property_set_name)
# Verify groups present.
missing_groups = [group for group in groups
if property_set_name not in self[group]]
if missing_groups:
raise GroupMissingDataError(missing_groups, property_set_name)
estimator_type = self._property_set_estimator_types[property_set_name]
return estimator_type(self, groups)
def __contains__(self, group):
"""Test if this library contains contributing properties for `group`.
Parameters
----------
group : :class:`Group`
Group whose membership is being tested.
Returns
-------
result : bool
True if this library has properties for `group`.
"""
return group in self.contents
def __iter__(self):
"""Return iterator over all groups with property data in this library.
"""
return iter(self.contents)
def __len__(self):
"""Return number of groups with properties in this library."""
return len(self.contents)
def __getitem__(self, group):
"""Return contributing properties sets for `group`.
If no properties exist for `group`, then return ``{}`` instead of
raising an exception.
Parameters
----------
group : :class:`Group`
Identify group whose property sets are to be retrieved.
Returns
-------
property_sets : dict
Sets of contributing properties for `group`.
"""
return self.contents.get(group, {})
@classmethod
def Load(cls, path):
"""(class method) Load group-additivity library from file-system
`path` or builtin.
Parameters
----------
path : str
Specify either the path to a file containing the data or a symbolic
name of a builtin library to load (*e.g.* ``gas_benson`` to load
gas phase Benson groups.)
Returns
-------
lib : :class:`GroupLibrary`
Group library containing the loaded data.
"""
if os.sep not in path and '.' not in path and not os.path.exists(path):
# [JTF] where's our data directory?
base_path = os.path.join(get_data_dir(), path)
# We want to load the library.yaml in that directory:
path = os.path.join(base_path, 'library.yaml')
else:
# The base path is the directory containing whatever file/directory
# is referenced by path:
base_path = os.path.dirname(path)
# Load the scheme.yaml from the selected data directory:
scheme = GroupAdditivityScheme.Load(os.path.join(base_path,
'scheme.yaml'))
# Use that scheme to load the rest of the library:
return cls._do_load(path, base_path, scheme)
@classmethod
def _Load(cls, path, scheme):
if os.sep not in path and '.' not in path and not os.path.exists(path):
# [JTF] where's our data directory?
base_path = os.path.join(get_data_dir(), path)
# We want to load the library.yaml in that directory:
path = os.path.join(base_path, 'library.yaml')
else:
# The base path is the directory containing whatever file/directory
# is referenced by path:
base_path = os.path.dirname(path)
# Use the scheme passed to us to load the rest of the library:
return cls._do_load(path, base_path, scheme)
@classmethod
def _do_load(cls, path, base_path, scheme):
# Read data from file.
context = {'base_path': base_path}
with open(path) as f:
lib_data = yaml_io.load(
yaml_io.parse(f.read()), context, loader=cls._yaml_loader)
context['units'] = lib_data.units
group_properties = lib_data.groups
other_descriptor_properties = lib_data.other_descriptors
UQ = lib_data.UQ
if cls._property_set_group_yaml_types:
# Prepare property_sets loader.
property_sets_loader = yaml_io.make_object_loader(yaml_io.parse(
'\n'.join(('%r:\n type: %r\n optional: true'
% (str(name),
str(cls._property_set_group_yaml_types[name])))
for name in cls._property_set_group_yaml_types)))
# Read all properties.
lib_contents = {}
for name in group_properties:
group = Group.parse(scheme, name)
if group in lib_contents:
raise KeyError('Multiple definitions of group %s' % group)
property_sets = yaml_io.load(
group_properties[name], context,
loader=property_sets_loader)
lib_contents[group] = property_sets
for name in other_descriptor_properties:
descriptor = Descriptor(scheme, name)
if descriptor in lib_contents:
raise KeyError('Multiple definitions of descriptor %s' %
descriptor)
property_sets = yaml_io.load(
other_descriptor_properties[name], context,
loader=property_sets_loader)
lib_contents[descriptor] = property_sets
# Read UQ data
uq_contents = {}
if UQ:
uq_contents['RMSE'] = yaml_io.load(
UQ['RMSE'], context,
loader=property_sets_loader)
uq_contents['descriptors'] = UQ['InvCovMat']['groups']
uq_contents['mat'] = np.array(UQ['InvCovMat']['mat'])
uq_contents['dof'] = UQ['DOF']
else:
# No property sets defined.
warn('GroupLibrary.load(): No property sets defined.')
lib_contents = {}
uq_contents = {}
new_lib = cls(scheme, lib_contents, uq_contents, path=path)
# Update with included content.
for include_path in lib_data.include:
new_lib.Update(cls._Load(os.path.join(base_path,
include_path), scheme))
return new_lib
def Update(self, lib, overwrite=False):
"""Add complete contents of `lib` into this library.
Parameters
----------
lib : :class:`GroupLibrary`
Library to import from.
overwrite : bool
If True, then existing data may be overwritten by data from `lib`.
"""
for (group, other_property_sets) in list(lib.items()):
if group not in self.contents:
self.contents[group] = {}
property_sets = self.contents[group]
for name in other_property_sets:
if name not in property_sets:
property_sets[name] = other_property_sets[name].copy()
else:
property_sets[name].update(
other_property_sets[name], overwrite)
# UQ stuff can only be loaded once
if self.uq_contents and lib.uq_contents:
raise ValueError('More than one set of uncertainty quantification '
                 'information provided')
if not self.uq_contents:
self.uq_contents = lib.uq_contents
_yaml_loader = yaml_io.make_object_loader(yaml_io.parse("""
units:
type: mapping
default: {}
include:
type: list
item_type: string
default: []
groups:
type: mapping
default: {}
other_descriptors:
type: mapping
default: {}
UQ:
type: mapping
default: {}
"""))
|
StarcoderdataPython
|
8092325
|
<reponame>dearith/mfadmin
#!/usr/bin/env python3
import argparse
import requests
import os
import sys
from mflog import get_logger
DESCRIPTION = "export a kibana dashboard on stdout"
KIBANA_PORT = int(os.environ['MFADMIN_KIBANA_HTTP_PORT'])
KIBANA_PATTERN = \
"http://127.0.0.1:%i/api/kibana/" \
"dashboards/export?dashboard=%s" % (KIBANA_PORT, "%s")
LOGGER = get_logger("export_kibana_dashboard")
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument("DASHBOARD_ID", help="dashboard id")
args = parser.parse_args()
url = KIBANA_PATTERN % args.DASHBOARD_ID
r = requests.get(url, timeout=30)
if r.status_code != 200:
LOGGER.warning("can't get %s with status_code: %i" % (url, r.status_code))
print(r.text)
sys.exit(1)
try:
x = r.json()["objects"][0]["error"]["message"]
LOGGER.warning("can't get %s with message: %s", x)
print(r.text)
except Exception:
pass
print(r.text)
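# Example invocation (the script file name is assumed from the logger name; replace
# DASHBOARD_ID with a real Kibana dashboard id):
#   python3 export_kibana_dashboard.py DASHBOARD_ID > dashboard.json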
|
StarcoderdataPython
|
369428
|
#!/usr/bin/env python3
import argparse
from datetime import datetime
import json
from pathlib import Path
import logging
import sys
import hpc_submit
import mgm_utils
import kaldi_transcript_to_amp_transcript
def main():
"""
Submit a job to run Kaldi speech-to-text on HPC
"""
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument("--debug", default=False, action="store_true", help="Turn on debugging")
parser.add_argument("root_dir", help="Galaxy root directory")
parser.add_argument("input", help="input audio file")
parser.add_argument("kaldi_transcript_json", help="Kaldi JSON output")
parser.add_argument("kaldi_transcript_txt", help="Kalid TXT output")
parser.add_argument("amp_transcript_json", help="AMP JSON output")
parser.add_argument("hpc_timestamps", help="HPC Timestamps output")
args = parser.parse_args()
# get hpc dropbox dir path
dropbox = mgm_utils.get_work_dir(args.root_dir, "hpc_dropbox")
# set up logging
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO,
stream=sys.stderr,
format="%(asctime)s %(levelname)s %(message)s")
print("Preparing kaldi HPC job")
# job parameters
job = {
'script': 'kaldi',
'input_map': {
'input': args.input
},
'output_map': {
'kaldi_json': args.kaldi_transcript_json,
'kaldi_txt': args.kaldi_transcript_txt,
'amp_json': args.amp_transcript_json
}
}
print("Submitting HPC job")
job = hpc_submit.submit_and_wait(dropbox, job)
print("HPC job completed with status: " + job['job']['status'])
if job['job']['status'] != 'ok':
exit(1)
print("Convering output to AMP Transcript JSON")
kaldi_transcript_to_amp_transcript.convert(args.input, args.kaldi_transcript_json, args.kaldi_transcript_txt, args.amp_transcript_json)
print("Job output:")
print(job)
# Write the hpc timestamps output
if "start" in job['job'].keys() and "end" in job['job'].keys():
ts_output = {
"start_time": job['job']["start"],
"end_time": job['job']["end"],
"elapsed_time": (datetime.strptime(job['job']["end"], '%Y-%m-%d %H:%M:%S.%f') - datetime.strptime(job['job']["start"], '%Y-%m-%d %H:%M:%S.%f')).total_seconds()
}
with open(args.hpc_timestamps, 'w') as outfile:
json.dump(ts_output, outfile, default=lambda x: x.__dict__)
exit(0)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
343669
|
"""Dataset with 'Variola virus' sequences.
A dataset with 51 'Variola virus' genomes.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetSingleChrom
ds = GenomesDatasetSingleChrom(__name__, __file__, __spec__)
ds.add_fasta_path("data/variola.fasta.gz", relative=True)
sys.modules[__name__] = ds
|
StarcoderdataPython
|
1865034
|
<reponame>JainSamyak8840/deepchem<gh_stars>0
class Loss:
"""A loss function for use in training models."""
def _compute_tf_loss(self, output, labels):
"""Compute the loss function for TensorFlow tensors.
The inputs are tensors containing the model's outputs and the labels for a
batch. The return value should be a tensor of shape (batch_size) or
(batch_size, tasks) containing the value of the loss function on each
sample or sample/task.
Parameters
----------
output: tensor
the output of the model
labels: tensor
the expected output
Returns
-------
The value of the loss function on each sample or sample/task pair
"""
raise NotImplementedError("Subclasses must implement this")
def _create_pytorch_loss(self):
"""Create a PyTorch loss function."""
raise NotImplementedError("Subclasses must implement this")
class L1Loss(Loss):
"""The absolute difference between the true and predicted values."""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.abs(output - labels)
def _create_pytorch_loss(self):
import torch
return torch.nn.L1Loss(reduction='none')
class HuberLoss(Loss):
"""Modified version of L1 Loss, also known as Smooth L1 loss.
Less sensitive to small errors, linear for larger errors.
Huber loss is generally better than L1 loss for cases where there are both large and small outliers.
By default, Delta = 1.0 and reduction = 'none'.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
return tf.keras.losses.Huber(reduction='none')(output, labels)
def _create_pytorch_loss(self):
import torch
return torch.nn.SmoothL1Loss(reduction='none')
class L2Loss(Loss):
"""The squared difference between the true and predicted values."""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.square(output - labels)
def _create_pytorch_loss(self):
import torch
return torch.nn.MSELoss(reduction='none')
class HingeLoss(Loss):
"""The hinge loss function.
The 'output' argument should contain logits, and all elements of 'labels'
should equal 0 or 1.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
return tf.keras.losses.hinge(labels, output)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.mean(torch.clamp(1 - labels * output, min=0), dim=-1)
return loss
class PoissonLoss(Loss):
"""The Poisson loss function is defined as the mean of the elements of y_pred - (y_true * log(y_pred) for an input of (y_true, y_pred).
Poisson loss is generally used for regression tasks where the data follows the poisson
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
loss = tf.keras.losses.Poisson(reduction='auto')
return loss(labels, output)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.mean(output - labels * torch.log(output))
return loss
class BinaryCrossEntropy(Loss):
"""The cross entropy between pairs of probabilities.
The arguments should each have shape (batch_size) or (batch_size, tasks) and
contain probabilities.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.keras.losses.binary_crossentropy(labels, output)
def _create_pytorch_loss(self):
import torch
bce = torch.nn.BCELoss(reduction='none')
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return torch.mean(bce(output, labels), dim=-1)
return loss
class CategoricalCrossEntropy(Loss):
"""The cross entropy between two probability distributions.
The arguments should each have shape (batch_size, classes) or
(batch_size, tasks, classes), and represent a probability distribution over
classes.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.keras.losses.categorical_crossentropy(labels, output)
def _create_pytorch_loss(self):
import torch
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return -torch.sum(labels * torch.log(output), dim=-1)
return loss
class SigmoidCrossEntropy(Loss):
"""The cross entropy between pairs of probabilities.
The arguments should each have shape (batch_size) or (batch_size, tasks). The
labels should be probabilities, while the outputs should be logits that are
converted to probabilities using a sigmoid function.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.nn.sigmoid_cross_entropy_with_logits(labels, output)
def _create_pytorch_loss(self):
import torch
bce = torch.nn.BCEWithLogitsLoss(reduction='none')
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return bce(output, labels)
return loss
class SoftmaxCrossEntropy(Loss):
"""The cross entropy between two probability distributions.
The arguments should each have shape (batch_size, classes) or
(batch_size, tasks, classes). The labels should be probabilities, while the
outputs should be logits that are converted to probabilities using a softmax
function.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
output, labels = _make_tf_shapes_consistent(output, labels)
output, labels = _ensure_float(output, labels)
return tf.nn.softmax_cross_entropy_with_logits(labels, output)
def _create_pytorch_loss(self):
import torch
ls = torch.nn.LogSoftmax(dim=1)
def loss(output, labels):
output, labels = _make_pytorch_shapes_consistent(output, labels)
return -torch.sum(labels * ls(output), dim=-1)
return loss
class SparseSoftmaxCrossEntropy(Loss):
"""The cross entropy between two probability distributions.
The labels should have shape (batch_size) or (batch_size, tasks), and be
integer class labels. The outputs should have shape (batch_size, classes) or
(batch_size, tasks, classes) and be logits that are converted to probabilities
using a softmax function.
"""
def _compute_tf_loss(self, output, labels):
import tensorflow as tf
if len(labels.shape) == len(output.shape):
labels = tf.squeeze(labels, axis=-1)
labels = tf.cast(labels, tf.int32)
return tf.nn.sparse_softmax_cross_entropy_with_logits(labels, output)
def _create_pytorch_loss(self):
import torch
ce_loss = torch.nn.CrossEntropyLoss(reduction='none')
def loss(output, labels):
# Convert (batch_size, tasks, classes) to (batch_size, classes, tasks)
# CrossEntropyLoss only supports (batch_size, classes, tasks)
# This is for API consistency
if len(output.shape) == 3:
output = output.permute(0, 2, 1)
if len(labels.shape) == len(output.shape):
labels = labels.squeeze(-1)
return ce_loss(output, labels.long())
return loss
class VAE_ELBO(Loss):
"""The Variational AutoEncoder loss, KL Divergence Regularize + marginal log-likelihood.
This losses based on _[1].
ELBO(Evidence lower bound) lexically replaced Variational lower bound.
BCE means marginal log-likelihood, and KLD means KL divergence with normal distribution.
Added hyper parameter 'kl_scale' for KLD.
The logvar and mu should have shape (batch_size, hidden_space).
The x and reconstruction_x should have (batch_size, attribute).
The kl_scale should be float.
Examples
--------
Examples for calculating loss using constant tensor.
batch_size = 2,
hidden_space = 2,
num of original attribute = 3
>>> import numpy as np
>>> import torch
>>> import tensorflow as tf
>>> logvar = np.array([[1.0,1.3],[0.6,1.2]])
>>> mu = np.array([[0.2,0.7],[1.2,0.4]])
>>> x = np.array([[0.9,0.4,0.8],[0.3,0,1]])
>>> reconstruction_x = np.array([[0.8,0.3,0.7],[0.2,0,0.9]])
Case tensorflow
>>> VAE_ELBO()._compute_tf_loss(tf.constant(logvar), tf.constant(mu), tf.constant(x), tf.constant(reconstruction_x))
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.70165154, 0.76238271])>
Case pytorch
>>> (VAE_ELBO()._create_pytorch_loss())(torch.tensor(logvar), torch.tensor(mu), torch.tensor(x), torch.tensor(reconstruction_x))
tensor([0.7017, 0.7624], dtype=torch.float64)
References
----------
.. [1] Kingma, <NAME>., and <NAME>. "Auto-encoding variational bayes." arXiv preprint arXiv:1312.6114 (2013).
"""
def _compute_tf_loss(self, logvar, mu, x, reconstruction_x, kl_scale=1):
import tensorflow as tf
x, reconstruction_x = _make_tf_shapes_consistent(x, reconstruction_x)
x, reconstruction_x = _ensure_float(x, reconstruction_x)
BCE = tf.keras.losses.binary_crossentropy(x, reconstruction_x)
KLD = VAE_KLDivergence()._compute_tf_loss(logvar, mu)
return BCE + kl_scale * KLD
def _create_pytorch_loss(self):
import torch
bce = torch.nn.BCELoss(reduction='none')
def loss(logvar, mu, x, reconstruction_x, kl_scale=1):
x, reconstruction_x = _make_pytorch_shapes_consistent(x, reconstruction_x)
BCE = torch.mean(bce(reconstruction_x, x), dim=-1)
KLD = (VAE_KLDivergence()._create_pytorch_loss())(logvar, mu)
return BCE + kl_scale * KLD
return loss
class VAE_KLDivergence(Loss):
"""The KL_divergence between hidden distribution and normal distribution.
This loss represents KL divergence losses between normal distribution(using parameter of distribution)
based on _[1].
The logvar should have shape (batch_size, hidden_space) and each term represents
standard deviation of hidden distribution. The mean shuold have
(batch_size, hidden_space) and each term represents mean of hidden distribtuon.
Examples
--------
Examples for calculating loss using constant tensor.
batch_size = 2,
hidden_space = 2,
>>> import numpy as np
>>> import torch
>>> import tensorflow as tf
>>> logvar = np.array([[1.0,1.3],[0.6,1.2]])
>>> mu = np.array([[0.2,0.7],[1.2,0.4]])
Case tensorflow
>>> VAE_KLDivergence()._compute_tf_loss(tf.constant(logvar), tf.constant(mu))
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.17381787, 0.51425203])>
Case pytorch
>>> (VAE_KLDivergence()._create_pytorch_loss())(torch.tensor(logvar), torch.tensor(mu))
tensor([0.1738, 0.5143], dtype=torch.float64)
References
----------
.. [1] Kingma, <NAME>., and <NAME>. "Auto-encoding variational bayes." arXiv preprint arXiv:1312.6114 (2013).
"""
def _compute_tf_loss(self, logvar, mu):
import tensorflow as tf
logvar, mu = _make_tf_shapes_consistent(logvar, mu)
logvar, mu = _ensure_float(logvar, mu)
return 0.5 * tf.reduce_mean(
tf.square(mu) + tf.square(logvar) -
tf.math.log(1e-20 + tf.square(logvar)) - 1, -1)
def _create_pytorch_loss(self):
import torch
def loss(logvar, mu):
logvar, mu = _make_pytorch_shapes_consistent(logvar, mu)
return 0.5 * torch.mean(
torch.square(mu) + torch.square(logvar) -
torch.log(1e-20 + torch.square(logvar)) - 1, -1)
return loss
class ShannonEntropy(Loss):
"""The ShannonEntropy of discrete-distribution.
This loss represents shannon entropy based on _[1].
The inputs should have shape (batch size, num of variable) and represents
probabilites distribution.
Examples
--------
Examples for calculating loss using constant tensor.
batch_size = 2,
num_variables = 2,
>>> import numpy as np
>>> import torch
>>> import tensorflow as tf
>>> inputs = np.array([[0.7,0.3],[0.9,0.1]])
Case tensorflow
>>> ShannonEntropy()._compute_tf_loss(tf.constant(inputs))
<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.30543215, 0.16254149])>
Case pytorch
>>> (ShannonEntropy()._create_pytorch_loss())(torch.tensor(inputs))
tensor([0.3054, 0.1625], dtype=torch.float64)
References
----------
.. [1] Chen, <NAME>. "A Brief Introduction to Shannon's Information Theory." arXiv preprint arXiv:1612.09316 (2016).
"""
def _compute_tf_loss(self, inputs):
import tensorflow as tf
# extend a single probability to a binary distribution
if inputs.shape[-1] == 1:
inputs = tf.concat([inputs, 1 - inputs], axis=-1)
return tf.reduce_mean(-inputs * tf.math.log(1e-20 + inputs), -1)
def _create_pytorch_loss(self):
import torch
def loss(inputs):
# extend a single probability to a binary distribution
if inputs.shape[-1] == 1:
inputs = torch.cat((inputs, 1 - inputs), dim=-1)
return torch.mean(-inputs * torch.log(1e-20 + inputs), -1)
return loss
def _make_tf_shapes_consistent(output, labels):
"""Try to make inputs have the same shape by adding dimensions of size 1."""
import tensorflow as tf
shape1 = output.shape
shape2 = labels.shape
len1 = len(shape1)
len2 = len(shape2)
if len1 == len2:
return (output, labels)
if isinstance(shape1, tf.TensorShape):
shape1 = tuple(shape1.as_list())
if isinstance(shape2, tf.TensorShape):
shape2 = tuple(shape2.as_list())
if len1 > len2 and all(i == 1 for i in shape1[len2:]):
for i in range(len1 - len2):
labels = tf.expand_dims(labels, -1)
return (output, labels)
if len2 > len1 and all(i == 1 for i in shape2[len1:]):
for i in range(len2 - len1):
output = tf.expand_dims(output, -1)
return (output, labels)
raise ValueError("Incompatible shapes for outputs and labels: %s versus %s" %
(str(shape1), str(shape2)))
def _make_pytorch_shapes_consistent(output, labels):
"""Try to make inputs have the same shape by adding dimensions of size 1."""
import torch
shape1 = output.shape
shape2 = labels.shape
len1 = len(shape1)
len2 = len(shape2)
if len1 == len2:
return (output, labels)
shape1 = tuple(shape1)
shape2 = tuple(shape2)
if len1 > len2 and all(i == 1 for i in shape1[len2:]):
for i in range(len1 - len2):
labels = torch.unsqueeze(labels, -1)
return (output, labels)
if len2 > len1 and all(i == 1 for i in shape2[len1:]):
for i in range(len2 - len1):
output = torch.unsqueeze(output, -1)
return (output, labels)
raise ValueError("Incompatible shapes for outputs and labels: %s versus %s" %
(str(shape1), str(shape2)))
def _ensure_float(output, labels):
"""Make sure the outputs and labels are both floating point types."""
import tensorflow as tf
if output.dtype not in (tf.float32, tf.float64):
output = tf.cast(output, tf.float32)
if labels.dtype not in (tf.float32, tf.float64):
labels = tf.cast(labels, tf.float32)
return (output, labels)
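# A minimal self-check sketch (not part of the original module; assumes PyTorch is installed).
# It exercises two of the losses above through their PyTorch implementations.
if __name__ == "__main__":
    import torch

    l1 = L1Loss()._create_pytorch_loss()
    # Element-wise absolute differences: expected tensor([0.5, 1.0])
    print(l1(torch.tensor([1.0, 2.0]), torch.tensor([1.5, 1.0])))

    bce = BinaryCrossEntropy()._create_pytorch_loss()
    # One value per sample: mean binary cross entropy over the two outputs
    print(bce(torch.tensor([[0.8, 0.2]]), torch.tensor([[1.0, 0.0]])))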
|
StarcoderdataPython
|
288708
|
import os
import sys
import pickle
from typing import List
import numpy as np
import pandas as pd
from scipy.optimize import minimize_scalar
os.environ["OPENBLAS_NUM_THREADS"] = "1"
sys.path.append("../../")
from environments.Settings.EnvironmentManager import EnvironmentManager
from environments.Settings.Scenario import Scenario
from utils.folder_management import handle_folder_creation
from utils.stats.StochasticFunction import IStochasticFunction, AggregatedFunction, MultipliedStochasticFunction
SCENARIO_NAME = "linear_visit_tanh_price"
FOLDER_RESULT = "../../report/csv/pricing_bandit/{}/".format(SCENARIO_NAME)
CSV_DISCRETE_USER_REGRET = True
CSV_CONTINUE_USER_REGRET = True
CSV_DAILY_DISCRETE_REGRET = True
CSV_DAILY_CONT_REGRET = True
N_ARMS_PRICE = 11
FIXED_BUDGET = [1000 / 3, 1000 / 3, 1000 / 3]
PRICE_PLOT_N_POINTS = 100
MIN_PRICE = 15
MAX_PRICE = 25
FIXED_COST = 12
REWARD_FILE_LIST = ["../../report/project_point_4/Apr17_18-21-54/reward_TS.pkl",
"../../report/project_point_4/Apr17_18-40-58/reward_UCB1.pkl",
"../../report/project_point_4/Apr17_18-47-46/reward_UCBL.pkl",
"../../report/project_point_4/Apr17_20-07-36/reward_UCB1M.pkl",
"../../report/project_point_4/Apr17_21-36-22/reward_UCBLM.pkl",
"../../report/project_point_4/Apr17_18-56-01/reward_EXP3.pkl"]
DAYS_FILE_LIST = ["../../report/project_point_4/Apr17_18-21-54/day_TS.pkl",
"../../report/project_point_4/Apr17_18-40-58/day_UCB1.pkl",
"../../report/project_point_4/Apr17_18-47-46/day_UCBL.pkl",
"../../report/project_point_4/Apr17_20-07-36/day_UCB1M.pkl",
"../../report/project_point_4/Apr17_21-36-22/day_UCBLM.pkl",
"../../report/project_point_4/Apr17_18-56-01/day_EXP3.pkl"]
BANDIT_NAME = ["TS", "UCB1", "UCBL", "UCB1M", "UCBLM", "EXP3"]
n_bandit = len(BANDIT_NAME)
_, folder_path_with_date = handle_folder_creation(result_path=FOLDER_RESULT, retrieve_text_file=False)
assert len(REWARD_FILE_LIST) == len(BANDIT_NAME), "Number of bandits and file list does not match"
assert len(REWARD_FILE_LIST) == len(DAYS_FILE_LIST), "Number of bandits and file list does not match"
# Reading file list
total_reward_list = []
total_day_list = []
for curr_day, _ in enumerate(BANDIT_NAME):
rewards = []
with (open(REWARD_FILE_LIST[curr_day], "rb")) as openfile:
while True:
try:
rewards.append(pickle.load(openfile))
except EOFError:
break
days = []
with (open(DAYS_FILE_LIST[curr_day], "rb")) as openfile:
while True:
try:
days.append(pickle.load(openfile))
except EOFError:
break
rewards = rewards[0]
days = days[0]
total_reward_list.append(rewards)
total_day_list.append(days)
# Compute N-days
n_days = np.inf
for curr_day, day_list_bandit in enumerate(total_day_list):
for j, day_list_exp in enumerate(day_list_bandit):
if len(day_list_exp) < n_days:
n_days = len(day_list_exp)
n_days = n_days - 1
# Compute mean and standard deviation for each day
mean_reward = np.zeros(shape=(n_bandit + 1, n_days))
std_reward = np.zeros(shape=(n_bandit + 1, n_days))
mean_reward[-1] = np.arange(n_days) + 1
std_reward[-1] = np.arange(n_days) + 1
for bandit_idx in range(len(BANDIT_NAME)):
n_exp = len(total_reward_list[bandit_idx])
for curr_day in range(n_days):
daily_values = []
for exp in range(n_exp):
start_user = total_day_list[bandit_idx][exp][curr_day]
end_user = total_day_list[bandit_idx][exp][curr_day + 1]
daily_values.append(np.array(total_reward_list[bandit_idx][exp][start_user:end_user]).sum())
mean_reward[bandit_idx][curr_day] = np.array(daily_values).mean()
std_reward[bandit_idx][curr_day] = np.array(daily_values).std()
mean_df = pd.DataFrame(mean_reward.transpose())
std_df = pd.DataFrame(std_reward.transpose())
for bandit_idx, name in enumerate(BANDIT_NAME):
mean_df.rename(columns={bandit_idx: "mean_reward_{}".format(name)}, inplace=True)
mean_df.rename(columns={n_bandit: "day"}, inplace=True)
for bandit_idx, name in enumerate(BANDIT_NAME):
std_df.rename(columns={bandit_idx: "mean_std_{}".format(name)}, inplace=True)
std_df.rename(columns={n_bandit: "day"}, inplace=True)
total_df = mean_df.merge(std_df, left_on="day", right_on="day")
total_df.to_csv("{}instant_reward.csv".format(folder_path_with_date), index=False)
# Aggregated plot under a fixed budget
mean_scenario: Scenario = EnvironmentManager.load_scenario(SCENARIO_NAME, get_mean_function=True)
crp_function_list: List[IStochasticFunction] = mean_scenario.get_phases()[0].get_crp_function()
click_function_list: List[IStochasticFunction] = mean_scenario.get_phases()[0].get_n_clicks_function()
context_weight = np.array([f.draw_sample(FIXED_BUDGET[i]) for i, f in enumerate(click_function_list)])
context_weight = context_weight / context_weight.sum()  # weights used to retrieve the aggregated CRP
aggregated_crp: AggregatedFunction = AggregatedFunction(f_list=crp_function_list, weights=context_weight)
price_point_arr = np.linspace(MIN_PRICE, MAX_PRICE, PRICE_PLOT_N_POINTS)
crp_data = np.zeros(shape=(1 + 1, PRICE_PLOT_N_POINTS))
crp_data[-1] = price_point_arr
for j, point in enumerate(price_point_arr):
crp_data[0][j] = aggregated_crp.draw_sample(point)
crp_df: pd.DataFrame = pd.DataFrame(crp_data.transpose())
crp_df.rename(columns={0: "mean_aggr_crp", 1: "price"}, inplace=True)
crp_df.to_csv("{}aggregated_crp_data.csv".format(folder_path_with_date), index=False)
price_point_arr = np.linspace(MIN_PRICE, MAX_PRICE, PRICE_PLOT_N_POINTS)
profit_data = np.zeros(shape=(1 + 1, PRICE_PLOT_N_POINTS))
profit_data[-1] = price_point_arr
for j, point in enumerate(price_point_arr):
profit_data[0][j] = aggregated_crp.draw_sample(point) * (point - FIXED_COST)
profit_df: pd.DataFrame = pd.DataFrame(profit_data.transpose())
profit_df.rename(columns={0: "profit_0", 1: "price"}, inplace=True)
profit_df.to_csv("{}aggregated_profit_data.csv".format(folder_path_with_date), index=False)
# Optimal point computation
aggregated_profit: MultipliedStochasticFunction = MultipliedStochasticFunction(aggregated_crp, shift=-FIXED_COST)
min_result = minimize_scalar(aggregated_profit.get_minus_lambda(), bounds=(MIN_PRICE, MAX_PRICE), method="bounded")
optimal_mean_reward_user = aggregated_profit.draw_sample(min_result["x"])
average_daily_users = np.array([f.draw_sample(FIXED_BUDGET[i]) for i, f in enumerate(click_function_list)]).sum()
optimal_mean_daily_reward = optimal_mean_reward_user * average_daily_users
print("Optimal mean reward is {}, reached at x={}\n".format(optimal_mean_reward_user, min_result["x"]))
print("Optimal mean daily reward is {}, since there are {} daily users".format(optimal_mean_daily_reward,
average_daily_users))
# Compute regret
if CSV_DAILY_CONT_REGRET:
mean_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
std_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
mean_regret_data[-1] = np.arange(n_days) + 1
std_regret_data[-1] = np.arange(n_days) + 1
for bandit_idx in range(len(BANDIT_NAME)):
n_exp = len(total_reward_list[bandit_idx])
for curr_day in range(n_days):
daily_values = []
for exp in range(n_exp):
end_user = total_day_list[bandit_idx][exp][curr_day + 1]
daily_values.append((curr_day + 1) * optimal_mean_daily_reward - np.array(
total_reward_list[bandit_idx][exp][0:end_user]).sum())
mean_regret_data[bandit_idx][curr_day] = np.array(daily_values).mean()
std_regret_data[bandit_idx][curr_day] = np.array(daily_values).std()
mean_df = pd.DataFrame(mean_regret_data.transpose())
std_df = pd.DataFrame(std_regret_data.transpose())
for bandit_idx, name in enumerate(BANDIT_NAME):
mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
mean_df.rename(columns={n_bandit: "day"}, inplace=True)
for bandit_idx, name in enumerate(BANDIT_NAME):
std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
std_df.rename(columns={n_bandit: "day"}, inplace=True)
total_df = mean_df.merge(std_df, left_on="day", right_on="day")
total_df.to_csv("{}daily_regret.csv".format(folder_path_with_date), index=False)
# Regret computation with correct arms (and not all the function)
aggregated_profit: MultipliedStochasticFunction = MultipliedStochasticFunction(aggregated_crp, shift=-FIXED_COST)
points = np.linspace(start=MIN_PRICE, stop=MAX_PRICE, num=N_ARMS_PRICE)
profit_points = np.array([aggregated_profit.draw_sample(x) for x in points])
optimal_discrete_profit = profit_points.max()
average_daily_users = np.array([f.draw_sample(FIXED_BUDGET[i]) for i, f in enumerate(click_function_list)]).sum()
optimal_mean_daily_reward_discrete = optimal_discrete_profit * average_daily_users
print("Optimal mean discrete reward is {}, reached for arm index = {}\n".format(optimal_discrete_profit,
profit_points.argmax()))
print("Optimal mean daily reward is {}, since there are {} daily users".format(optimal_mean_daily_reward_discrete,
average_daily_users))
if CSV_DAILY_DISCRETE_REGRET:
mean_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
std_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
mean_regret_data[-1] = np.arange(n_days) + 1
std_regret_data[-1] = np.arange(n_days) + 1
for bandit_idx in range(len(BANDIT_NAME)):
n_exp = len(total_reward_list[bandit_idx])
for curr_day in range(n_days):
daily_values = []
for exp in range(n_exp):
end_user = total_day_list[bandit_idx][exp][curr_day + 1]
daily_values.append((curr_day + 1) * optimal_mean_daily_reward_discrete - np.array(
total_reward_list[bandit_idx][exp][0:end_user]).sum())
mean_regret_data[bandit_idx][curr_day] = np.array(daily_values).mean()
std_regret_data[bandit_idx][curr_day] = np.array(daily_values).std()
mean_df = pd.DataFrame(mean_regret_data.transpose())
std_df = pd.DataFrame(std_regret_data.transpose())
for bandit_idx, name in enumerate(BANDIT_NAME):
mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
mean_df.rename(columns={n_bandit: "day"}, inplace=True)
for bandit_idx, name in enumerate(BANDIT_NAME):
std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
std_df.rename(columns={n_bandit: "day"}, inplace=True)
total_df = mean_df.merge(std_df, left_on="day", right_on="day")
total_df.to_csv("{}daily_discrete_regret.csv".format(folder_path_with_date), index=False)
# Compute regret user-wise
if CSV_DISCRETE_USER_REGRET:
total_users = len(total_reward_list[0][0])
mean_data = np.zeros(shape=(n_bandit + 1, total_users))
std_data = np.zeros(shape=(n_bandit + 1, total_users))
mean_data[-1] = np.arange(total_users)
std_data[-1] = np.arange(total_users)
for bandit_idx in range(n_bandit):
n_exp = len(total_reward_list[bandit_idx])
values = [[] for _ in range(total_users)]
for exp in range(n_exp):
curr_exp_value = 0
for user in range(total_users):
curr_exp_value += total_reward_list[bandit_idx][exp][user]
values[user].append((user + 1) * optimal_discrete_profit - curr_exp_value)
for user in range(total_users):
mean_data[bandit_idx][user] = np.array(values[user]).mean()
std_data[bandit_idx][user] = np.array(values[user]).std()
mean_df = pd.DataFrame(mean_data.transpose())
std_df = pd.DataFrame(std_data.transpose())
for bandit_idx, name in enumerate(BANDIT_NAME):
mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
mean_df.rename(columns={n_bandit: "user"}, inplace=True)
for bandit_idx, name in enumerate(BANDIT_NAME):
std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
std_df.rename(columns={n_bandit: "user"}, inplace=True)
total_df = mean_df.merge(std_df, left_on="user", right_on="user")
total_df.to_csv("{}discrete_user_regret.csv".format(folder_path_with_date), index=False)
# Compute regret user-wise with real loss
if CSV_CONTINUE_USER_REGRET:
total_users = len(total_reward_list[0][0])
mean_data = np.zeros(shape=(n_bandit+1, total_users))
std_data = np.zeros(shape=(n_bandit+1, total_users))
mean_data[-1] = np.arange(total_users)
std_data[-1] = np.arange(total_users)
for bandit_idx in range(n_bandit):
n_exp = len(total_reward_list[bandit_idx])
values = [[] for _ in range(total_users)]
for exp in range(n_exp):
curr_exp_value = 0
for user in range(total_users):
curr_exp_value += total_reward_list[bandit_idx][exp][user]
values[user].append((user + 1) * optimal_mean_reward_user - curr_exp_value)
for user in range(total_users):
mean_data[bandit_idx][user] = np.array(values[user]).mean()
std_data[bandit_idx][user] = np.array(values[user]).std()
mean_df = pd.DataFrame(mean_data.transpose())
std_df = pd.DataFrame(std_data.transpose())
for bandit_idx, name in enumerate(BANDIT_NAME):
mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
mean_df.rename(columns={n_bandit: "user"}, inplace=True)
for bandit_idx, name in enumerate(BANDIT_NAME):
std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
std_df.rename(columns={n_bandit: "user"}, inplace=True)
total_df = mean_df.merge(std_df, left_on="user", right_on="user")
total_df.to_csv("{}cont_user_regret.csv".format(folder_path_with_date), index=False)
|
StarcoderdataPython
|
3267186
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import logging
import shutil
from datetime import datetime, timedelta
from multiprocessing import Process, Queue
from time import sleep
from configuration import (
data_dir,
download_file,
env_bin,
output_dir,
processed_file,
temporary_dir,
)
from wildfires.data.mosaic_modis_tiles import get_file_dates, mosaic_process_date
from wildfires.logging_config import enable_logging
gdal_translate = env_bin / "gdal_translate"
gdalwarp = env_bin / "gdalwarp"
enable_logging(level="INFO")
logger = logging.getLogger(__name__)
def run(queue, data_dir):
"""Process the MODIS data in `data_dir`.
Returns None if an error occurred, and the processed date otherwise.
"""
def handle_error():
queue.put(None)
file_map = get_file_dates(data_dir)
if len(file_map) != 1:
logger.error(f"File map had length '{len(file_map)}' for dir: {data_dir}.")
return handle_error()
date, date_files = next(iter(file_map.items()))
error = False
try:
mosaic_process_date(
date,
date_files,
temporary_dir,
output_dir,
memory=4000,
multi=True,
overwrite=True,
gdal_translate=gdal_translate,
gdalwarp=gdalwarp,
)
except Exception:
logger.exception(f"Processing of '{date}' failed.")
error = True
finally:
# Clean temporary dir.
# NOTE: This makes this code single-threaded!!
if temporary_dir.is_dir():
shutil.rmtree(temporary_dir)
temporary_dir.mkdir()
if error:
return handle_error()
# Record this date as having been processed.
year = int(date[:4])
days = int(date[4:])
queue.put(datetime(year, 1, 1) + timedelta(days=days - 1))
if __name__ == "__main__":
# Continuously monitor the file recording any downloaded files and process any
# previously unprocessed files accordingly.
while True:
logger.info("Checking for downloaded and unprocessed files")
with download_file.open("r") as f:
downloaded = f.read().strip().split("\n")
with processed_file.open("r") as f:
processed = f.read().strip().split("\n")
outstanding = set(downloaded) - set(processed)
logger.info(f"Outstanding dates: {outstanding}")
for date_str in outstanding:
logger.info(f"Processing: {date_str}")
date_dir = data_dir / date_str
# Carry out processing using a new process to avoid potential memory leaks.
queue = Queue()
p = Process(target=run, args=(queue, date_dir))
p.start()
processed_date = queue.get()
p.join()
if processed_date is not None and (
f"{processed_date:%Y.%m.%d}" == date_str
):
logger.info(f"Processed date: {date_str}")
with processed_file.open("a") as f:
f.write(date_str + "\n")
# Remove the original data directory.
shutil.rmtree(date_dir)
else:
logger.error(f"Error during processing of date: {date_str}.")
sleep(100)
|
StarcoderdataPython
|
1851179
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.DeliveryActivityContentInfo import DeliveryActivityContentInfo
class DeliveryContentInfo(object):
def __init__(self):
self._delivery_activity_content = None
self._delivery_content_type = None
@property
def delivery_activity_content(self):
return self._delivery_activity_content
@delivery_activity_content.setter
def delivery_activity_content(self, value):
if isinstance(value, DeliveryActivityContentInfo):
self._delivery_activity_content = value
else:
self._delivery_activity_content = DeliveryActivityContentInfo.from_alipay_dict(value)
@property
def delivery_content_type(self):
return self._delivery_content_type
@delivery_content_type.setter
def delivery_content_type(self, value):
self._delivery_content_type = value
def to_alipay_dict(self):
params = dict()
if self.delivery_activity_content:
if hasattr(self.delivery_activity_content, 'to_alipay_dict'):
params['delivery_activity_content'] = self.delivery_activity_content.to_alipay_dict()
else:
params['delivery_activity_content'] = self.delivery_activity_content
if self.delivery_content_type:
if hasattr(self.delivery_content_type, 'to_alipay_dict'):
params['delivery_content_type'] = self.delivery_content_type.to_alipay_dict()
else:
params['delivery_content_type'] = self.delivery_content_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = DeliveryContentInfo()
if 'delivery_activity_content' in d:
o.delivery_activity_content = d['delivery_activity_content']
if 'delivery_content_type' in d:
o.delivery_content_type = d['delivery_content_type']
return o
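# A minimal round-trip sketch (the payload below is hypothetical, not taken from the Alipay docs):
#   info = DeliveryContentInfo.from_alipay_dict({'delivery_content_type': 'SINGLE'})
#   assert info.to_alipay_dict() == {'delivery_content_type': 'SINGLE'}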
|
StarcoderdataPython
|
4919034
|
<gh_stars>0
#
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from io import BytesIO
from types import MethodType
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from bridge.vars import ERROR_TRACE_FILE
from bridge.utils import ArchiveFileContent, logger, file_get_or_create, BridgeException
from reports.etv import ErrorTraceForests
from marks.models import ErrorTraceConvertionCache, ConvertedTraces
# To create a new function:
# 1) Add the created function to the class ConvertTrace;
# 2) Use self.error_trace for conversion
# 3) Return the converted trace. This value MUST be json serializable. Dict and list are good choices.
# 4) Add docstring to the created function.
# Do not use 'error_trace', 'pattern_error_trace', 'error' as function name.
ET_FILE_NAME = 'converted-error-trace.json'
class ConvertTrace:
def __init__(self, function_name, error_trace):
"""
If something fails, raise BridgeException().
In case of success you just need self.pattern_error_trace.
:param function_name: name of the function (str).
:param error_trace: error trace (str).
:return: nothing.
"""
self.error_trace = error_trace
self.pattern_error_trace = None
if function_name.startswith('_'):
raise BridgeException('Wrong function name')
try:
func = getattr(self, function_name)
if not isinstance(func, MethodType):
raise BridgeException('Wrong function name')
except AttributeError:
raise BridgeException(_('Error trace convert function does not exist'))
self.pattern_error_trace = func()
def callback_call_forests(self):
"""
This function extracts the error trace call stack forests.
A forest is a group of call trees under a callback action.
A call tree is a tree of function names in their execution order.
All its leaves are names of functions whose calls or statements
are marked with the "note" or "warn" attribute. Returns a list of forests.
"""
return ErrorTraceForests(self.error_trace).trace
def thread_call_forests(self):
"""
This function extracts error trace call forests. Each call forest is one or more call trees in the same thread.
A call tree is a tree of names of functions in their execution order. Each call tree root is either a callback action,
if one exists in the corresponding call stack, or a thread function. All call tree leaves are names of functions
whose calls or statements are marked with the "note" or "warn" attribute. If there are several such functions in
a call stack, the latest ones are chosen. The function returns a list of forests. The forest order corresponds
to the execution order of the first statements of the forest threads.
"""
return ErrorTraceForests(self.error_trace, all_threads=True).trace
class GetConvertedErrorTrace:
def __init__(self, func, unsafe):
self.unsafe = unsafe
self.function = func
self._parsed_trace = None
self.error_trace = self.__get_error_trace()
self.converted = self.__convert()
def __get_error_trace(self):
try:
return ArchiveFileContent(self.unsafe, 'error_trace', ERROR_TRACE_FILE).content.decode('utf8')
except Exception as e:
logger.exception(e, stack_info=True)
raise BridgeException("Can't exctract error trace for unsafe '%s' from archive" % self.unsafe.pk)
def __convert(self):
try:
return ErrorTraceConvertionCache.objects.get(unsafe=self.unsafe, function=self.function).converted
except ObjectDoesNotExist:
self._parsed_trace = ConvertTrace(self.function.name, self.error_trace).pattern_error_trace
et_file = file_get_or_create(BytesIO(
json.dumps(self._parsed_trace, ensure_ascii=False, sort_keys=True, indent=4).encode('utf8')
), ET_FILE_NAME, ConvertedTraces
)[0]
ErrorTraceConvertionCache.objects.create(unsafe=self.unsafe, function=self.function, converted=et_file)
return et_file
def parsed_trace(self):
if self._parsed_trace is not None:
return self._parsed_trace
with self.converted.file as fp:
return json.loads(fp.read().decode('utf8'))
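# A minimal usage sketch (not part of the original module). 'error_trace_json' stands in for a
# real error trace string extracted from a report archive; converting a cached trace via
# GetConvertedErrorTrace additionally requires the Bridge/Django application context.
#   forests = ConvertTrace('thread_call_forests', error_trace_json).pattern_error_trace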
|
StarcoderdataPython
|
9716837
|
<filename>train_model.py
import numpy as np
import tensorflow
from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from tensorflow.keras.utils import plot_model
from numpy.random import seed
from read_data import read_data_from_csv
from prepare_data import transform_data, get_dictionary_map
seed(1)
tensorflow.random.set_seed(2)
def get_input_dim():
data = read_data_from_csv()
data_group = transform_data(data)
tag2idx, idx2tag = get_dictionary_map(data, 'tag')
input_dim = len(list(set(data['Word'].to_list())))+1
output_dim = 64
input_length = max([len(s) for s in data_group['Word_idx'].tolist()])
n_tags = len(tag2idx)
return input_dim, output_dim, input_length, n_tags
def get_lstm_model():
model = Sequential()
input_dim, output_dim, input_length, n_tags = get_input_dim()
# Add Embedding layer
model.add(Embedding(input_dim=input_dim, output_dim=output_dim, input_length=input_length))
# Add bidirectional LSTM
model.add(Bidirectional(LSTM(units=output_dim, return_sequences=True, dropout=0.2, recurrent_dropout=0.2), merge_mode = 'concat'))
# Add LSTM
model.add(LSTM(units=output_dim, return_sequences=True, dropout=0.5, recurrent_dropout=0.5))
# Add timeDistributed Layer
model.add(TimeDistributed(Dense(n_tags, activation="softmax")))  # softmax so the per-tag outputs are probabilities, matching categorical_crossentropy below
#Optimiser
# adam = k.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999)
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
def train_model(X, y, model):
loss = list()
for i in range(25):
# fit model for one epoch on this sequence
hist = model.fit(X, y, batch_size=1000, verbose=1, epochs=1, validation_split=0.2)
loss.append(hist.history['loss'][0])
return loss
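# A minimal driver sketch (not part of the original script). X and y are assumed to be the
# padded word-index sequences and one-hot encoded tag arrays produced elsewhere (e.g. by
# helpers in prepare_data); those exact names are assumptions, not confirmed by this file.
#   model = get_lstm_model()
#   loss_history = train_model(X, y, model)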
|
StarcoderdataPython
|
1906255
|
#!/usr/bin/env python3
# vim: sta:et:sw=4:ts=4:sts=4
"""
NAME
fermi_helper.py - Build and run Fermi HEP workflow using Docker/Singularity
SYNOPSIS
python3 fermi_helper.py build-docker-image [--tag TAG]
[--only-dependencies] [--pull-dependencies TAG] [--decaf-root ROOT]
[--decaf-repo REPO] [--decaf-repo-branch BRANCH]
python3 fermi_helper.py run-docker-image [--tag TAG] [--interactive]
python3 fermi_helper.py build-singularity-image [--tag TAG] [--sif SIF]
python3 fermi_helper.py run-singularity-image [--sif SIF] [--interactive]
EXAMPLE
Install Python dependency to run this script
$ python3 -m pip install --user jinja2
Build the Docker image, using pre-built dependencies
$ python3 fermi_helper.py build-docker-image --pull-dependencies thobson2/decaf-fermi:0.2.0-base
Run the workflow within Docker container
$ python3 fermi_helper.py run-docker-image
Build the dependencies and push them to DockerHub
$ python3 fermi_helper.py build-docker-image --only-dependencies --tag USERNAME/decaf-fermi:0.2.0-base
Run a shell within the Docker container interactively
$ python3 fermi_helper.py run-docker-image --interactive
docker$ mpirun --hostfile hostfile_workflow.txt -np 4 ./decaf-henson_python
Convert Docker image to Singularity image
$ python3 fermi_helper.py build-singularity-image
Run Singularity image
$ python3 fermi_helper.py run-singularity-image
DEBUGGING
Run the Docker container interactively
$ python3 fermi_helper.py run-docker-image --interactive
Run the workflow directly
docker$ mpirun --hostfile hostfile_workflow.txt -np 4 ./decaf-henson_python
To make things easier, install a text editor
docker$ apt-get update
docker$ apt-get install -y vim
To run the workflow in steps, first make a backup of the decaf-henson.json
file, before modifying it to remove the second half of the workflow (i.e.
the converter.py, approx.py, chi2.py, and new_box.py steps)
docker$ cp decaf-henson.json decaf-henson.json.bak
docker$ vi decaf-henson.json
Now run the workflow
docker$ mpirun --hostfile hostfile_workflow.txt -np 4 ./decaf-henson_python
Next, using the backup version of the decaf-henson.json file, extract the
command lines to run the second half step-by-step
docker$ grep cmdline decaf-henson.json.bak
Then modify each of the scripts to remove any reference to pyhenson
(specifically h.add, h.get, and h.yield_ functions)
docker$ vi converter.py # step = h.get("step") => step = 99
docker$ vi approx.py # nothing to change here
docker$ vi chi2.py # nothing to change here
docker$ vi new_box.py # nothing to change here
Now each command line can be run directly, after adding "python" to the
beginning, for example (the exact directories will be different)
docker$ python ./converter.py converter-henson.h5 /tmp/tmp.OrAXakGCKI/stage/examples/fermi_hep/deleteMe/
docker$ python ./approx.py converter-henson.h5 henson_approx.json
docker$ python ./chi2.py henson_approx.json exerimental_data.json
docker$ python ./new_box.py henson_approx.json.minimization newbox.json
NOTE: Anything done inside the container will be lost when the
"run-docker-image" command finishes. To make lasting changes, modify the
"decaf" directory from the host directory and re-run "build-docker-image".
DESCRIPTION
This script takes care of the build and execution process needed to run the
Fermi workflow using Decaf within Linux containers.
The build does everything within a container, which means that the entire
build process happens inside of Docker, and the image is, in a sense,
hermetically sealed away from the host system. The catch is that any change
to source code requires a complete re-build of all of Decaf and the
workflow, which can take up to 5 minutes.
The build process happens within Docker first and uses a Dockerfile to
define the commands to be run. This Docker image can be used directly, or
can be converted into a Singularity image and then run using Singularity.
build-docker-image
Copy the source code into Docker and build Decaf and the Fermi
workflow.
run-docker-image
Run the workflow inside of Docker using the already-built Docker image.
build-singularity-image
Convert a Docker image into a Singularity image
run-singularity-image
Run the workflow inside of Singularity
OPTIONS
--tag TAG
Set the Docker image tag to be used. If named something like
USERNAME/IMAGENAME:VERSION, then the image will be pushed to a Docker
registry afterwards. Otherwise, a name like IMAGENAME:VERSION will only
be saved locally.
--sif SIF
Set the path to the Singularity image to be used.
--interactive
Instead of immediately running the workflow, open a shell into the
container to manually run the workflow and debug.
--decaf-root ROOT
Set the location of the decaf source code (including Fermi workflow).
--decaf-repo REPO
If the Decaf root directory doesn't exist, Decaf is first cloned using
this repo url.
--decaf-repo-branch BRANCH
The branch to be checked out after cloning Decaf (see --decaf-repo).
--only-dependencies
Only build the dependencies inside of Docker, without compiled Decaf.
--pull-dependencies TAG
Instead of building the whole set of dependencies, use the pre-built
image TAG.
FILES
go.sh
A helper script used in the Dockerfile to run CMake with the correct
arguments.
docker.env.sh
A helper script that sets some variables to be used inside the go.sh
script.
NOTES
The build-docker-image and run-docker-image commands require Docker to be
installed, but do not require Singularity installed. Likewise,
build-singularity-image and run-singularity-image require Singularity, but
not Docker. This means that those commands can be run on different machines
(provided the image is pushed to a registry, c.f. --tag option above with a
"/" separator)
BUGS
Currently, even if Python source code is changed, the hermetic build and
run process will rebuild everything, despite it being unnecessary for an
interpreted script. This could be fixed in one of two ways: 1) copy only
C++ source code first and then Python source code, or 2) build and run
incrementally.
CHANGELOG
v0.2.6, 16 April 2021
Add missing zlib package.
v0.2.5, 9 April 2021
Fix regression with Spack spec for py-h5py and hdf5 packages.
v0.2.4, 9 April 2021
Add missing pandas dependency.
v0.2.3, 23 March 2021
Update the version of apprentice within the repository to the latest
version from GitHub at this time (commit 6fbf53).
v0.2.2, 18 March 2021
Changed the default branch to the new "fermi-workflow" branch.
Added documentation on running the workflow interactively to aid in
debugging.
v0.2.1, 15 October 2020
Fixed a problem that would cause template parametrization to fail to
apply to the workflow which pythia8-diy would error out on.
The root cause is that pythia8-diy can read the mb7tev.txt file from a
few different places: in the current directory and in a subdirectory
(under "deleteMe"). Although the new mb7tev.txt file is created, the
old one is not automatically removed, so the workflow reads the wrong
file.
In the previous version, a remnant of the old runtime directory code
was used. In this version, moving a file from "$FERMI_PREFIX" would
correspond to moving it from the current directory. But now, the
current directory is nested under /tmp, so the file wasn't moved, and
in particular, the old one wasn't deleted. Now the file is moved from
the current directory, and this resolves the problem.
v0.2.0, 01 October 2020
Remove incremental building for now until it's been tested more. Right
now, only hermetic builds are supported, though now the dependencies
can be pre-built and saved to a Docker registry to be used. This needs
templates to work effectively, hence the introduction of the jinja2
library dependency.
v0.1.0, 24 September 2020
First release with full support for hermetic builds and in-progress
support for incremental ones.
AUTHORS
<NAME> <<EMAIL>>
"""
from subprocess import run
from textwrap import dedent
from pathlib import Path
from jinja2 import Template
setupscript = dedent("""\
#ls -lah /.singularity.d/
. /etc/profile
#cat /.singularity.d/runscript -A
#set -euo pipefail
""")
hermeticscript = dedent("""\
#ls -lah /.singularity.d/
. /etc/profile
#cat /.singularity.d/runscript -A
set -euo pipefail
DECAF_PREFIX=/opt/decaf/stage
DECAF_HENSON_PREFIX=${DECAF_PREFIX:?}/examples/henson
FERMI_PREFIX=${DECAF_PREFIX:?}/examples/fermi_hep
cd "$(TMPDIR=/tmp mktemp -d)"
tmpdir=$PWD
cp -r "${DECAF_PREFIX:?}" "${tmpdir:?}"
cd stage/examples/fermi_hep
echo $PWD
mkdir conf
mv mb7tev.txt conf/
cp hep-fullWorkflow-inputPre.json ./decaf-henson.json
sed -ie 's!/home/oyildiz/mohan/fermi-workflow/install!'"$tmpdir/stage"'!g' ./decaf-henson.json
#sed -ie 's!\\./!'"${FERMI_PREFIX:?}/"'!g' ./decaf-henson.json
#cp "${FERMI_PREFIX:?}/hostfile_workflow.txt" ./hostfile_workflow.txt
cp ../henson/python/decaf-henson_python ./decaf-henson_python
LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH:?}:}${DECAF_PREFIX:?three}/lib
export DECAF_PREFIX LD_LIBRARY_PATH
ls -lah
""")
runscript = dedent("""\
mpirun --hostfile hostfile_workflow.txt -np 4 ./decaf-henson_python
""")
intscript = dedent("""\
exec bash
""")
dockerfile_template = Template(dedent("""\
{% if not pull_dependencies %}
# Build stage with Spack pre-installed and ready to be used
FROM spack/ubuntu-bionic:latest as builder
# What we want to install and how we want to install it
# is specified in a manifest file (spack.yaml)
RUN mkdir /opt/spack-environment \\
&& (echo "spack:" \\
&& echo " view: /opt/view" \\
&& echo " specs:" \\
&& echo " - boost" \\
&& echo " - cmake" \\
&& echo " - henson +mpi-wrappers +python ^[email protected] ^[email protected]" \\
&& echo " - py-h5py ^[email protected]+hl ^[email protected] ^[email protected]" \\
&& echo " - [email protected] ^[email protected]" \\
&& echo " - autoconf" \\
&& echo " - automake" \\
&& echo " - [email protected]" \\
&& echo " - diy@master ^[email protected]" \\
&& echo " config:" \\
&& echo " install_tree: /opt/software" \\
&& echo " concretization: together") > /opt/spack-environment/spack.yaml
    # Install the software, remove unnecessary deps
RUN cd /opt/spack-environment && spack --env . install && spack gc -y
RUN cd /opt/spack-environment && spack --env . add pythia8 && spack --env . install && spack gc -y
## Strip all the binaries
#RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \\
# xargs file -i | \\
# grep 'charset=binary' | \\
# grep 'x-executable\|x-archive\|x-sharedlib' | \\
# awk -F: '{print $1}' | xargs strip -s
# Modifications to the environment that are necessary to run
RUN cd /opt/spack-environment && \\
spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh
# Bare OS image to run the installed executables
FROM ubuntu:18.04 AS dependencies
RUN apt-get update && \\
apt-get install -y \\
build-essential \\
wget \\
gfortran \\
git \\
zlib1g-dev \\
&& \\
rm -rf /var/lib/apt/lists/*
COPY --from=builder /opt/spack-environment /opt/spack-environment
COPY --from=builder /opt/software /opt/software
COPY --from=builder /opt/view /opt/view
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
RUN . /etc/profile && \\
python3.8 -m ensurepip && \\
python3.8 -m pip install virtualenv && \\
python3.8 -m virtualenv /opt/venv && \\
/opt/venv/bin/python -m pip install \\
pandas \\
networkx \\
cython \\
git+https://github.com/HEPonHPC/apprentice.git@<PASSWORD> \\
&& \\
echo '. /opt/venv/bin/activate' > /etc/profile.d/z15_python_environment.sh
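    # Bootstrap the Rivet toolchain (HepMC, FastJet, YODA, Rivet). The first
    # bootstrap pass is allowed to fail; YODA's pre-generated util.cpp is then
    # removed (presumably so it gets regenerated by the local Cython) and the
    # bootstrap is re-run to completion.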
RUN . /etc/profile && \\
mkdir /opt/rivet && \\
cd /opt/rivet && \\
wget https://phab.hepforge.org/source/rivetbootstraphg/browse/3.0.2/rivet-bootstrap?view=raw -O rivet-bootstrap && \\
chmod +x rivet-bootstrap && \\
{ RIVET_VERSION=2.7.2 YODA_VERSION=1.7.5 HEPMC_VERSION=2.06.09 FASTJET_VERSION=3.3.2 ./rivet-bootstrap || true; } && \\
rm YODA-1.7.5/pyext/yoda/util.cpp && \\
RIVET_VERSION=2.7.2 YODA_VERSION=1.7.5 HEPMC_VERSION=2.06.09 FASTJET_VERSION=3.3.2 ./rivet-bootstrap && \\
echo '. /opt/rivet/local/rivetenv.sh' > /etc/profile.d/z20_rivet_environment.sh
{% endif %}
{% if pull_dependencies %}
FROM {{ pull_dependencies }} AS dependencies
{% endif %}
FROM dependencies AS final
{% if not only_dependencies %}
SHELL ["/bin/bash", "--rcfile", "/etc/profile", "-l", "-c"]
WORKDIR /opt
COPY {{ decaf_root }} /opt/decaf
WORKDIR /opt/decaf
COPY go.sh docker.env.sh /opt/decaf/
RUN sed -ie '/extern "C" void \*(\*dlsym(void \*handle, const char \*symbol))();/d' /opt/view/include/Pythia8/PythiaStdlib.h && \\
rm -rf /opt/view/include/diy/thirdparty/fmt && \\
cp -r include/fmt /opt/view/include/diy/thirdparty/fmt
RUN . /etc/profile && \\
. /opt/venv/bin/activate && \\
ENV=docker ./go.sh cmake && \\
ENV=docker ./go.sh make
{% endif %}
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile"]
CMD ["-l"]
"""))
def _make_script(interactive: bool) -> str:
script = setupscript
script += hermeticscript
if interactive:
script += intscript
else:
script += runscript
return script
def main_build_docker_image(decaf_root, decaf_repo, decaf_repo_branch, tag, only_dependencies, pull_dependencies):
if not decaf_root.exists():
run(
['git', 'clone', str(decaf_repo), str(decaf_root)],
check=True,
)
run(
['git', 'checkout', str(decaf_repo_branch)],
cwd=decaf_root,
)
dockerfile = dockerfile_template.render(
decaf_root=decaf_root.relative_to(Path.cwd()),
only_dependencies=only_dependencies,
pull_dependencies=pull_dependencies,
).encode('utf-8')
if only_dependencies:
target = 'dependencies'
else:
target = 'final'
run(
['docker', 'build', '-t', str(tag), '-f', '-', '--target', str(target), '.'],
input=dockerfile,
check=True,
)
if '/' in tag:
run(
['docker', 'push', str(tag)],
check=True,
)
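# Run the workflow inside the Docker image. The current working directory is
# bind-mounted into the container at the same path, and the generated shell
# script is passed to bash via -c; the trailing 'decaf-fermi-wrapper' argument
# only sets $0 for that script.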
def main_run_docker_image(tag, interactive):
script = _make_script(interactive)
run(
['docker', 'run', '-it', '--rm', '--mount', 'type=bind,src=' + str(Path.cwd()) + ',dst=' + str(Path.cwd()), str(tag), '--rcfile', '/etc/profile', '-c', script, 'decaf-fermi-wrapper'],
check=True,
)
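# Convert the Docker image into a Singularity SIF. A tag containing '/' is
# assumed to live in a remote registry (docker://); otherwise the image is
# taken from the local Docker daemon (docker-daemon://).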
def main_build_singularity_image(tag, sif):
if '/' in tag:
tag = 'docker://{}'.format(tag)
else:
tag = 'docker-daemon://{}'.format(tag)
run(
['singularity', 'build', str(sif), str(tag)],
check=True,
)
def main_run_singularity_image(sif, interactive):
    script = _make_script(interactive)
    run(
        # Use the requested SIF image instead of a hard-coded path so the
        # --sif command-line option is honoured.
        ['singularity', 'exec', str(sif), 'bash', '--rcfile', '/etc/profile', '-c', script, 'decaf-fermi-wrapper'],
        check=True,
    )
def cli():
def wrap_exception(func):
def inner(s):
try:
return func(s)
except Exception as e:
raise argparse.ArgumentTypeError(str(e))
return inner
@wrap_exception
def tag(s):
        assert sum(1 for c in s if c == '/') <= 2, 'Expected at most 2 "/" separators in tag (imagename, user/imagename, or registry/user/imagename)'
assert ':' in s, 'Expected ":" separator in tag (version, e.g. "latest")'
return s
@wrap_exception
def sif(s):
path = Path(s)
path = path.resolve(strict=False) # new in Python3.6+
assert path.suffix == '.sif', 'Expected sif extension'
assert path.parent.exists(), 'Expected parent directory to exist: {}'.format(path.parent)
return path
import argparse
parser = argparse.ArgumentParser()
parser.set_defaults(main=None)
subparsers = parser.add_subparsers()
subparser = subparsers.add_parser('build-docker-image')
subparser.set_defaults(main=main_build_docker_image)
subparser.add_argument('--decaf-root', type=Path, default=Path.cwd() / 'decaf')
subparser.add_argument('--decaf-repo', default='<EMAIL>:tpeterka1/decaf.git')
subparser.add_argument('--decaf-repo-branch', default='fermi-workflow')
subparser.add_argument('--tag', default='decaf-fermi:latest', type=tag,
help='e.g. MyUsername/MyImage:latest or my.registry.example.com:5000/MyUsername/MyImage:latest')
subparser.add_argument('--only-dependencies', action='store_true')
subparser.add_argument('--pull-dependencies', type=tag)
subparser = subparsers.add_parser('run-docker-image')
subparser.set_defaults(main=main_run_docker_image)
subparser.add_argument('--tag', default='decaf-fermi:latest', type=tag,
help='e.g. MyUsername/MyImage:latest or my.registry.example.com:5000/MyUsername/MyImage:latest')
subparser.add_argument('--interactive', action='store_true')
subparser = subparsers.add_parser('build-singularity-image')
subparser.set_defaults(main=main_build_singularity_image)
subparser.add_argument('--tag', '-t', default='decaf-fermi:latest', type=tag,
help='e.g. MyUsername/MyImage:latest or my.registry.example.com:5000/MyUsername/MyImage:latest')
subparser.add_argument('--sif', '-s', default=sif('./decaf-fermi.sif'), type=sif,
help='e.g. path/to/image.sif')
subparser = subparsers.add_parser('run-singularity-image')
subparser.set_defaults(main=main_run_singularity_image)
subparser.add_argument('--sif', default=sif('./decaf-fermi.sif'), type=sif,
help='e.g. path/to/image.sif')
subparser.add_argument('--interactive', action='store_true')
args = vars(parser.parse_args())
main = args.pop('main')
if main is None:
parser.print_usage()
return
main(**args)
if __name__ == '__main__':
cli()