id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1 value) |
---|---|---|
3588631
|
from dataclasses import dataclass
from typing import List, Optional, Dict
@dataclass
class Japanese:
word: Optional[str] = None
reading: Optional[str] = None
@dataclass
class Link:
text: str
url: str
@dataclass
class Source:
language: str
word: str
@dataclass
class Sense:
english_definitions: List[str]
parts_of_speech: List[Optional[str]]
links: List[Link]
tags: List[str]
restrictions: List[str]
see_also: List[str]
antonyms: List[str]
source: List[Source]
info: List[str]
sentences: Optional[List[str]] = None
@dataclass
class WordData:
slug: str
tags: List[str]
jlpt: List[str]
japanese: List[Japanese]
senses: List[Sense]
attribution: Dict[str, bool]
is_common: bool = False
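# A minimal usage sketch (hypothetical values in the style of a Jisho-like dictionary
# entry; not part of the original module):
example = WordData(
    slug="inu",
    tags=[],
    jlpt=["jlpt-n5"],
    japanese=[Japanese(word="犬", reading="いぬ")],
    senses=[Sense(
        english_definitions=["dog"],
        parts_of_speech=["Noun"],
        links=[], tags=[], restrictions=[], see_also=[], antonyms=[],
        source=[], info=[],
    )],
    attribution={"jmdict": True, "jmnedict": False},
    is_common=True,
)
print(example.japanese[0].reading)  # いぬ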
|
StarcoderdataPython
|
11208347
|
<reponame>HHC0209/student_recognition_system
from PIL import Image
import os
# Save all photos as thumbnails no larger than 600*800
# (only needs to be run once, after all the pictures have been added)
class PictureResizer:
def __init__(self, path):
self.path = path
self.all_folder = []
def load_all_photo(self):
folders = os.listdir(self.path)
for folder in folders:
path = self.path + '/' + folder
photo = os.listdir(path)
self.all_folder.append([path + '/' + item for item in photo])
def compress_photo(self):
if self.all_folder:
for folder in self.all_folder:
if folder:
for picture in folder:
try:
img = Image.open(picture)
img.thumbnail((600, 800))
img.save(picture)
except Exception:
print("Student photo corrupted: " + picture)
def start(self):
self.load_all_photo()
self.compress_photo()
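# Hypothetical usage: './photos' stands in for the real photo directory, which is
# expected to contain one sub-folder of images per student (see load_all_photo above).
resizer = PictureResizer('./photos')
resizer.start()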
|
StarcoderdataPython
|
86889
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import NoReturn
import signal
import sys
import os
from dotenv import load_dotenv, find_dotenv
from psycopg.rows import dict_row
import psycopg
load_dotenv(find_dotenv('.env.sample'))
# If `.env` exists, let it override the sample env file.
load_dotenv(override=True)
if os.getenv('docker') == 'true':
PSQL_URL = os.environ['PSQL_DOCKER_URL']
else:
PSQL_URL = os.environ['PSQL_URL']
PSQL = psycopg.Connection.connect(PSQL_URL, row_factory=dict_row)
_sql_path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(_sql_path, 'get_pending_txs.sql')) as f:
get_pending_txs_sql = f.read()
IN_SQL = """
UPDATE
txs
SET
(
to_tx_hash,
received_value,
pending,
received_time,
received_token,
swap_success
) = (
%s,
%s,
false,
%s,
%s,
%s
)
WHERE
kappa = %s;
"""
DEL_SQL = """
DELETE FROM
lost_txs
WHERE
kappa = %s;
"""
if __name__ == '__main__':
updated = 0
def sig_handler(*_) -> NoReturn:
print(f'updated {updated} rows')
sys.exit(0)
signal.signal(signal.SIGINT, sig_handler)
with PSQL.cursor() as c:
c.execute(get_pending_txs_sql)
print(f'able to complete {c.rowcount} rows.')
for tx in c.fetchall():
c.execute(
IN_SQL,
(tx['to_tx_hash'], tx['received_value'], tx['received_time'],
tx['received_token'], tx['swap_success'], tx['kappa']))
assert c.rowcount == 1
c.execute(DEL_SQL, (tx['kappa'], ))
assert c.rowcount == 1
PSQL.commit()
updated += 1
print(f'updated {updated} rows')
|
StarcoderdataPython
|
335674
|
import progeny
class Base(progeny.Base):
pass
class Alpha(Base):
pass
class Bravo(Alpha):
@classmethod
def _get_progeny_key(cls):
return '={}='.format(cls.__name__)
class Charlie(Bravo):
__progeny_key__ = 'charlie'
class Delta(Charlie):
pass
def test_progeny():
assert Base.progeny == {
Alpha: Alpha,
'=Bravo=': Bravo,
'charlie': Charlie,
'=Delta=': Delta,
}
assert len(Base.progeny) == 4
|
StarcoderdataPython
|
193132
|
# coding=utf-8
"""Main bot file"""
import aiohttp
import time
from collections import Counter, deque
from pathlib import Path
import discord
from discord.ext import commands
from pyppeteer import launch, errors
from bot.utils.logging import setup_logger
from bot.utils.over import send, _default_help_command
discord.abc.Messageable.send = send
class Bot(commands.AutoShardedBot):
"""Subclasses AutoShardedBot to give more flexibility with design"""
def __init__(self, *args, **kwargs):
self.config = kwargs.pop('config')
self.start_time = time.time()
super().__init__(*args, **kwargs)
# pay no mind to this ugliness.
self.remove_command('help')
self.command(**self.help_attrs)(_default_help_command)
self.pm_help = None
self.case_insensitive = True
self.app_info = None
shard = f"| Shard {self.shard_id}" if self.shard_id else ""
self.activity = discord.Game(name=f"{self.command_prefix}help {shard}")
self.session = aiohttp.ClientSession(loop=self.loop, headers={"User-Agent": self.http.user_agent})
self.browser_page = None
self.browser = self.loop.create_task(self.create_browser())
self.priv = self.config['extras'].get('privatebin', 'https://privatebin.net')
self.polr = self.config['extras'].get('polr', None)
self.commands_used = Counter()
self.commands_used_in = Counter()
self.errors = deque(maxlen=10)
self.revisions = None
discord_logger = setup_logger("discord")
self.logger = setup_logger("Bot")
self.command_logger = setup_logger("Commands")
self.loggers = [discord_logger, self.logger, self.command_logger]
_modules = [mod.stem for mod in Path("bot/cogs").glob("*.py")]
self.load_extension(f"bot.cogs.core")
self.load_extension(f"bot.cogs.owner")
if 'bare' in kwargs.pop('argv'): # load the bot bare-bones to diagnose issues
return
for module in _modules:
try:
if module in ['core', 'errors']:
pass
self.load_extension(f"bot.cogs.{module}")
except discord.DiscordException as exc:
self.logger.error(f"{type(exc).__name__} occurred when loading {module}: {exc}")
# make sure to only print ready text once
self._loaded = False
async def on_ready(self):
"""Function called when bot is ready or resumed"""
if self._loaded is False:
end_time = time.time() - self.start_time
self.app_info = await self.application_info()
self.logger.info(f"Loaded Bot:")
self.logger.info(f"Logged in as {self.user}")
self.logger.info(f"ID is {self.user.id}")
self.logger.info(f"Owned by {self.app_info.owner}")
self.description = f"Hello, this is the help menu for {self.user.name}!"
self.logger.info(f"Bot started in {end_time} seconds")
self._loaded = True
return
self.logger.info(f"Resumed bot session on shard {self.shard_id}!")
async def create_browser(self):
"""Task to create browser for scraping purposes."""
await self.wait_until_ready()
self.browser = await launch(args=["--no-sandbox"], headless=True)
self.browser_page = await self.browser.newPage()
# noinspection PyProtectedMember
async def close(self):
"""Function called when closing the bot"""
try:
await self.browser_page.close() or self.logger.info("Browser page successfully closed!")
except (errors.PageError, AttributeError): # browser was never created; edge case
pass
await self.browser.close() or self.logger.info("Browser successfully closed!")
await super().close()
await self.http._session.close()
await self.session.close()
for logger in self.loggers:
for handler in logger.handlers:
logger.removeHandler(handler)
|
StarcoderdataPython
|
1823542
|
import pytest
from flask import Flask
from werkzeug.security import check_password_hash
from app.models import User, db, CountdownResult, get_db_column, Cite
from app.tools.auth import _check_token
from tests.conftest import test_user_email
def test_new_user(user, app: Flask):
"""Testing new user creation"""
# given a user model
# when user is created
with app.app_context():
db.session.add(user)
db.session.commit()
# then the email, password, name and admin role are set correctly
assert user.email == test_user_email
assert user.password != 'password'
assert check_password_hash(user.password, 'password')
assert user.name == 'Tomas'
assert not user.admin
def test_new_password_reset_token_creation(user: User, app: Flask):
# given a user model
with app.app_context():
# when a new token is created
user.create_token_for('password_reset')
db.session.add(user)
db.session.commit()
# then the fields connected with token should be set correctly
assert user.password_reset_token != ''
assert _check_token(user.password_reset_hash, user.password_reset_token)
assert user.password_reset_sent_at
def test_new_activation_account_token_creation(user: User, app: Flask):
# given a user model
with app.app_context():
# when a token is created
user.create_token_for('activation')
db.session.add(user)
db.session.commit()
# then fields associated with the token should be set
assert user.activation_token != ''
assert _check_token(user.activation_hash, user.activation_token)
assert user.activation_sent_at
def test_activation_account_by_token(user: User, app: Flask):
# given a user model
with app.app_context():
# when a token is created
user.create_token_for('activation')
db.session.add(user)
db.session.commit()
# then activation function should return true
assert user.activate_user_account(user.activation_token)
assert user.activated
def test_check_user_password(user: User, app: Flask):
# given a user model
actual_user_password = 'password'
# when we create a user
with app.app_context():
db.session.add(user)
db.session.commit()
# then we should get True from the password check function
assert user.check_password(actual_user_password)
def test_user_repr(user: User, app: Flask):
# given a user model
# when we create a user
with app.app_context():
db.session.add(user)
db.session.commit()
# then the repr should be equal to this
assert repr(user) == 'User(id=2, email=%s)' % test_user_email
def test_if_user_exist_with_valid_email(user: User, app: Flask):
# given a user model
# when we create a user
with app.app_context():
db.session.add(user)
db.session.commit()
# then a user with that email should exist
assert User.is_user_with_email_exist(user.email)
def test_if_user_not_exist_with_nonvalid_email(user: User, app: Flask):
# given a user model
# when we create a user
with app.app_context():
db.session.add(user)
db.session.commit()
# then no user with that email should exist
assert not User.is_user_with_email_exist('<EMAIL>')
def test_user_is_setting_a_new_password(user: User, app: Flask):
# given a user model
# when we create a user and set a new password
with app.app_context():
db.session.add(user)
db.session.commit()
new_password = '<PASSWORD>'
user.set_password(new_password)
# then the new password hash will be valid
assert user.check_password(new_password)
def test_user_check_valid_password_reset_token(user: User, app: Flask):
# given a user model
# when we create a user and a password reset token
with app.app_context():
db.session.add(user)
db.session.commit()
user.create_token_for('password_reset')
db.session.commit()
# then the password reset token should be valid
assert user.check_password_reset_token(user.password_reset_token)
# Below CountdownResult class tests
def test_create_countdown_result(user: User, app: Flask, countdown_result: CountdownResult):
# given a user model and countdown result model
# when we create a countdown result entry
with app.app_context():
db.session.add(user)
countdown_result.user_id = user.id
db.session.add(countdown_result)
db.session.commit()
# all fields should be filled
assert countdown_result.id
assert countdown_result.start_date
assert countdown_result.finish_date
assert countdown_result.start_date < countdown_result.finish_date
assert countdown_result.success
assert countdown_result.user_id == user.id
def test_countdown_result_repr_function(countdown_result: CountdownResult, app: Flask, user: User):
# given a model
# when we call a repr function
with app.app_context():
db.session.add(user)
countdown_result.user_id = user.id
db.session.add(countdown_result)
db.session.commit()
# we will get a valid string
assert repr(countdown_result) == 'CountdownResult(id=1, start_date=2021-01-01 00:00:00,' \
' finish_date=2021-01-02 00:00:00, success=True, user_id=2)'
def test_countdown_result_find_by_valid_query_parameters(countdown_result_filters: dict, user: User, app: Flask,
countdown_result: CountdownResult):
# given a model
# when we add some countdown results to db
with app.app_context():
db.session.add(user)
countdown_result.user_id = user.id
db.session.add(countdown_result)
db.session.commit()
# we can find them by user id
countdown_results, number_of_elements = CountdownResult.find_by_query_parameter(countdown_result_filters,
user.id)
assert number_of_elements == 1
assert countdown_results
def test_countdown_result_delete_all_for_user(user: User, app: Flask, countdown_result: CountdownResult):
# given a model
# when we add some countdown results to db and then delete
with app.app_context():
db.session.add(user)
countdown_result.user_id = user.id
db.session.add(countdown_result)
db.session.commit()
number_of_records_before_removal = CountdownResult.query.count()
CountdownResult.delete_all_by_user_id(user.id)
db.session.commit()
# there should be no records left
assert CountdownResult.query.count() == 0
assert number_of_records_before_removal != CountdownResult.query.count()
def test_countdown_results_find_db_column():
# given a model
# when we have some valid countdown result filter keys
filter_keys = ['startDate', 'finishDate', 'success']
# they should be found
for key in filter_keys:
assert get_db_column(key)
def test_countdown_results_doesnt_find_db_column():
# given a model
# when we have some countdown result filter keys which are not valid
filter_keys = ['randomValue', 'DROP TABLE users;', 'SELECT * FROM USER']
# they should raise a KeyError because we didn't define them
for key in filter_keys:
with pytest.raises(KeyError):
get_db_column(key)
# Below Cite class tests
def test_cite_creation(cite: Cite, app: Flask):
# given a model
# when we create a cite and save in db
with app.app_context():
db.session.add(cite)
db.session.commit()
# all fields should be set
assert cite.id
assert cite
assert cite.text == 'Some important and brilliant cite text.'
def test_cite_repr_format(cite: Cite, app: Flask):
# given a model
# when we create a cite and save in db
with app.app_context():
db.session.add(cite)
db.session.commit()
# all fields should be set
number_of_cites = Cite.query.count()
assert repr(cite) == 'Cite(id=%d, cite=Some important and brilliant cite text.)' % number_of_cites
|
StarcoderdataPython
|
3443278
|
# -*- coding: utf-8 -*-
"""
@navrajnarula
https://machinelearningmastery.com/persistence-time-series-forecasting-with-python/
"""
import pandas
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot
from pandas import DataFrame
#def parser(x):
# return datetime.strptime('190'+x, '%Y-%m')
series = read_csv('data/130N_Cycles_1-47.csv', header=0, parse_dates=[0], index_col=0, squeeze=True)
series.plot()
pyplot.show()
# Create lagged dataset
values = DataFrame(series.values)
dataframe = pandas.concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
print(dataframe.head(5))
# split into train and test sets
X = dataframe.values
train_size = int(len(X) * 0.66)
train, test = X[1:train_size], X[train_size:]
train_X, train_y = train[:,0], train[:,1]
test_X, test_y = test[:,0], test[:,1]
# persistence model
def model_persistence(x):
return x
print(test_X)
# walk-forward validation
predictions = list()
for x in test_X:
yhat = model_persistence(x)
predictions.append(yhat)
#test_score = mean_squared_error(test_y, predictions)
#print('Test MSE: %.3f' % test_score)
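# A minimal sketch of the scoring step commented out above, assuming scikit-learn is installed:
from sklearn.metrics import mean_squared_error
test_score = mean_squared_error(test_y, predictions)
print('Test MSE: %.3f' % test_score)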
# plot predictions and expected results
pyplot.plot(train_y)
pyplot.xlabel('Time [Minutes]')
pyplot.ylabel('Load [Newtons]')
pyplot.plot([None for i in train_y] + [x for x in test_y])
pyplot.plot([None for i in train_y] + [x for x in predictions])
print(len(predictions))
pyplot.show()
print(len(train_y), len(test_y))
print(len(train_y), len(predictions))
print(len(train_y)+len(predictions))
predictions = predictions
import matplotlib.pyplot as plt
#lines = plt.plot(predictions)
#print(lines)
|
StarcoderdataPython
|
8010493
|
<gh_stars>0
"""Asynchronous client for the PVOutput API."""
|
StarcoderdataPython
|
3394026
|
import email, email.message
import os
"""
Convert an mht file into an html file
"""
def convert(filename):
mht = open(filename, "rb")
print("转化中...\n")
a = email.message_from_bytes(mht.read())
parts = a.get_payload()
if not type(parts) is list:
parts = [a]
for p in parts:
if not os.path.exists('./works'):
os.mkdir('./works')
open('./works/records.html', "wb").write(p.get_payload(decode=True))
print("完成转化.\n")
if __name__ == "__main__":
convert('./records.mht')
|
StarcoderdataPython
|
9611726
|
<gh_stars>0
def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Education, obj[4]: Occupation, obj[5]: Bar, obj[6]: Restaurant20to50, obj[7]: Direction_same, obj[8]: Distance
# {"feature": "Coupon", "instances": 8147, "metric_value": 0.9848, "depth": 1}
if obj[2]>1:
# {"feature": "Distance", "instances": 5889, "metric_value": 0.9535, "depth": 2}
if obj[8]<=2:
# {"feature": "Passanger", "instances": 5308, "metric_value": 0.9386, "depth": 3}
if obj[0]<=2:
# {"feature": "Education", "instances": 3482, "metric_value": 0.9606, "depth": 4}
if obj[3]>1:
# {"feature": "Time", "instances": 1973, "metric_value": 0.9718, "depth": 5}
if obj[1]<=2:
# {"feature": "Direction_same", "instances": 1359, "metric_value": 0.9624, "depth": 6}
if obj[7]<=0:
# {"feature": "Restaurant20to50", "instances": 771, "metric_value": 0.975, "depth": 7}
if obj[6]<=3.0:
# {"feature": "Occupation", "instances": 749, "metric_value": 0.9778, "depth": 8}
if obj[4]<=13.370987400688758:
# {"feature": "Bar", "instances": 651, "metric_value": 0.9833, "depth": 9}
if obj[5]>-1.0:
return 'True'
elif obj[5]<=-1.0:
return 'True'
else: return 'True'
elif obj[4]>13.370987400688758:
# {"feature": "Bar", "instances": 98, "metric_value": 0.9217, "depth": 9}
if obj[5]<=1.0:
return 'True'
elif obj[5]>1.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>3.0:
# {"feature": "Occupation", "instances": 22, "metric_value": 0.7732, "depth": 8}
if obj[4]<=12:
# {"feature": "Bar", "instances": 19, "metric_value": 0.8315, "depth": 9}
if obj[5]>0.0:
return 'True'
elif obj[5]<=0.0:
return 'True'
else: return 'True'
elif obj[4]>12:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Occupation", "instances": 588, "metric_value": 0.9417, "depth": 7}
if obj[4]<=19.294968306511763:
# {"feature": "Bar", "instances": 541, "metric_value": 0.9335, "depth": 8}
if obj[5]>-1.0:
# {"feature": "Restaurant20to50", "instances": 537, "metric_value": 0.9308, "depth": 9}
if obj[6]<=1.0:
return 'True'
elif obj[6]>1.0:
return 'True'
else: return 'True'
elif obj[5]<=-1.0:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.8113, "depth": 9}
if obj[6]<=2.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>19.294968306511763:
# {"feature": "Bar", "instances": 47, "metric_value": 0.9971, "depth": 8}
if obj[5]<=2.0:
# {"feature": "Restaurant20to50", "instances": 38, "metric_value": 0.998, "depth": 9}
if obj[6]>0.0:
return 'True'
elif obj[6]<=0.0:
return 'False'
else: return 'False'
elif obj[5]>2.0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.7642, "depth": 9}
if obj[6]>1.0:
return 'True'
elif obj[6]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Direction_same", "instances": 614, "metric_value": 0.9877, "depth": 6}
if obj[7]<=0:
# {"feature": "Bar", "instances": 467, "metric_value": 0.9748, "depth": 7}
if obj[5]<=3.0:
# {"feature": "Occupation", "instances": 452, "metric_value": 0.9699, "depth": 8}
if obj[4]>0:
# {"feature": "Restaurant20to50", "instances": 450, "metric_value": 0.971, "depth": 9}
if obj[6]<=3.0:
return 'True'
elif obj[6]>3.0:
return 'True'
else: return 'True'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[5]>3.0:
# {"feature": "Occupation", "instances": 15, "metric_value": 0.9183, "depth": 8}
if obj[4]<=16:
# {"feature": "Restaurant20to50", "instances": 14, "metric_value": 0.8631, "depth": 9}
if obj[6]<=3.0:
return 'False'
elif obj[6]>3.0:
return 'True'
else: return 'True'
elif obj[4]>16:
return 'True'
else: return 'True'
else: return 'False'
elif obj[7]>0:
# {"feature": "Occupation", "instances": 147, "metric_value": 0.9984, "depth": 7}
if obj[4]<=8.013605442176871:
# {"feature": "Restaurant20to50", "instances": 93, "metric_value": 0.9992, "depth": 8}
if obj[6]>0.0:
# {"feature": "Bar", "instances": 72, "metric_value": 0.9978, "depth": 9}
if obj[5]<=3.0:
return 'False'
elif obj[5]>3.0:
return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Bar", "instances": 21, "metric_value": 0.9183, "depth": 9}
if obj[5]<=3.0:
return 'True'
elif obj[5]>3.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]>8.013605442176871:
# {"feature": "Bar", "instances": 54, "metric_value": 0.9751, "depth": 8}
if obj[5]<=1.0:
# {"feature": "Restaurant20to50", "instances": 37, "metric_value": 0.909, "depth": 9}
if obj[6]<=2.0:
return 'False'
elif obj[6]>2.0:
return 'False'
else: return 'False'
elif obj[5]>1.0:
# {"feature": "Restaurant20to50", "instances": 17, "metric_value": 0.9774, "depth": 9}
if obj[6]>1.0:
return 'False'
elif obj[6]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[3]<=1:
# {"feature": "Restaurant20to50", "instances": 1509, "metric_value": 0.9431, "depth": 5}
if obj[6]<=2.0:
# {"feature": "Time", "instances": 1408, "metric_value": 0.9512, "depth": 6}
if obj[1]<=1:
# {"feature": "Direction_same", "instances": 847, "metric_value": 0.9751, "depth": 7}
if obj[7]<=0:
# {"feature": "Occupation", "instances": 432, "metric_value": 0.996, "depth": 8}
if obj[4]<=18.882886482140197:
# {"feature": "Bar", "instances": 400, "metric_value": 0.9974, "depth": 9}
if obj[5]<=3.0:
return 'True'
elif obj[5]>3.0:
return 'True'
else: return 'True'
elif obj[4]>18.882886482140197:
# {"feature": "Bar", "instances": 32, "metric_value": 0.9544, "depth": 9}
if obj[5]<=0.0:
return 'False'
elif obj[5]>0.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Occupation", "instances": 415, "metric_value": 0.9335, "depth": 8}
if obj[4]<=13.389134189221542:
# {"feature": "Bar", "instances": 355, "metric_value": 0.9477, "depth": 9}
if obj[5]>0.0:
return 'True'
elif obj[5]<=0.0:
return 'True'
else: return 'True'
elif obj[4]>13.389134189221542:
# {"feature": "Bar", "instances": 60, "metric_value": 0.8113, "depth": 9}
if obj[5]<=2.0:
return 'True'
elif obj[5]>2.0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[1]>1:
# {"feature": "Direction_same", "instances": 561, "metric_value": 0.8994, "depth": 7}
if obj[7]<=0:
# {"feature": "Bar", "instances": 466, "metric_value": 0.8711, "depth": 8}
if obj[5]<=3.0:
# {"feature": "Occupation", "instances": 460, "metric_value": 0.8759, "depth": 9}
if obj[4]<=19.025502608326256:
return 'True'
elif obj[4]>19.025502608326256:
return 'True'
else: return 'True'
elif obj[5]>3.0:
return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Bar", "instances": 95, "metric_value": 0.9864, "depth": 8}
if obj[5]>0.0:
# {"feature": "Occupation", "instances": 53, "metric_value": 0.9977, "depth": 9}
if obj[4]<=21:
return 'False'
elif obj[4]>21:
return 'True'
else: return 'True'
elif obj[5]<=0.0:
# {"feature": "Occupation", "instances": 42, "metric_value": 0.8926, "depth": 9}
if obj[4]<=19:
return 'True'
elif obj[4]>19:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2.0:
# {"feature": "Occupation", "instances": 101, "metric_value": 0.7562, "depth": 6}
if obj[4]<=18:
# {"feature": "Bar", "instances": 93, "metric_value": 0.7893, "depth": 7}
if obj[5]<=1.0:
# {"feature": "Time", "instances": 53, "metric_value": 0.6122, "depth": 8}
if obj[1]<=2:
# {"feature": "Direction_same", "instances": 36, "metric_value": 0.5033, "depth": 9}
if obj[7]>0:
return 'True'
elif obj[7]<=0:
return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.7871, "depth": 9}
if obj[7]<=0:
return 'True'
elif obj[7]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[5]>1.0:
# {"feature": "Direction_same", "instances": 40, "metric_value": 0.9341, "depth": 8}
if obj[7]<=0:
# {"feature": "Time", "instances": 25, "metric_value": 0.9896, "depth": 9}
if obj[1]<=1:
return 'True'
elif obj[1]>1:
return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Time", "instances": 15, "metric_value": 0.7219, "depth": 9}
if obj[1]>0:
return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>18:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[0]>2:
# {"feature": "Time", "instances": 1826, "metric_value": 0.8821, "depth": 4}
if obj[1]>0:
# {"feature": "Occupation", "instances": 1422, "metric_value": 0.8981, "depth": 5}
if obj[4]<=13.119242508776624:
# {"feature": "Bar", "instances": 1227, "metric_value": 0.9082, "depth": 6}
if obj[5]<=2.0:
# {"feature": "Education", "instances": 1074, "metric_value": 0.9183, "depth": 7}
if obj[3]<=2:
# {"feature": "Restaurant20to50", "instances": 852, "metric_value": 0.9036, "depth": 8}
if obj[6]<=1.0:
# {"feature": "Direction_same", "instances": 600, "metric_value": 0.9216, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[6]>1.0:
# {"feature": "Direction_same", "instances": 252, "metric_value": 0.8524, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>2:
# {"feature": "Restaurant20to50", "instances": 222, "metric_value": 0.9631, "depth": 8}
if obj[6]<=2.0:
# {"feature": "Direction_same", "instances": 202, "metric_value": 0.977, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[6]>2.0:
# {"feature": "Direction_same", "instances": 20, "metric_value": 0.6098, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>2.0:
# {"feature": "Restaurant20to50", "instances": 153, "metric_value": 0.819, "depth": 7}
if obj[6]>0.0:
# {"feature": "Education", "instances": 138, "metric_value": 0.7936, "depth": 8}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 119, "metric_value": 0.7726, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>3:
# {"feature": "Direction_same", "instances": 19, "metric_value": 0.8997, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Education", "instances": 15, "metric_value": 0.971, "depth": 8}
if obj[3]<=2:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.9403, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>2:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[4]>13.119242508776624:
# {"feature": "Bar", "instances": 195, "metric_value": 0.8213, "depth": 6}
if obj[5]<=3.0:
# {"feature": "Restaurant20to50", "instances": 179, "metric_value": 0.7764, "depth": 7}
if obj[6]<=2.0:
# {"feature": "Education", "instances": 169, "metric_value": 0.7993, "depth": 8}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 151, "metric_value": 0.8341, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>3:
# {"feature": "Direction_same", "instances": 18, "metric_value": 0.3095, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2.0:
return 'True'
else: return 'True'
elif obj[5]>3.0:
# {"feature": "Education", "instances": 16, "metric_value": 0.9887, "depth": 7}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 10, "metric_value": 0.8813, "depth": 8}
if obj[6]>1.0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[6]<=1.0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Bar", "instances": 404, "metric_value": 0.8152, "depth": 5}
if obj[5]<=3.0:
# {"feature": "Restaurant20to50", "instances": 386, "metric_value": 0.7923, "depth": 6}
if obj[6]<=1.0:
# {"feature": "Occupation", "instances": 265, "metric_value": 0.8492, "depth": 7}
if obj[4]<=18.686908748292247:
# {"feature": "Education", "instances": 243, "metric_value": 0.8767, "depth": 8}
if obj[3]>1:
# {"feature": "Direction_same", "instances": 133, "metric_value": 0.8315, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]<=1:
# {"feature": "Direction_same", "instances": 110, "metric_value": 0.9213, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>18.686908748292247:
# {"feature": "Education", "instances": 22, "metric_value": 0.2668, "depth": 8}
if obj[3]<=2:
return 'True'
elif obj[3]>2:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.5436, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>1.0:
# {"feature": "Occupation", "instances": 121, "metric_value": 0.6271, "depth": 7}
if obj[4]>3:
# {"feature": "Education", "instances": 98, "metric_value": 0.7095, "depth": 8}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 93, "metric_value": 0.7304, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>3:
return 'True'
else: return 'True'
elif obj[4]<=3:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>3.0:
# {"feature": "Occupation", "instances": 18, "metric_value": 0.9911, "depth": 6}
if obj[4]<=12:
# {"feature": "Restaurant20to50", "instances": 12, "metric_value": 0.9799, "depth": 7}
if obj[6]>1.0:
# {"feature": "Education", "instances": 8, "metric_value": 1.0, "depth": 8}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.9852, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[6]<=1.0:
# {"feature": "Education", "instances": 4, "metric_value": 0.8113, "depth": 8}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>12:
# {"feature": "Education", "instances": 6, "metric_value": 0.65, "depth": 7}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 1.0, "depth": 8}
if obj[6]<=2.0:
return 'True'
elif obj[6]>2.0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[8]>2:
# {"feature": "Passanger", "instances": 581, "metric_value": 0.9944, "depth": 3}
if obj[0]>0:
# {"feature": "Time", "instances": 561, "metric_value": 0.9903, "depth": 4}
if obj[1]>0:
# {"feature": "Education", "instances": 480, "metric_value": 0.999, "depth": 5}
if obj[3]<=3:
# {"feature": "Bar", "instances": 438, "metric_value": 0.9956, "depth": 6}
if obj[5]>-1.0:
# {"feature": "Restaurant20to50", "instances": 435, "metric_value": 0.9963, "depth": 7}
if obj[6]>-1.0:
# {"feature": "Occupation", "instances": 432, "metric_value": 0.997, "depth": 8}
if obj[4]<=7.599537037037037:
# {"feature": "Direction_same", "instances": 275, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[4]>7.599537037037037:
# {"feature": "Direction_same", "instances": 157, "metric_value": 0.9786, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=-1.0:
return 'False'
else: return 'False'
elif obj[5]<=-1.0:
return 'False'
else: return 'False'
elif obj[3]>3:
# {"feature": "Bar", "instances": 42, "metric_value": 0.8926, "depth": 6}
if obj[5]<=2.0:
# {"feature": "Occupation", "instances": 35, "metric_value": 0.7755, "depth": 7}
if obj[4]<=11:
# {"feature": "Restaurant20to50", "instances": 29, "metric_value": 0.8498, "depth": 8}
if obj[6]<=2.0:
# {"feature": "Direction_same", "instances": 26, "metric_value": 0.8905, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[6]>2.0:
return 'True'
else: return 'True'
elif obj[4]>11:
return 'True'
else: return 'True'
elif obj[5]>2.0:
# {"feature": "Occupation", "instances": 7, "metric_value": 0.8631, "depth": 7}
if obj[4]<=1:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.65, "depth": 8}
if obj[6]<=0.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[6]>0.0:
return 'False'
else: return 'False'
elif obj[4]>1:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Education", "instances": 81, "metric_value": 0.7412, "depth": 5}
if obj[3]<=2:
# {"feature": "Occupation", "instances": 57, "metric_value": 0.8564, "depth": 6}
if obj[4]<=19:
# {"feature": "Restaurant20to50", "instances": 52, "metric_value": 0.8905, "depth": 7}
if obj[6]>0.0:
# {"feature": "Bar", "instances": 37, "metric_value": 0.9569, "depth": 8}
if obj[5]>0.0:
# {"feature": "Direction_same", "instances": 26, "metric_value": 0.8404, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[5]<=0.0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.9457, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Bar", "instances": 15, "metric_value": 0.5665, "depth": 8}
if obj[5]<=3.0:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.3712, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[5]>3.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[4]>19:
return 'False'
else: return 'False'
elif obj[3]>2:
# {"feature": "Occupation", "instances": 24, "metric_value": 0.2499, "depth": 6}
if obj[4]<=16:
return 'False'
elif obj[4]>16:
# {"feature": "Bar", "instances": 3, "metric_value": 0.9183, "depth": 7}
if obj[5]<=0.0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 1.0, "depth": 8}
if obj[6]<=0.0:
return 'False'
elif obj[6]>0.0:
return 'True'
else: return 'True'
elif obj[5]>0.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]<=0:
# {"feature": "Bar", "instances": 20, "metric_value": 0.6098, "depth": 4}
if obj[5]>0.0:
# {"feature": "Occupation", "instances": 12, "metric_value": 0.8113, "depth": 5}
if obj[4]>1:
# {"feature": "Education", "instances": 9, "metric_value": 0.5033, "depth": 6}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.7219, "depth": 7}
if obj[6]<=1.0:
return 'True'
elif obj[6]>1.0:
# {"feature": "Time", "instances": 2, "metric_value": 1.0, "depth": 8}
if obj[1]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Education", "instances": 3, "metric_value": 0.9183, "depth": 6}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[5]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[2]<=1:
# {"feature": "Bar", "instances": 2258, "metric_value": 0.9865, "depth": 2}
if obj[5]>0.0:
# {"feature": "Time", "instances": 1296, "metric_value": 0.9972, "depth": 3}
if obj[1]>0:
# {"feature": "Passanger", "instances": 933, "metric_value": 0.9999, "depth": 4}
if obj[0]<=2:
# {"feature": "Restaurant20to50", "instances": 743, "metric_value": 0.9941, "depth": 5}
if obj[6]<=1.0:
# {"feature": "Occupation", "instances": 457, "metric_value": 0.9772, "depth": 6}
if obj[4]<=13.909237135911898:
# {"feature": "Education", "instances": 377, "metric_value": 0.9915, "depth": 7}
if obj[3]>1:
# {"feature": "Distance", "instances": 211, "metric_value": 0.9698, "depth": 8}
if obj[8]>1:
# {"feature": "Direction_same", "instances": 138, "metric_value": 0.9903, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[8]<=1:
# {"feature": "Direction_same", "instances": 73, "metric_value": 0.8989, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=1:
# {"feature": "Distance", "instances": 166, "metric_value": 0.9999, "depth": 8}
if obj[8]<=2:
# {"feature": "Direction_same", "instances": 121, "metric_value": 0.996, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[8]>2:
# {"feature": "Direction_same", "instances": 45, "metric_value": 0.9565, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>13.909237135911898:
# {"feature": "Distance", "instances": 80, "metric_value": 0.8113, "depth": 7}
if obj[8]<=2:
# {"feature": "Education", "instances": 60, "metric_value": 0.6873, "depth": 8}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 56, "metric_value": 0.7147, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[3]>3:
return 'False'
else: return 'False'
elif obj[8]>2:
# {"feature": "Education", "instances": 20, "metric_value": 0.9928, "depth": 8}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.9799, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[6]>1.0:
# {"feature": "Education", "instances": 286, "metric_value": 0.9983, "depth": 6}
if obj[3]<=3:
# {"feature": "Occupation", "instances": 261, "metric_value": 0.9995, "depth": 7}
if obj[4]>0:
# {"feature": "Direction_same", "instances": 260, "metric_value": 0.9996, "depth": 8}
if obj[7]<=0:
# {"feature": "Distance", "instances": 225, "metric_value": 1.0, "depth": 9}
if obj[8]>1:
return 'False'
elif obj[8]<=1:
return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Distance", "instances": 35, "metric_value": 0.971, "depth": 9}
if obj[8]>1:
return 'True'
elif obj[8]<=1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[3]>3:
# {"feature": "Direction_same", "instances": 25, "metric_value": 0.9427, "depth": 7}
if obj[7]<=0:
# {"feature": "Occupation", "instances": 20, "metric_value": 0.8813, "depth": 8}
if obj[4]>1:
# {"feature": "Distance", "instances": 11, "metric_value": 0.994, "depth": 9}
if obj[8]<=2:
return 'True'
elif obj[8]>2:
return 'False'
else: return 'False'
elif obj[4]<=1:
# {"feature": "Distance", "instances": 9, "metric_value": 0.5033, "depth": 9}
if obj[8]>1:
return 'True'
elif obj[8]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Distance", "instances": 5, "metric_value": 0.971, "depth": 8}
if obj[8]>1:
# {"feature": "Occupation", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[4]>1:
return 'True'
elif obj[4]<=1:
return 'True'
else: return 'True'
elif obj[8]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[0]>2:
# {"feature": "Occupation", "instances": 190, "metric_value": 0.9364, "depth": 5}
if obj[4]<=19.70715618872958:
# {"feature": "Restaurant20to50", "instances": 179, "metric_value": 0.9539, "depth": 6}
if obj[6]>0.0:
# {"feature": "Education", "instances": 160, "metric_value": 0.9395, "depth": 7}
if obj[3]<=4:
# {"feature": "Distance", "instances": 158, "metric_value": 0.9433, "depth": 8}
if obj[8]>1:
# {"feature": "Direction_same", "instances": 134, "metric_value": 0.953, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[8]<=1:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.8709, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>4:
return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Education", "instances": 19, "metric_value": 0.998, "depth": 7}
if obj[3]<=2:
# {"feature": "Distance", "instances": 14, "metric_value": 0.9403, "depth": 8}
if obj[8]>1:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.9612, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[8]<=1:
return 'False'
else: return 'False'
elif obj[3]>2:
# {"feature": "Distance", "instances": 5, "metric_value": 0.7219, "depth": 8}
if obj[8]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[8]>1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[4]>19.70715618872958:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Passanger", "instances": 363, "metric_value": 0.9542, "depth": 4}
if obj[0]<=1:
# {"feature": "Distance", "instances": 333, "metric_value": 0.9298, "depth": 5}
if obj[8]<=1:
# {"feature": "Restaurant20to50", "instances": 170, "metric_value": 0.8338, "depth": 6}
if obj[6]<=1.0:
# {"feature": "Education", "instances": 102, "metric_value": 0.9183, "depth": 7}
if obj[3]<=3:
# {"feature": "Occupation", "instances": 98, "metric_value": 0.9313, "depth": 8}
if obj[4]>2.1406480683313776:
# {"feature": "Direction_same", "instances": 78, "metric_value": 0.952, "depth": 9}
if obj[7]<=1:
return 'True'
else: return 'True'
elif obj[4]<=2.1406480683313776:
# {"feature": "Direction_same", "instances": 20, "metric_value": 0.8113, "depth": 9}
if obj[7]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>3:
return 'True'
else: return 'True'
elif obj[6]>1.0:
# {"feature": "Education", "instances": 68, "metric_value": 0.6385, "depth": 7}
if obj[3]>1:
# {"feature": "Occupation", "instances": 40, "metric_value": 0.469, "depth": 8}
if obj[4]>8:
# {"feature": "Direction_same", "instances": 22, "metric_value": 0.684, "depth": 9}
if obj[7]<=1:
return 'True'
else: return 'True'
elif obj[4]<=8:
return 'True'
else: return 'True'
elif obj[3]<=1:
# {"feature": "Occupation", "instances": 28, "metric_value": 0.8113, "depth": 8}
if obj[4]>5:
# {"feature": "Direction_same", "instances": 21, "metric_value": 0.4537, "depth": 9}
if obj[7]<=1:
return 'True'
else: return 'True'
elif obj[4]<=5:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.8631, "depth": 9}
if obj[7]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[8]>1:
# {"feature": "Restaurant20to50", "instances": 163, "metric_value": 0.9856, "depth": 6}
if obj[6]>0.0:
# {"feature": "Education", "instances": 140, "metric_value": 0.9666, "depth": 7}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 130, "metric_value": 0.9792, "depth": 8}
if obj[7]<=0:
# {"feature": "Occupation", "instances": 128, "metric_value": 0.9745, "depth": 9}
if obj[4]<=13.844008971972023:
return 'True'
elif obj[4]>13.844008971972023:
return 'True'
else: return 'True'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[3]>3:
# {"feature": "Occupation", "instances": 10, "metric_value": 0.469, "depth": 8}
if obj[4]<=2:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.7219, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[4]>2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Occupation", "instances": 23, "metric_value": 0.9321, "depth": 7}
if obj[4]<=21:
# {"feature": "Direction_same", "instances": 22, "metric_value": 0.9024, "depth": 8}
if obj[7]<=0:
# {"feature": "Education", "instances": 21, "metric_value": 0.8631, "depth": 9}
if obj[3]<=3:
return 'False'
elif obj[3]>3:
return 'True'
else: return 'True'
elif obj[7]>0:
return 'True'
else: return 'True'
elif obj[4]>21:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[0]>1:
# {"feature": "Occupation", "instances": 30, "metric_value": 0.8813, "depth": 5}
if obj[4]<=14:
# {"feature": "Restaurant20to50", "instances": 23, "metric_value": 0.9656, "depth": 6}
if obj[6]>0.0:
# {"feature": "Education", "instances": 21, "metric_value": 0.9183, "depth": 7}
if obj[3]<=2:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.9887, "depth": 8}
if obj[7]<=0:
# {"feature": "Distance", "instances": 13, "metric_value": 0.9957, "depth": 9}
if obj[8]>1:
return 'False'
elif obj[8]<=1:
return 'False'
else: return 'False'
elif obj[7]>0:
# {"feature": "Distance", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[8]<=2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>2:
return 'False'
else: return 'False'
elif obj[6]<=0.0:
return 'True'
else: return 'True'
elif obj[4]>14:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[5]<=0.0:
# {"feature": "Restaurant20to50", "instances": 962, "metric_value": 0.8792, "depth": 3}
if obj[6]<=2.0:
# {"feature": "Distance", "instances": 914, "metric_value": 0.8629, "depth": 4}
if obj[8]<=2:
# {"feature": "Time", "instances": 746, "metric_value": 0.8912, "depth": 5}
if obj[1]<=2:
# {"feature": "Education", "instances": 406, "metric_value": 0.9294, "depth": 6}
if obj[3]<=4:
# {"feature": "Passanger", "instances": 404, "metric_value": 0.9263, "depth": 7}
if obj[0]<=2:
# {"feature": "Occupation", "instances": 368, "metric_value": 0.9137, "depth": 8}
if obj[4]>1.659942678627642:
# {"feature": "Direction_same", "instances": 293, "metric_value": 0.9324, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[4]<=1.659942678627642:
# {"feature": "Direction_same", "instances": 75, "metric_value": 0.8165, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>2:
# {"feature": "Occupation", "instances": 36, "metric_value": 0.9978, "depth": 8}
if obj[4]<=21:
# {"feature": "Direction_same", "instances": 34, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[4]>21:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>4:
return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Occupation", "instances": 340, "metric_value": 0.8338, "depth": 6}
if obj[4]<=6.644117647058824:
# {"feature": "Passanger", "instances": 197, "metric_value": 0.7379, "depth": 7}
if obj[0]>0:
# {"feature": "Direction_same", "instances": 172, "metric_value": 0.6806, "depth": 8}
if obj[7]<=0:
# {"feature": "Education", "instances": 161, "metric_value": 0.6524, "depth": 9}
if obj[3]<=4:
return 'False'
elif obj[3]>4:
return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Education", "instances": 11, "metric_value": 0.9457, "depth": 9}
if obj[3]>1:
return 'False'
elif obj[3]<=1:
return 'True'
else: return 'True'
else: return 'False'
elif obj[0]<=0:
# {"feature": "Education", "instances": 25, "metric_value": 0.971, "depth": 8}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.9544, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[3]>3:
return 'True'
else: return 'True'
else: return 'False'
elif obj[4]>6.644117647058824:
# {"feature": "Education", "instances": 143, "metric_value": 0.9273, "depth": 7}
if obj[3]>0:
# {"feature": "Passanger", "instances": 87, "metric_value": 0.8653, "depth": 8}
if obj[0]>0:
# {"feature": "Direction_same", "instances": 69, "metric_value": 0.8281, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[0]<=0:
# {"feature": "Direction_same", "instances": 18, "metric_value": 0.9641, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Passanger", "instances": 56, "metric_value": 0.9852, "depth": 8}
if obj[0]<=2:
# {"feature": "Direction_same", "instances": 41, "metric_value": 0.9474, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'True'
else: return 'True'
elif obj[0]>2:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.971, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[8]>2:
# {"feature": "Passanger", "instances": 168, "metric_value": 0.6899, "depth": 5}
if obj[0]<=1:
# {"feature": "Occupation", "instances": 164, "metric_value": 0.6594, "depth": 6}
if obj[4]>0:
# {"feature": "Education", "instances": 159, "metric_value": 0.6276, "depth": 7}
if obj[3]<=4:
# {"feature": "Time", "instances": 158, "metric_value": 0.6146, "depth": 8}
if obj[1]<=1:
# {"feature": "Direction_same", "instances": 148, "metric_value": 0.6395, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[1]>1:
return 'False'
else: return 'False'
elif obj[3]>4:
return 'True'
else: return 'True'
elif obj[4]<=0:
# {"feature": "Time", "instances": 5, "metric_value": 0.971, "depth": 7}
if obj[1]<=1:
# {"feature": "Education", "instances": 5, "metric_value": 0.971, "depth": 8}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.971, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[0]>1:
# {"feature": "Education", "instances": 4, "metric_value": 0.8113, "depth": 6}
if obj[3]>1:
return 'True'
elif obj[3]<=1:
# {"feature": "Occupation", "instances": 2, "metric_value": 1.0, "depth": 7}
if obj[4]<=1:
return 'True'
elif obj[4]>1:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[6]>2.0:
# {"feature": "Education", "instances": 48, "metric_value": 0.995, "depth": 4}
if obj[3]<=0:
# {"feature": "Occupation", "instances": 25, "metric_value": 0.795, "depth": 5}
if obj[4]<=4:
# {"feature": "Passanger", "instances": 14, "metric_value": 0.9852, "depth": 6}
if obj[0]<=2:
# {"feature": "Time", "instances": 12, "metric_value": 1.0, "depth": 7}
if obj[1]<=2:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.9544, "depth": 8}
if obj[7]>0:
# {"feature": "Distance", "instances": 4, "metric_value": 0.8113, "depth": 9}
if obj[8]<=1:
return 'True'
elif obj[8]>1:
return 'False'
else: return 'False'
elif obj[7]<=0:
# {"feature": "Distance", "instances": 4, "metric_value": 1.0, "depth": 9}
if obj[8]<=1:
return 'False'
elif obj[8]>1:
return 'True'
else: return 'True'
else: return 'False'
elif obj[1]>2:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.8113, "depth": 8}
if obj[7]<=0:
# {"feature": "Distance", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[8]<=2:
return 'False'
elif obj[8]>2:
return 'False'
else: return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>2:
return 'True'
else: return 'True'
elif obj[4]>4:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Time", "instances": 23, "metric_value": 0.8865, "depth": 5}
if obj[1]<=3:
# {"feature": "Occupation", "instances": 20, "metric_value": 0.9341, "depth": 6}
if obj[4]<=6:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.7219, "depth": 7}
if obj[7]<=0:
# {"feature": "Distance", "instances": 8, "metric_value": 0.8113, "depth": 8}
if obj[8]<=2:
# {"feature": "Passanger", "instances": 6, "metric_value": 0.9183, "depth": 9}
if obj[0]>1:
return 'False'
elif obj[0]<=1:
return 'True'
else: return 'True'
elif obj[8]>2:
return 'False'
else: return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[4]>6:
# {"feature": "Passanger", "instances": 10, "metric_value": 1.0, "depth": 7}
if obj[0]>1:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.971, "depth": 8}
if obj[7]<=0:
# {"feature": "Distance", "instances": 4, "metric_value": 0.8113, "depth": 9}
if obj[8]>1:
return 'False'
elif obj[8]<=1:
return 'False'
else: return 'False'
elif obj[7]>0:
return 'True'
else: return 'True'
elif obj[0]<=1:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.971, "depth": 8}
if obj[7]<=0:
# {"feature": "Distance", "instances": 4, "metric_value": 0.8113, "depth": 9}
if obj[8]<=2:
return 'True'
elif obj[8]>2:
return 'False'
else: return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[1]>3:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'False'
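# Example call with hypothetical, already-encoded feature values, in the order documented
# above (Passanger, Time, Coupon, Education, Occupation, Bar, Restaurant20to50,
# Direction_same, Distance):
print(findDecision([1, 2, 2, 1, 5.0, 1.0, 1.0, 0, 1]))  # -> 'True'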
|
StarcoderdataPython
|
8025973
|
<reponame>Altyrost/poediscordbot
# http://poeurl.com/api/?shrink={%22url%22:%22https://www.pathofexile.com/passive-skill-tree/AAAABAMBAHpwm6FR-zeDAx7quvfX0PW2-o5kpys3ZsMJ62PviLmT8h3v66EvGyUfQR1PDkiMNkuutUjbXq6zBUJJUZEHQnrsGNfPlS6-iocTf8ZFfjQKDXxfalgHj0ZwUvrSjun3wVF0b57G93gvOw3B86aZES-TJx0UzRYBb9-K0NBGcRhq8NUXL21sgKSQ1hV-D8QsnL46lSCDCYnTdwcOXL6Au_wtH0yzLL9JsUGWtAycpI_6NbmsmMEAsZC4yqKjXGuEb6brV8kRD9lb96YRUOv1VdYrCsNtUDAfGIt6avp88JJ0ZOf5N9AfhEjndG0ZO3zpAioLBx4spl3yfOXK0-L3EZbUQvVLLag=%22}
import json
import urllib.request
from poediscordbot.util.logging import log
def shrink_tree_url(tree):
"""
Shrink url with poeurl
:param tree:
:return: valid poeurl if possible else raise a value error
"""
# sanitize
tree = tree.strip()
# build requesturl
param = f'{{"url":"{tree}"}}'
url = f'http://poeurl.com/api/?shrink={param}'
log.debug(f"Poeurl payload={url}")
contents = urllib.request.urlopen(url).read().decode('utf-8')
log.debug(f"Poeurl contents={contents}")
contents = json.loads(contents)
log.debug(f"Got json content from poeurl ... {contents}")
if contents['url']:
return f"http://poeurl.com/{contents['url']}"
else:
raise ValueError("Unable to retrieve URL")
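# Hypothetical usage (comment only: the tree link below is truncated, and a real call
# needs network access to poeurl.com):
# short = shrink_tree_url("https://www.pathofexile.com/passive-skill-tree/AAAABAMBAHpw...")
# log.info(f"shortened to {short}")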
|
StarcoderdataPython
|
24909
|
<reponame>jim-bo/silp2<gh_stars>1-10
#!/usr/bin/python
'''
creates bundle graph from filtered multigraph
'''
### imports ###
import sys
import os
import logging
import networkx as nx
import numpy as np
import scipy.stats as stats
import cPickle
import helpers.io as io
import helpers.misc as misc
### definitions ###
### functions ###
def compress_edges(MG, p, q):
''' compresses the edges '''
# check for types.
bcnts = [0, 0, 0, 0]
for z in MG[p][q]:
bcnts[MG[p][q][z]['state']] += 1
# build numpy arrays for each distance type.
bdists = list()
for i in range(4):
bdists.append(np.zeros(bcnts[i], dtype=np.float))
# populate array with distances.
bidxs = [0, 0, 0, 0]
for z in MG[p][q]:
state = MG[p][q][z]['state']
dist = MG[p][q][z]['dist']
bdists[state][bidxs[state]] = dist
bidxs[state] += 1
# compute bundle info.
devs = list()
means = list()
mins = list()
maxs = list()
for i in range(4):
if bdists[i].shape[0] <= 0:
devs.append(-1)
means.append(-1)
mins.append(-1)
maxs.append(-1)
else:
devs.append(np.std(bdists[i]))
means.append(np.mean(bdists[i]))
mins.append(bdists[i].min())
maxs.append(bdists[i].max())
# return summaries.
return bcnts, bdists, devs, means, mins, maxs
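# A tiny, comment-only sanity sketch for compress_edges; the synthetic edge attributes
# ('state' in 0..3, 'dist') mirror how the function reads them above.
# G = nx.MultiGraph()
# G.add_edge('a', 'b', state=0, dist=100.0)
# G.add_edge('a', 'b', state=0, dist=120.0)
# G.add_edge('a', 'b', state=2, dist=-30.0)
# bcnts, bdists, devs, means, mins, maxs = compress_edges(G, 'a', 'b')
# bcnts == [2, 0, 1, 0]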
def _load_reps(file_path):
''' loads repeat info from cpickle'''
# no weights.
if file_path == None:
return dict()
# try the directory method (a folder of per-contig .npy files).
if os.path.isdir(file_path) == True:
reps = dict()
for f in os.listdir(file_path):
n = f.replace(".npy","")
try:
reps[n] = np.load("%s/%s" % (file_path, f))
except:
continue
return reps
# get weights.
try:
with open(file_path) as fin:
return cPickle.load(fin)
except:
logging.warning("unable to load repeat pickle, ignoring weights")
return dict()
def create_bundles(paths, args):
""" creates bundles
Parameters
----------
paths.edge_file : string
args.bundle_size : int
args.pthresh : int
args.bup : int
args.rep_file : string
args.ins_size : int
args.std_dev : int
"""
# load repeat annotations.
repcnts = _load_reps(args.rep_file)
# load the multi graph.
MG = nx.read_gpickle(paths.edge_file)
# create bundle graph.
BG = nx.Graph()
# add nodes.
for n in MG.nodes():
BG.add_node(n, MG.node[n])
# build set of adjacencies.
adjset = set()
for p, nbrs in MG.adjacency_iter():
for q in nbrs:
adjset.add(tuple(sorted([p,q])))
# compute bundles from adjacencies.
zerod = 0
zcnt = 0
ztot = len(adjset)
for p, q in adjset:
#logging.info("progress: %d of %d" % (zcnt, ztot))
zcnt += 1
# sanity check.
if MG.node[p]['cov'] == 0.0 or MG.node[q]['cov'] == 0.0:
logging.error("how can this happen?")
sys.exit()
# bundle size check.
bsize = len(MG[p][q])
if bsize < args.bundle_size:
continue
# group by insert size.
groups = dict()
std_devs = dict()
for z in MG[p][q]:
ins_size = MG[p][q][z]['ins_size']
if ins_size not in groups:
groups[ins_size] = list()
std_devs[ins_size] = MG[p][q][z]['std_dev']
groups[ins_size].append(z)
# loop over groups.
for ins_size in groups:
# compress info.
bcnts, bdists, devs, means, mins, maxs = compress_edges(MG, p, q)
# compute weights.
cov = 1 - abs(MG.node[p]['cov'] - MG.node[q]['cov']) / (MG.node[p]['cov'] + MG.node[q]['cov'])
# swap bdists for python lists.
for i in range(len(bdists)):
bdists[i] = list(bdists[i])
# add start stop info.
poses1 = list()
poses2 = list()
for z in MG[p][q]:
tmp = MG[p][q][z]
poses1.append((tmp['left1'], tmp['right1']))
poses2.append((tmp['left2'], tmp['right2']))
# create bundle.
if BG.has_edge(p, q):
logging.error("can't have multiple insert sizes between same node")
sys.exit(1)
# zero out negative distances.
avgs = [np.average(bdists[i]) for i in range(4)]
for i in range(4):
                if np.isnan(avgs[i]):
bcnts[i] = 0.0
if avgs[i] < -2 * args.bundle_size:
bcnts[i] = 0.0
zerod += 1
# don't add it if no support.
if np.sum(bcnts) == 0:
continue
#BG.add_edge(p, q, bcnts=bcnts, bdists=bdists, devs=devs, means=means, mins=mins, maxs=maxs, ins_size=ins_size, std_dev=std_devs[ins_size], poses1=poses1, poses2=poses2)
BG.add_edge(p, q, bcnts=bcnts, bdists=bdists, ins_size=ins_size, std_dev=std_devs[ins_size], cov=cov)
# start the slimming.
logging.info("starting repeat based slimming")
# do repeat mods.
track_upped = 0
track_remed = 0
track_ogedg = len(BG.edges())
idxs = np.zeros(1)
if repcnts != dict():
# create repeat distrib.
repavgs = np.zeros(len(repcnts), dtype=np.dtype([('name','S256'),('avg',np.float)]))
i = 0
for name in repcnts:
# save the name.
repavgs[i]['name'] = name
# skip no repeat info.
            if repcnts[name] is None:
repavgs[i]['avg'] = 0
i += 1
continue
# take the average over ins_size + 6 (std_dev)
d = args.ins_size + (6 * args.std_dev)
if repcnts[name].shape[0] < d:
repavgs[i]['avg'] = np.average(repcnts[name])
else:
r = range(0,d)+range(len(repcnts[name])-d,len(repcnts[name]))
repavgs[i]['avg'] = np.average(repcnts[name][r])
i += 1
# compute the cutoff threshold.
score = stats.scoreatpercentile(repavgs[:]['avg'], args.pthresh)
idxs = repavgs[:]['avg'] > score
# look at each bundle and see if the repeats necessitates attention.
for p, q in BG.edges():
# get index of pairs.
idp = np.where(repavgs[:]['name'] == p)[0]
idq = np.where(repavgs[:]['name'] == q)[0]
# skip if both not high.
if idxs[idp] == False and idxs[idq] == False:
continue
# get score.
scp = repavgs[idp]['avg']
scq = repavgs[idq]['avg']
# check if this bundle needs attention.
if max(scp, scq) > score:
track_upped += 1
                # it gets its minimum bundle size upped.
for i in range(len(BG[p][q]['bcnts'])):
# clear if it doesn't meet criteria.
if BG[p][q]['bcnts'][i] < args.bundle_size + args.bup:
BG[p][q]['bcnts'][i] = 0
# remove bundle if no support.
if np.sum(BG[p][q]['bcnts']) == 0:
track_remed += 1
BG.remove_edge(p,q)
else:
logging.info('no repeat information supplied')
# add repeat weights.
for p, q in BG.edges():
# create weight.
BG[p][q]['u'] = [0.0] * 4
# sum weights.
for z in MG[p][q]:
left1 = MG[p][q][z]['left1']
left2 = MG[p][q][z]['left2']
right1 = MG[p][q][z]['right1']
right2 = MG[p][q][z]['right2']
            try:
                cntl = np.sum(repcnts[p][left1:left2])
                cntr = np.sum(repcnts[p][right1:right2])
                propl = 1.0 - (float(cntl) / float(left2-left1))
                propr = 1.0 - (float(cntr) / float(right2-right1))
            except:
                # skip pairs with no repeat track or a zero-length window
                continue
# add average.
p_k = (propl + propr) / 2.0
# add it.
BG[p][q]['u'][MG[p][q][z]['state']] += p_k
# note the modifications due to filtering.
logging.info("contigs with repeat regions in %.2f threshold: %i of %i" % (args.pthresh, np.sum(idxs), len(idxs)))
logging.info("bundles effected by repeats: %i of %i" % (track_upped, track_ogedg))
logging.info("bundles removed by repeats: %i of %i" % (track_remed, track_ogedg))
logging.info("bundles removed by neg dist: %i" % (zerod))
logging.info("total bundles: %i" % (len(BG.edges())))
# write to disk.
nx.write_gpickle(BG, paths.bundle_file)
|
StarcoderdataPython
|
3339258
|
<reponame>karlwnw/adventofcode2019
import unittest
from day12 import run
class TestDay12(unittest.TestCase):
def test_examples(self):
moons = [(-1, 0, 2), (2, -10, -7), (4, -8, 8), (3, 5, -1)]
self.assertEqual(run(moons, 10), 179)
moons = [(-8, -10, 0), (5, 5, 10), (2, -7, 3), (9, -8, -3)]
self.assertEqual(run(moons, 100), 1940)
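# Illustrative sketch (not from the original repo): one way the day12.run under
# test could work -- apply pairwise gravity per axis, step velocities and
# positions, and return total energy (potential * kinetic per moon). The name
# _reference_run is hypothetical and only avoids shadowing the real import.
from itertools import combinations
def _reference_run(moons, steps):
    pos = [list(m) for m in moons]
    vel = [[0, 0, 0] for _ in moons]
    for _ in range(steps):
        # gravity: each pair pulls their coordinates one step toward each other
        for i, j in combinations(range(len(pos)), 2):
            for axis in range(3):
                if pos[i][axis] < pos[j][axis]:
                    vel[i][axis] += 1
                    vel[j][axis] -= 1
                elif pos[i][axis] > pos[j][axis]:
                    vel[i][axis] -= 1
                    vel[j][axis] += 1
        # velocity: move every moon by its current velocity
        for p, v in zip(pos, vel):
            for axis in range(3):
                p[axis] += v[axis]
    return sum(sum(abs(c) for c in p) * sum(abs(c) for c in v) for p, v in zip(pos, vel))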
|
StarcoderdataPython
|
3550038
|
from solver import Solver
from run_solver import get_prolog_file_info, get_tile_ids_dictionary
from trial import TRIAL_CONFIG_FORMATS
import utils
import os
import argparse
def main(trial, levels, num_sol, asp, state_graph):
if not (asp or state_graph):
utils.error_exit("Must specify at least one validation test to run: --asp or --state_graph")
# Get file formats
config_formats = TRIAL_CONFIG_FORMATS.get(trial)
if config_formats is None:
utils.error_exit("--trial must be one of %s" % str(list(TRIAL_CONFIG_FORMATS.keys())))
prolog_file_format = "level_saved_files_block/prolog_files/%s.pl"
model_str_file_format = "level_saved_files_block/generated_level_model_strs/%s.txt"
assignments_dict_file_format = "level_saved_files_block/generated_level_assignments_dicts/%s.pickle"
# Initialize validation counts
asp_checked_count = 0
asp_valid_count = 0
state_graph_checked_count = 0
state_graph_valid_count = 0
# Validate each solver run
for level in levels:
for config_file_format in config_formats:
for sol in range(num_sol):
prolog_file = prolog_file_format % level
prolog_filename = utils.get_basepath_filename(prolog_file, 'pl')
config_file = config_file_format % level
config_filename = utils.get_basepath_filename(config_file, 'json')
answer_set_filename = '_'.join([prolog_filename, config_filename, 'a%d' % sol])
if asp:
# Determine ASP checks to perform based on config file contents
config_file_contents = utils.read_json(config_file)
config = config_file_contents['config']
require_all_platforms_reachable = True
require_all_bonus_tiles_reachable = True
if config.get('require_all_platforms_reachable') is not None:
require_all_platforms_reachable = eval(config['require_all_platforms_reachable'])
if config.get('require_all_bonus_tiles_reachable') is not None:
require_all_bonus_tiles_reachable = eval(config['require_all_bonus_tiles_reachable'])
prolog_file_info = get_prolog_file_info(prolog_file)
tile_ids = get_tile_ids_dictionary(prolog_file_info)
model_str_file = model_str_file_format % answer_set_filename
if os.path.exists(model_str_file):
model_str = utils.read_txt(model_str_file)
asp_valid = Solver.asp_is_valid(check_path=True,
check_onground=require_all_platforms_reachable,
check_bonus=require_all_bonus_tiles_reachable,
model_str=model_str,
player_img='block',
answer_set_filename=answer_set_filename,
tile_ids=tile_ids,
save=False)
status = "ASP VALID" if asp_valid else "ASP INVALID"
print("%s: %s" % (answer_set_filename, status))
asp_checked_count += 1
asp_valid_count += 1 if asp_valid else 0
if state_graph:
assignments_dict_file = assignments_dict_file_format % answer_set_filename
if os.path.exists(assignments_dict_file):
assignments_dict = utils.read_pickle(assignments_dict_file)
valid_path = Solver.get_state_graph_valid_path(assignments_dict=assignments_dict,
player_img='block',
prolog_filename=prolog_filename,
answer_set_filename=answer_set_filename,
save=True)
status = "GRAPH VALID" if valid_path else "GRAPH INVALID"
print("%s: %s" % (answer_set_filename, status))
state_graph_checked_count += 1
state_graph_valid_count += 1 if valid_path is not None else 0
# Print validation results summary
if asp:
print("ASPs Checked: %d" % asp_checked_count)
print("ASPs Valid: %d" % asp_valid_count)
if state_graph:
print("State Graphs Checked: %d" % state_graph_checked_count)
print("State Graphs Valid: %d" % state_graph_valid_count)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Validate generated levels')
parser.add_argument('trial', type=str, help="Trial to run: options %s" % str(list(TRIAL_CONFIG_FORMATS.keys())))
parser.add_argument('levels', type=str, nargs='+', help="Level names")
parser.add_argument('--num_sol', type=int, default=1, help="Number of answer sets per config to validate")
parser.add_argument('--asp', const=True, nargs='?', type=bool, default=False, help="Validate generated level ASP model str")
parser.add_argument('--state_graph', const=True, nargs='?', type=bool, default=False, help="Validate generated level state graph")
args = parser.parse_args()
main(trial=args.trial, levels=args.levels, num_sol=args.num_sol, asp=args.asp, state_graph=args.state_graph)
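    # Example invocation (script, trial and level names are hypothetical):
    #   python validate_levels.py trial_1 level_1 level_2 --num_sol 2 --asp --state_graph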
|
StarcoderdataPython
|
1711109
|
<reponame>mvadari/xrpl-py-interview
"""Top-level exports for the wallet generation package."""
from xrpl.asyncio.wallet import XRPLFaucetException
from xrpl.wallet.main import Wallet
from xrpl.wallet.wallet_generation import generate_faucet_wallet
__all__ = ["Wallet", "generate_faucet_wallet", "XRPLFaucetException"]
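# Illustrative usage of the re-exported helpers (the Testnet URL and client type
# are assumptions; check the xrpl-py docs for the authoritative API):
#   from xrpl.clients import JsonRpcClient
#   from xrpl.wallet import generate_faucet_wallet
#   client = JsonRpcClient("https://s.altnet.rippletest.net:51234")
#   test_wallet = generate_faucet_wallet(client)  # funds a fresh Testnet wallet
#   print(test_wallet.classic_address)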
|
StarcoderdataPython
|
3469055
|
from datetime import datetime
from enum import Enum
ChangeType = Enum('ChangeType', 'block tile_entity entity status')
def getType(value):
if value == 'status':
return ChangeType.status
elif value == 'BLOCK':
return ChangeType.block
elif value.startswith('TILE_ENTITY'):
return ChangeType.tile_entity
else:
return ChangeType.entity
def to_date(str):
return datetime.strptime(str, '%d-%m-%Y_%H:%M:%S:%f')
# data structure for storing change information
# takes lines/rows as lists of string values and saves them (some values converted to more useful data types) in order
class IntervalData:
def __init__(self):
self.entries = [] # all change entries can be found in this list
self.status_entries = [] # additional list only storing the status information entries
def addLogRowItems(self, rowItems):
logTime = to_date(rowItems[0]) # wall clock time of log entry creation
worldFullTime = int(rowItems[1]) # MC server full time (ticks since startup)
type = getType(rowItems[2]) # Category/Type of change (ChangeType.(status|block|tile_entity|entity)
# store basic information (common to entries of all types)
entry = {'logTime': logTime, 'worldFullTime': worldFullTime, 'type': type, 'typeStr': rowItems[2]}
if type == ChangeType.status: # information specific to status entries
# 20-10-2018_19:34:40:724 time type="status" #loadedChunks #changedChunks #tileEntities #changedTileEntities #entities #changedEntities #onlinePlayers totalStateDiffTime
loadedChunks = int(rowItems[3]) # total number of loaded chunks
changedChunks = int(rowItems[4]) # number of chunks that changed (indicated by Events)
tileEntities = int(rowItems[5]) # total number of tile/block-entities
changedTileEntities = int(rowItems[6]) # number of tile entities that changed
entities = int(rowItems[7]) # total number of entities
changedEntities = int(rowItems[8]) # number of entities that changed
onlinePlayers = int(rowItems[9]) # total number of players logged in to the server
totalStateDiffTime = float(rowItems[10].replace('ms','')) # time it took the measurement plugin to compare the current with the last state (comparing "dirty" chunks as indicated by Events)
# update dictionary with type-specific information
entry.update({"loadedChunks": loadedChunks, 'changedChunks': changedChunks, 'tileEntities': tileEntities, 'changedTileEntities': changedTileEntities, 'entities': entities
, 'changedEntities': changedEntities, 'onlinePlayers': onlinePlayers, 'totalStateDiffTime': totalStateDiffTime})
# store change entry (in all lists)
self.entries.append(entry)
self.status_entries.append(entry)
else:
# change must be involving a block, tile/block-entity or entity, which all share the following properties
xpos = rowItems[3] # global coordinate system (block x coordinate)
ypos = rowItems[4] # global coordinate system (block y coordinate)
zpos = rowItems[5] # global coordinate system (block z coordinate)
world = rowItems[6] # name of the world (e.g. "world", "world_nether", "world_the_end"
chunk = rowItems[7] # x,z coordinates of the chunk that the change happened in
section = rowItems[8] # section number (0-15) of the section that the change happened in (inside the chunk)
# add properties common to block and (tile) entity
entry.update({'xpos': xpos, 'ypos': ypos, 'zpos': zpos, 'world': world, 'chunk': chunk, 'section': section})
if type == ChangeType.entity or type == ChangeType.tile_entity:
# change involves tile/block-entity or entity
# 20-10-2018_19:34:40:724 time type="entity" xpos ypos zpos world chunk section uuid [changed attributes]
uuid = rowItems[9] # all entities and tileEntities have an identifier (uuid)
changes = rowItems[10] # the NBT diff of the previous and current state of the (tile) entity
# update dict with (tile-)entity specific infos
entry.update({'uuid': uuid, 'changes': changes})
# store change entry
self.entries.append(entry)
elif type == ChangeType.block:
# change involves a block
# 20-10-2018_19:34:40:724 time type="block" xpos ypos zpos world chunk section material skylight emittedLight BlockData
material = rowItems[9] # the material a block consists of
skylight = rowItems[10] # the transparency regarding light from above (sun/moon)
emittedLight = rowItems[11] # light emitted/reflected by/from the block itself
blockData = rowItems[12] # additional data (<= one byte)
# update dictionary with block specific information
entry.update({'material' : material, 'skylight': skylight, 'emittedLight': emittedLight, 'blockData': blockData})
# store change entry
self.entries.append(entry)
else:
raise ValueError("type '" + type + "' is not handled!") # handle type that is not handled otherwise
def clearEntries(self):
del(self.entries)
self.entries=[]
def getNumStatusEntries(self):
return len(self.status_entries)
def append(self, other):
if type(other) != type(self):
raise ValueError("Object types do not match up!")
self.entries += other.entries
self.status_entries += other.status_entries
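# Minimal usage sketch (synthetic values; the field order follows the comments above):
if __name__ == "__main__":
    data = IntervalData()
    status_row = ["20-10-2018_19:34:40:724", "123456", "status",
                  "100", "5", "40", "2", "300", "12", "3", "1.5ms"]
    data.addLogRowItems(status_row)
    print(data.getNumStatusEntries())  # -> 1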
|
StarcoderdataPython
|
1899270
|
<reponame>AstraZeneca/jazzy<gh_stars>0
"""Test cases for the visualisations methods."""
import base64
import numpy as np
import pytest
from rdkit import Chem
from jazzy.core import calculate_polar_strength_map
from jazzy.core import get_charges_from_kallisto_molecule
from jazzy.core import get_covalent_atom_idxs
from jazzy.core import kallisto_molecule_from_rdkit_molecule
from jazzy.core import rdkit_molecule_from_smiles
from jazzy.visualisation import _create_color_scale
from jazzy.visualisation import _exclude_hydrogens
from jazzy.visualisation import _get_highlighted_atoms_and_strength_colors
from jazzy.visualisation import _increase_explicit_hydrogen_for_bond_atom
from jazzy.visualisation import _increase_explicit_hydrogens
from jazzy.visualisation import _remove_excluded_hydrogens
from jazzy.visualisation import _remove_strong_acceptor_hydrogens
from jazzy.visualisation import _set_acceptor_props
from jazzy.visualisation import _set_donor_props
from jazzy.visualisation import _zero_positive_value_check
from jazzy.visualisation import depict_strengths
def test_depict_strengths():
"""It correctly depicts strengths."""
rdkit_molecule = rdkit_molecule_from_smiles("OO")
kallisto_molecule = kallisto_molecule_from_rdkit_molecule(rdkit_molecule)
atoms_and_nbrs = get_covalent_atom_idxs(rdkit_molecule)
charges = get_charges_from_kallisto_molecule(kallisto_molecule, 0)
atomic_map = calculate_polar_strength_map(
rdkit_molecule, kallisto_molecule, atoms_and_nbrs, charges
)
img_txt = depict_strengths(
rdkit_molecule,
atomic_map,
flatten_molecule=True,
)
base64_hash = base64.b64encode(img_txt.encode("utf-8"))
assert str(base64_hash[:20]) == "b'PD94bWwgdmVyc2lvbj0n'"
def test_get_highlighted_atoms_and_strength_colors():
"""It correctly gets highlighted atoms and strength colors."""
rdkit_molecule = rdkit_molecule_from_smiles("OO")
kallisto_molecule = kallisto_molecule_from_rdkit_molecule(rdkit_molecule)
atoms_and_nbrs = get_covalent_atom_idxs(rdkit_molecule)
charges = get_charges_from_kallisto_molecule(kallisto_molecule, 0)
atomic_map = calculate_polar_strength_map(
rdkit_molecule, kallisto_molecule, atoms_and_nbrs, charges
)
mw = Chem.RWMol(rdkit_molecule)
sa_threshold = 0.5
sdc_threshold = 0.5
sdx_threshold = 0.5
ignore_sa = True
ignore_sdc = False
ignore_sdx = False
_exclude_hydrogens(
mw,
atomic_map,
sa_threshold,
sdc_threshold,
sdx_threshold,
ignore_sa,
ignore_sdc,
ignore_sdx,
)
mol = mw.GetMol()
colors = _get_highlighted_atoms_and_strength_colors(mol, True)
assert colors[0][0] == 2
assert colors[0][1] == 3
sa_threshold = 0.5
sdc_threshold = 0.5
sdx_threshold = 0.5
ignore_sa = False
ignore_sdc = False
ignore_sdx = False
_exclude_hydrogens(
mw,
atomic_map,
sa_threshold,
sdc_threshold,
sdx_threshold,
ignore_sa,
ignore_sdc,
ignore_sdx,
)
mol = mw.GetMol()
colors = _get_highlighted_atoms_and_strength_colors(mol, True)
assert colors[0][0] == 0
assert colors[0][1] == 1
assert colors[0][2] == 2
assert colors[0][3] == 3
def test_exclude_hydrogens():
"""It successfully creates an `excluded_hydrogen` list from RDKit molecule."""
rdkit_molecule = rdkit_molecule_from_smiles("OO")
kallisto_molecule = kallisto_molecule_from_rdkit_molecule(rdkit_molecule)
atoms_and_nbrs = get_covalent_atom_idxs(rdkit_molecule)
charges = get_charges_from_kallisto_molecule(kallisto_molecule, 0)
atomic_map = calculate_polar_strength_map(
rdkit_molecule, kallisto_molecule, atoms_and_nbrs, charges
)
mw = Chem.RWMol(rdkit_molecule)
# acceptor strengths
sa_threshold = 2.0
sdc_threshold = 2.0
sdx_threshold = 2.0
ignore_sa = False
ignore_sdc = False
ignore_sdx = False
exclude_hydrogens = _exclude_hydrogens(
mw,
atomic_map,
sa_threshold,
sdc_threshold,
sdx_threshold,
ignore_sa,
ignore_sdc,
ignore_sdx,
)
assert len(exclude_hydrogens) == 2
assert exclude_hydrogens[0] == 2
assert exclude_hydrogens[1] == 3
# donor strengths
sa_threshold = 0.5
sdc_threshold = 0.5
sdx_threshold = 0.5
ignore_sa = True
ignore_sdc = False
ignore_sdx = False
exclude_hydrogens = _exclude_hydrogens(
mw,
atomic_map,
sa_threshold,
sdc_threshold,
sdx_threshold,
ignore_sa,
ignore_sdc,
ignore_sdx,
)
# extract hydrogen donor strength
hydrogen_donor_strength = float(mw.GetAtomWithIdx(2).GetProp("sd"))
assert len(exclude_hydrogens) == 0
assert np.isclose(hydrogen_donor_strength, 1.0593)
def test_set_donor_props():
"""It correctly sets donor properties."""
# RDKit carbon molecule
m = rdkit_molecule_from_smiles("C")
atom = m.GetAtomWithIdx(0)
sd = 10.0
sd_threshold = 7.0
ignore_sd = False
condition = _set_donor_props(atom, sd, sd_threshold, ignore_sd)
assert atom.GetProp("atomNote") == str(sd)
assert atom.GetProp("sd") == str(sd)
assert condition is True
sd = 0.0
sd_threshold = 7.0
ignore_sd = False
condition = _set_donor_props(atom, sd, sd_threshold, ignore_sd)
assert condition is False
# RDKit oxygen molecule
m = rdkit_molecule_from_smiles("O")
atom = m.GetAtomWithIdx(0)
sd = 10.0
sd_threshold = 7.0
ignore_sd = False
condition = _set_donor_props(atom, sd, sd_threshold, ignore_sd)
assert atom.GetProp("atomNote") == str(sd)
assert atom.GetProp("sd") == str(sd)
assert condition is True
sd = 0.0
sd_threshold = 7.0
ignore_sd = False
condition = _set_donor_props(atom, sd, sd_threshold, ignore_sd)
assert condition is False
def test_create_color_scale():
"""It creates successfully RGB mappings."""
# only valid modes (donor, acceptor)
with pytest.raises(Exception):
mode = "invalid"
idx2strength = dict()
_create_color_scale(idx2strength, mode)
idx2strength = {1: 10}
reds = _create_color_scale(idx2strength, mode="donor")
assert reds[1] == (1.0, 9.1, 9.1)
idx2strength = {1: 10}
blues = _create_color_scale(idx2strength, mode="acceptor")
assert blues[1] == (9.1, 0.7, 1.0)
idx2strength = {}
blues = _create_color_scale(idx2strength, mode="acceptor")
assert len(blues) == 0
def test_remove_strong_acceptor_hydrogens() -> None:
"""It removes hydrogens that are bonded to strong acceptors."""
# RDKit molecule
m = rdkit_molecule_from_smiles("CC(=O)C=CC=C")
mw = Chem.RWMol(m)
# index 8 is a hydrogen atom within the embedded molecule
idx = 8
hs_to_remove = [idx]
updated_hs_to_remove = _remove_strong_acceptor_hydrogens(mw, hs_to_remove)
assert hs_to_remove == updated_hs_to_remove
    # extract neighbor and set as strong acceptor
hs_to_remove = [idx]
neighbor_atom = mw.GetAtomWithIdx(idx).GetNeighbors()[0]
# strength is 10.0, threshold is 7.0, we do not ignore acceptors (False)
_set_acceptor_props(neighbor_atom, 10.0, 7.0, False)
updated_hs_to_remove = _remove_strong_acceptor_hydrogens(mw, hs_to_remove)
assert updated_hs_to_remove == []
def test_increase_explicit_hydrogens_for_bond_atom() -> None:
"""It increases the number of explicit hydrogens for atom with index `idx`."""
# RDKit molecule
m = Chem.MolFromSmiles("CC(=O)C=CC=C")
# bond start atom index
bidx = 0
# bond end atom index
eidx = 1
# remove_bidx: True && remove_edix: True
mw = Chem.RWMol(m)
ai_to_remove = list() # type: ignore
remove_bidx = True
remove_eidx = True
batom = mw.GetAtomWithIdx(bidx)
eatom = mw.GetAtomWithIdx(eidx)
assert batom.GetNumExplicitHs() == 0
assert eatom.GetNumExplicitHs() == 0
# increase explicit number of hydrogens
mw, ai_to_remove = _increase_explicit_hydrogen_for_bond_atom(
mw, remove_bidx, bidx, remove_eidx, eidx, ai_to_remove
)
# both indices should be in ai_to_remove list
assert len(ai_to_remove) == 2
assert ai_to_remove[0] == bidx
assert ai_to_remove[1] == eidx
bwant = 1
ewant = 1
batom = mw.GetAtomWithIdx(bidx)
eatom = mw.GetAtomWithIdx(eidx)
got = batom.GetNumExplicitHs()
assert got == bwant
got = eatom.GetNumExplicitHs()
assert got == ewant
# remove_bidx: False && remove_eidx: True
mw = Chem.RWMol(m)
ai_to_remove = list()
remove_bidx = False
remove_eidx = True
batom = mw.GetAtomWithIdx(bidx)
eatom = mw.GetAtomWithIdx(eidx)
assert batom.GetNumExplicitHs() == 0
assert eatom.GetNumExplicitHs() == 0
# increase explicit number of hydrogens
mw, ai_to_remove = _increase_explicit_hydrogen_for_bond_atom(
mw, remove_bidx, bidx, remove_eidx, eidx, ai_to_remove
)
# only eidx index should be in ai_to_remove list
assert len(ai_to_remove) == 1
assert ai_to_remove[0] == eidx
bwant = 1
ewant = 0
batom = mw.GetAtomWithIdx(bidx)
eatom = mw.GetAtomWithIdx(eidx)
got = batom.GetNumExplicitHs()
assert got == bwant
got = eatom.GetNumExplicitHs()
assert got == ewant
# remove_bidx: True && remove_eidx: False
mw = Chem.RWMol(m)
ai_to_remove = list()
remove_bidx = True
remove_eidx = False
batom = mw.GetAtomWithIdx(bidx)
eatom = mw.GetAtomWithIdx(eidx)
assert batom.GetNumExplicitHs() == 0
assert eatom.GetNumExplicitHs() == 0
# increase explicit number of hydrogens
mw, ai_to_remove = _increase_explicit_hydrogen_for_bond_atom(
mw, remove_bidx, bidx, remove_eidx, eidx, ai_to_remove
)
# only bidx index should be in ai_to_remove list
assert len(ai_to_remove) == 1
assert ai_to_remove[0] == bidx
bwant = 0
ewant = 1
batom = mw.GetAtomWithIdx(bidx)
eatom = mw.GetAtomWithIdx(eidx)
got = batom.GetNumExplicitHs()
assert got == bwant
got = eatom.GetNumExplicitHs()
assert got == ewant
# remove_bidx: False && remove_eidx: False
mw = Chem.RWMol(m)
ai_to_remove = list()
remove_bidx = False
remove_eidx = False
batom = mw.GetAtomWithIdx(bidx)
eatom = mw.GetAtomWithIdx(eidx)
assert batom.GetNumExplicitHs() == 0
assert eatom.GetNumExplicitHs() == 0
# increase explicit number of hydrogens
mw, ai_to_remove = _increase_explicit_hydrogen_for_bond_atom(
mw, remove_bidx, bidx, remove_eidx, eidx, ai_to_remove
)
# no index should be in ai_to_remove list
assert len(ai_to_remove) == 0
bwant = 0
ewant = 0
batom = mw.GetAtomWithIdx(bidx)
eatom = mw.GetAtomWithIdx(eidx)
got = batom.GetNumExplicitHs()
assert got == bwant
got = eatom.GetNumExplicitHs()
assert got == ewant
def test_remove_excluded_hydrogens() -> None:
"""It increases the number of explicit hydrogens for list of implicit hydrogens."""
m = Chem.MolFromSmiles("CC(=O)C=CC=CN")
mw = Chem.RWMol(m)
idx = 0
exclude = [idx]
_remove_excluded_hydrogens(mw, exclude)
atom = mw.GetAtomWithIdx(idx)
want = 1
got = atom.GetNumExplicitHs()
assert got == want
def test_increase_explicit_hydrogens() -> None:
"""It increases the number of explicit hydrogens for atom with index `idx`."""
# increase for non-hydrogen atoms
m = Chem.MolFromSmiles("CC(=O)C=CC=C")
mw = Chem.RWMol(m)
idx = 0
atom = mw.GetAtomWithIdx(idx)
_increase_explicit_hydrogens(mw, idx)
want = 1
got = atom.GetNumExplicitHs()
assert got == want
# do not increase for hydrogen
m = Chem.MolFromSmiles("[H]")
mw = Chem.RWMol(m)
idx = 0
atom = mw.GetAtomWithIdx(idx)
_increase_explicit_hydrogens(mw, idx)
want = 0
got = atom.GetNumExplicitHs()
assert got == want
def test_function_fails_for_negative_input() -> None:
"""It exits with a ValueError when a negative value is entered."""
with pytest.raises(Exception):
value = -1
_zero_positive_value_check(value)
|
StarcoderdataPython
|
5063595
|
# -*- coding: utf-8 -*-
"""
Highcharts Demos
Spiderweb: http://www.highcharts.com/demo/polar-spider
"""
from highcharts import Highchart
H = Highchart(width=550, height=400)
options = {
'chart': {
'polar': True,
'type': 'line',
'renderTo': 'test'
},
'title': {
'text': 'Budget vs spending',
'x': -80
},
'pane': {
'size': '80%'
},
'xAxis': {
'categories': ['Sales', 'Marketing', 'Development', 'Customer Support',
'Information Technology', 'Administration'],
'tickmarkPlacement': 'on',
'lineWidth': 0
},
'yAxis': {
'gridLineInterpolation': 'polygon',
'lineWidth': 0,
'min': 0
},
'tooltip': {
'shared': True,
'pointFormat': '<span style="color:{series.color}">{series.name}: <b>${point.y:,.0f}</b><br/>'
},
'legend': {
'align': 'right',
'verticalAlign': 'top',
'y': 70,
'layout': 'vertical'
},
}
data1 = [43000, 19000, 60000, 35000, 17000, 10000]
data2 = [50000, 39000, 42000, 31000, 26000, 14000]
H.set_dict_options(options)
H.add_data_set(data1, name='Allocated Budget', pointPlacement='on')
H.add_data_set(data2, name='Actual Spending', pointPlacement='on')
H.htmlcontent
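# To keep the rendered chart, python-highcharts can also write a standalone HTML
# file (helper name taken from the library's docs; adjust the filename as needed):
# H.save_file('polar-spider')  # writes polar-spider.html next to this script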
|
StarcoderdataPython
|
353514
|
import torch
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import logging
logging.basicConfig(level=logging.INFO)
"""
Script for evaluating the neural network on test set
"""
def evaluate_test_set(model, data, data_loader, device):
"""
Evaluates the model performance on test data
"""
model.eval()
logging.info('Evaluating accuracy on test set')
target_names = ['non hate speech', 'hate speech']
y_true = list()
y_pred = list()
total_loss = 0
for batch, targets, lengths, raw_data in data_loader['test_loader']:
batch, targets, lengths = data.sort_batch(batch, targets, lengths) ## sorts the batch wrt the length of sequences
pred = model(torch.autograd.Variable(batch).to(device), lengths.cpu().numpy()) ## perform forward pass
pred = torch.squeeze(pred)
y_true += list(targets.int())
pred_val = pred >= 0.5
y_pred += list(pred_val.data.int().detach().cpu().numpy())
acc = accuracy_score(y_true, y_pred) ## computing accuracy using sklearn's function
print("Test acc: {}".format(acc))
print('\n\n')
print(classification_report(y_true, y_pred, target_names=target_names)) ## computing other classification metrics via sklearn in classification report
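# Illustrative call (object names are assumptions; `data` must expose sort_batch
# and the loader dict must contain a 'test_loader' entry):
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# evaluate_test_set(model, data, {'test_loader': test_loader}, device)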
|
StarcoderdataPython
|
157307
|
<gh_stars>10-100
import torch
from torch import nn
from torch.utils.data import DataLoader
import argparse
import numpy as np
import datetime
import os
import json
from types import SimpleNamespace
from datasets import MiniImagenetHorizontal
from res12 import resnet12
from res10 import res10
from models import conv64
from utils import adjust_learning_rate
class ModelWrapper(nn.Module):
def __init__(self, embed, fc_sizes):
super(ModelWrapper, self).__init__()
self.embed = embed
seq = []
for i in range(len(fc_sizes)-2):
seq += [nn.Linear(fc_sizes[i], fc_sizes[i+1]), nn.ReLU(), nn.Dropout(0.5)]
seq += [nn.Linear(fc_sizes[-2], fc_sizes[-1])]
self.output_layer = nn.Sequential(*seq)
def forward(self, x):
x = self.embed(x)
return self.output_layer(torch.relu(x))
# CAUTION: must not use default values as they would override the json config
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument('config', type=str)
#parser.add_argument('--dataset', type=str)
parser.add_argument('--batch-size', type=int)
parser.add_argument('--lr', type=float)
parser.add_argument('--momentum', type=float)
parser.add_argument('--weight-decay', type=float)
parser.add_argument('--epochs', type=int)
parser.add_argument('--gpu', type=int, nargs='+')
parser.add_argument('--seed', type=int)
parser.add_argument('--dropout', type=float)
parser.add_argument('--backbone', choices=['conv64', 'res12', 'res10'])
parser.add_argument('--num-workers', type=int)
#parser.add_argument('--relu-out', action='store_true')
#parser.add_argument('--flag', type=bool)
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
# override config with cmd line args
config.update(vars(args))
args = SimpleNamespace(**config)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
assert(torch.cuda.is_available())
device = torch.device(args.gpu[0])
train_data = MiniImagenetHorizontal('train', small=(args.backbone!='res10'))
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_workers, drop_last=True)
val_data = MiniImagenetHorizontal('val', small=(args.backbone!='res10'))
val_loader = DataLoader(val_data, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, drop_last=False)
runid = datetime.datetime.now().strftime('%y%m%dT%H%M%S') + f'P{os.getpid()}'
print(f'runid={runid}')
if args.backbone == 'res12':
embed = resnet12(avg_pool=False, drop_rate=args.dropout, dropblock_size=5)
fc_sizes = [16000, 4000, 1000, len(train_data.classes)]
elif args.backbone == 'res10':
embed = res10()
fc_sizes = [25088, 4000, 1000, len(train_data.classes)]
elif args.backbone == 'conv64':
embed = conv64()
fc_sizes = [1600, 400, 100, len(train_data.classes)]
model = ModelWrapper(embed, fc_sizes)
model = model.to(device)
model = nn.DataParallel(model, device_ids=args.gpu)
lossf = nn.CrossEntropyLoss().to(device)
#optim = torch.optim.Adam(model.parameters(), lr=args.lr)
optim = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
best_loss = float('inf')
for epoch in range(args.epochs):
adjust_learning_rate(optim, epoch, args.lr)
model.train()
for i, (x, y) in enumerate(train_loader):
x, y = x.to(device), y.to(device)
y_hat = model(x)
loss = lossf(y_hat, y)
acc = (y_hat.argmax(dim=1) == y).float().mean()
print(f'\33[2K\repoch: {epoch}/{args.epochs} iter: {i}/{len(train_loader)} ' + \
f'loss: {loss.item():.4f} acc: {acc.item()*100:.2f}%',
end='')
optim.zero_grad()
loss.backward()
optim.step()
model.eval()
loss = []
acc = []
with torch.no_grad():
for x, y in val_loader:
x, y = x.to(device), y.to(device)
y_hat = model(x)
loss.append(lossf(y_hat, y))
acc.append((y_hat.argmax(dim=1) == y).float().mean())
loss = sum(loss) / len(loss)
acc = sum(acc) / len(acc)
post = ''
if loss < best_loss:
torch.save(model.state_dict(), f'{runid}_{args.backbone}.pth')
best_loss = loss
post = 'model saved'
print(f'\33[2K\repoch: {epoch+1}/{args.epochs} iter: 1/1 ' + \
f'loss: {loss:.4f} acc: {acc*100:.2f}% ' + post)
|
StarcoderdataPython
|
6573615
|
#modules-and-pip
|
StarcoderdataPython
|
6406162
|
n = int(input())
hash = dict()
for t in range(n):
arr = input().split()
x = arr[0]
y = arr[1:]
hash[x] = y
name = input()
val = hash[name]
sum = 0.0
for x in val:
sum += float(x)
print("{0:.2f}".format(sum/3))
|
StarcoderdataPython
|
8082820
|
<gh_stars>1-10
"""
"""
import numpy as np
__all__ = ["bazin09", "karpenka12", "firth17",
"bazin09_listarg", "karpenka12_listarg", "firth17_listarg",
"_defined_models"]
_defined_models = ["bazin09", "karpenka12", "firth17"]
def bazin09(x, a, t_0, t_rise, t_fall):
return a * np.exp(-(x - t_0) / t_fall) / (1.0 + np.exp(- (x - t_0) / t_rise))
def bazin09_listarg(x, params):
return params[0] * np.exp(-(x - params[1]) / params[3]) / (1.0 + np.exp(- (x - params[1]) / params[2]))
def karpenka12(x, a, b, t_0, t_1, t_rise, t_fall):
return a * (1. + b * (x - t_1)*(x - t_1)) * np.exp(- (x - t_0)/t_fall) / (1. + np.exp(- (x - t_0) / t_rise))
def karpenka12_listarg(x, params):
return params[0] * (1. + params[1] * (x - params[3])*(x - params[3])) * np.exp(- (x - params[2])/params[5]) / (1. + np.exp(- (x - params[2]) / params[4]))
def firth17(x, a, b, t_0, t_1, t_2, t_x, t_rise, t_fall):
numerator = a * (1. + b * (x - t_1) * (x - t_1)) * np.exp(- (x - t_0) / t_fall)
denominator = (1. + np.exp(- (x - t_0) / t_rise)) / (1. + np.exp(- (x - t_2) / t_x))
return numerator/denominator
def firth17_listarg(x, params):
numerator = params[0] * (1. + params[1] * (x - params[3]) * (x - params[3])) * np.exp(- (x - params[2]) / params[7])
denominator = (1. + np.exp(- (x - params[2]) / params[6])) / (1. + np.exp(- (x - params[4]) / params[5]))
return numerator/denominator
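# Quick self-check sketch: evaluate the Bazin (2009) model on a time grid and
# confirm the positional and list-argument variants agree. Parameter values are
# arbitrary illustrations, not fitted values.
if __name__ == "__main__":
    t = np.linspace(-20.0, 80.0, 200)
    flux = bazin09(t, a=1.0, t_0=0.0, t_rise=5.0, t_fall=20.0)
    assert np.allclose(flux, bazin09_listarg(t, [1.0, 0.0, 5.0, 20.0]))
    print(flux.max())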
|
StarcoderdataPython
|
28434
|
<gh_stars>0
#
# Copyright (c) 2013 Docker, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.utils import importutils
import six
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests import utils
from testtools import skipIf
from ..resources import docker_container # noqa
from .fake_docker_client import FakeDockerClient # noqa
docker = importutils.try_import('docker')
template = '''
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Test template",
"Parameters": {},
"Resources": {
"Blog": {
"Type": "DockerInc::Docker::Container",
"Properties": {
"image": "samalba/wordpress",
"env": [
"FOO=bar"
]
}
}
}
}
'''
class DockerContainerTest(HeatTestCase):
def setUp(self):
super(DockerContainerTest, self).setUp()
for res_name, res_class in docker_container.resource_mapping().items():
resource._register_class(res_name, res_class)
self.addCleanup(self.m.VerifyAll)
def create_container(self, resource_name):
t = template_format.parse(template)
stack = utils.parse_stack(t)
resource = docker_container.DockerContainer(
resource_name,
stack.t.resource_definitions(stack)[resource_name], stack)
self.m.StubOutWithMock(resource, 'get_client')
resource.get_client().MultipleTimes().AndReturn(FakeDockerClient())
self.assertIsNone(resource.validate())
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
return resource
def get_container_state(self, resource):
client = resource.get_client()
return client.inspect_container(resource.resource_id)['State']
def test_resource_create(self):
container = self.create_container('Blog')
self.assertTrue(container.resource_id)
running = self.get_container_state(container)['Running']
self.assertIs(True, running)
client = container.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertIsNone(client.container_create[0]['name'])
def test_create_with_name(self):
t = template_format.parse(template)
stack = utils.parse_stack(t)
definition = stack.t.resource_definitions(stack)['Blog']
definition['Properties']['name'] = 'super-blog'
resource = docker_container.DockerContainer(
'Blog', definition, stack)
self.m.StubOutWithMock(resource, 'get_client')
resource.get_client().MultipleTimes().AndReturn(FakeDockerClient())
self.assertIsNone(resource.validate())
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual('super-blog', client.container_create[0]['name'])
@mock.patch.object(docker_container.DockerContainer, 'get_client')
def test_create_failed(self, test_client):
mock_client = mock.Mock()
mock_client.inspect_container.return_value = {
"State": {
"ExitCode": -1
}
}
mock_client.logs.return_value = "Container startup failed"
test_client.return_value = mock_client
mock_stack = mock.Mock()
mock_stack.db_resource_get.return_value = None
res_def = mock.Mock(spec=rsrc_defn.ResourceDefinition)
docker_res = docker_container.DockerContainer("test", res_def,
mock_stack)
exc = self.assertRaises(resource.ResourceInError,
docker_res.check_create_complete,
'foo')
self.assertIn("Container startup failed", six.text_type(exc))
def test_start_with_bindings_and_links(self):
t = template_format.parse(template)
stack = utils.parse_stack(t)
definition = stack.t.resource_definitions(stack)['Blog']
definition['Properties']['port_bindings'] = {
'80/tcp': [{'HostPort': '80'}]}
definition['Properties']['links'] = {'db': 'mysql'}
resource = docker_container.DockerContainer(
'Blog', definition, stack)
self.m.StubOutWithMock(resource, 'get_client')
resource.get_client().MultipleTimes().AndReturn(FakeDockerClient())
self.assertIsNone(resource.validate())
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual({'db': 'mysql'}, client.container_start[0]['links'])
self.assertEqual(
{'80/tcp': [{'HostPort': '80'}]},
client.container_start[0]['port_bindings'])
def test_resource_attributes(self):
container = self.create_container('Blog')
# Test network info attributes
self.assertEqual('172.17.42.1', container.FnGetAtt('network_gateway'))
self.assertEqual('172.17.0.3', container.FnGetAtt('network_ip'))
self.assertEqual('1080', container.FnGetAtt('network_tcp_ports'))
self.assertEqual('', container.FnGetAtt('network_udp_ports'))
# Test logs attributes
self.assertEqual('---logs_begin---', container.FnGetAtt('logs_head'))
self.assertEqual('---logs_end---', container.FnGetAtt('logs_tail'))
# Test a non existing attribute
self.assertRaises(exception.InvalidTemplateAttribute,
container.FnGetAtt, 'invalid_attribute')
def test_resource_delete(self):
container = self.create_container('Blog')
scheduler.TaskRunner(container.delete)()
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
running = self.get_container_state(container)['Running']
self.assertIs(False, running)
def test_resource_already_deleted(self):
container = self.create_container('Blog')
scheduler.TaskRunner(container.delete)()
running = self.get_container_state(container)['Running']
self.assertIs(False, running)
scheduler.TaskRunner(container.delete)()
self.m.VerifyAll()
@skipIf(docker is None, 'docker-py not available')
def test_resource_delete_exception(self):
response = mock.MagicMock()
response.status_code = 404
response.content = 'some content'
container = self.create_container('Blog')
self.m.StubOutWithMock(container.get_client(), 'kill')
container.get_client().kill(container.resource_id).AndRaise(
docker.errors.APIError('Not found', response))
self.m.StubOutWithMock(container, '_get_container_status')
container._get_container_status(container.resource_id).AndRaise(
docker.errors.APIError('Not found', response))
self.m.ReplayAll()
scheduler.TaskRunner(container.delete)()
self.m.VerifyAll()
def test_resource_suspend_resume(self):
container = self.create_container('Blog')
# Test suspend
scheduler.TaskRunner(container.suspend)()
self.assertEqual((container.SUSPEND, container.COMPLETE),
container.state)
running = self.get_container_state(container)['Running']
self.assertIs(False, running)
# Test resume
scheduler.TaskRunner(container.resume)()
self.assertEqual((container.RESUME, container.COMPLETE),
container.state)
running = self.get_container_state(container)['Running']
self.assertIs(True, running)
|
StarcoderdataPython
|
8191866
|
from model import *
class ArticleService:
def find_all_articles(self):
articles = Article.query.filter_by(hidden=0).all()
return articles
def find_by_subject(self, subject):
articles = Article.query.filter_by(hidden=0).filter_by(
subject=subject).order_by(Article.date.desc())
return articles
def find_by_id(self, id):
article = Article.query.filter_by(id=id).first()
return article
def insert(self, article):
db.session.add(article)
db.session.commit()
def search(self, content):
articles = Article.query.filter_by(hidden=0).whooshee_search(content)
return articles
def find_by_user(self, user):
articles = Article.query.filter_by(hidden=0).filter_by(user=user)
return articles
def set_hidden(self, article, hidden):
article.hidden = hidden
db.session.commit()
def delete(self, article):
db.session.delete(article)
db.session.commit()
class SubjectService:
def find_children(self, subject):
ret = []
rs = SubjectTree.query.filter_by(parent_id=subject.id)
for result in rs:
ret.append(Subject.query.filter_by(id=result.child_id).first())
return ret
def find_parents(self, subject):
ret = []
rs = SubjectTree.query.filter_by(child_id=subject.id)
for result in rs:
ret.append(Subject.query.filter_by(id=result.parent_id).first())
return ret
def find_all(self):
subjects = Subject.query.filter(Subject.id != '1')
return subjects
def find_by_id(self, id):
subject = Subject.query.filter_by(id=id).first()
return subject
def find_by_name(self, name):
subject = Subject.query.filter_by(name=name).first()
return subject
def find_similar_by_name(self, name):
subject = Subject.query.filter(
Subject.similar_name.like('%' + name + '%')).first()
return subject
def insert(self, subject, parent_id):
db.session.add(subject)
db.session.add(SubjectTree(parent_id=parent_id, child_id=subject.id))
db.session.commit()
class UserService:
def find_by_id(self, id):
user = User.query.filter_by(id=id).first()
return user
def find_by_email(self, email):
user = User.query.filter_by(email=email).first()
return user
def insert(self, user):
db.session.add(user)
db.session.commit()
class CommentService:
def insert(self, comment):
db.session.add(comment)
db.session.commit()
def find_by_id(self, id):
return Comment.query.filter_by(id=id).first()
def find_by_email(self, email):
return Comment.query.filter_by(email=email)
def up_vote(self, comment_id, ip_id):
comment = self.find_by_id(comment_id)
current = self.get_current_vote(comment_id, ip_id)
if current is None:
db.session.add(
CommentIP(ip_id=ip_id, comment_id=comment_id, vote=1))
comment.up_votes += 1
else:
if current.vote == 1:
current.vote = 0
comment.up_votes -= 1
elif current.vote == 2:
comment.up_votes += 1
comment.down_votes -= 1
current.vote = 1
else:
comment.up_votes += 1
current.vote = 1
db.session.commit()
def down_vote(self, comment_id, ip_id):
comment = self.find_by_id(comment_id)
current = self.get_current_vote(comment_id, ip_id)
if current is None:
db.session.add(
CommentIP(ip_id=ip_id, comment_id=comment_id, vote=2))
comment.down_votes += 1
else:
            if current.vote == 2:
                # an existing down-vote is toggled off
                current.vote = 0
                comment.down_votes -= 1
            elif current.vote == 1:
                # switch an existing up-vote to a down-vote
                comment.up_votes -= 1
                comment.down_votes += 1
                current.vote = 2
            else:
                comment.down_votes += 1
                current.vote = 2
db.session.commit()
def get_current_vote(self, comment_id, ip_id):
current = CommentIP.query.filter_by(ip_id=ip_id) \
.filter_by(comment_id=comment_id).first()
return current
def search(self, content):
comments = Comment.query.whooshee_search(content)
return comments
def delete(self, comment):
db.session.delete(comment)
db.session.commit()
class IPService:
def get_by_ip(self, addr):
ip = IP.query.filter_by(addr=addr).first()
return ip
def insert(self, ip):
db.session.add(ip)
db.session.commit()
class MetricService:
def add_visit(self, article_id):
current = Metric.query.filter_by(article_id=article_id).first()
current.visits += 1
db.session.commit()
def add_comments(self, article_id):
current = Metric.query.filter_by(article_id=article_id).first()
current.comments += 1
db.session.commit()
def up_vote(self, article_id, ip_id):
current = self.get_current_vote(article_id, ip_id)
metric = Metric.query.filter_by(article_id=article_id).first()
if current is None:
db.session.add(
ArticleIP(ip_id=ip_id, article_id=article_id, vote=1))
metric.up_votes += 1
else:
if current.vote == 1:
current.vote = 0
metric.up_votes -= 1
elif current.vote == 2:
current.vote = 1
metric.up_votes += 1
metric.down_votes -= 1
else:
current.vote = 1
metric.up_votes += 1
db.session.commit()
def down_vote(self, article_id, ip_id):
current = self.get_current_vote(article_id, ip_id)
metric = Metric.query.filter_by(article_id=article_id).first()
if current is None:
db.session.add(
ArticleIP(ip_id=ip_id, article_id=article_id, vote=2))
metric.down_votes += 1
else:
if current.vote == 2:
current.vote = 0
metric.down_votes -= 1
elif current.vote == 1:
current.vote = 2
metric.up_votes -= 1
metric.down_votes += 1
else:
current.vote = 2
metric.down_votes += 1
db.session.commit()
def get_current_vote(self, article_id, ip_id):
current = ArticleIP.query.filter_by(ip_id=ip_id) \
.filter_by(article_id=article_id).first()
return current
def set_visited(self, article_id, ip_id):
db.session.add(ArticleIP(ip_id=ip_id, article_id=article_id, vote=0))
db.session.commit()
article_service = ArticleService()
ip_service = IPService()
user_service = UserService()
metric_service = MetricService()
comment_service = CommentService()
subject_service = SubjectService()
if __name__ == "__main__":
pass
|
StarcoderdataPython
|
5049436
|
<reponame>francisar/rds_manager<filename>aliyun/api/rest/Rds20140815ModifyDBInstanceSpecRequest.py
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rds20140815ModifyDBInstanceSpecRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.DBInstanceClass = None
self.DBInstanceId = None
self.DBInstanceStorage = None
self.PayType = None
def getapiname(self):
return 'rds.aliyuncs.com.ModifyDBInstanceSpec.2014-08-15'
|
StarcoderdataPython
|
4992573
|
import scrapy
class BloombergSpider(scrapy.Spider):
name = 'bloomberg'
start_urls = [
'http://www.bloomberg.com/quote/AAPL:US',
'http://www.bloomberg.com/quote/GOOGL:US',
'http://www.bloomberg.com/quote/AMZN:US',
]
def parse(self, response):
for sel in response.css('meta'):
itemprop = sel.css('::attr(itemprop)').extract()
if itemprop:
yield {
'itemprop': itemprop,
'content': sel.css('::attr(content)').extract(),
}
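# Run with the standard Scrapy CLI, e.g.:
#   scrapy runspider bloomberg_spider.py -o meta.json
# (the module filename is an assumption; -o writes the yielded items to a file)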
|
StarcoderdataPython
|
3502079
|
<reponame>mikepm35/estatusboard<filename>app/views.py
from flask import render_template, jsonify
from app import application
import urllib2, json
from os import listdir
from os.path import isfile, join
import config
@application.route('/')
@application.route('/index')
def index():
imgpath = 'app/static/img/'
imgfiles = [f for f in listdir(imgpath) if isfile(join(imgpath, f))]
imgfullpath = []
for i in imgfiles:
imgfullpath.append('/static/img/'+i)
imgfullpath_json = json.dumps(imgfullpath)
return render_template("index.html", imgfullpath=imgfullpath_json)
@application.route('/getweather')
def getweather():
wu_url_hourly = "http://api.wunderground.com/api/" + config.WUNDERGROUND_APIKEY + "/hourly/q/" + config.STATE_CODE + "/" + config.CITY + ".json"
wu_url_daily = "http://api.wunderground.com/api/" + config.WUNDERGROUND_APIKEY + "/forecast/q/" + config.STATE_CODE + "/" + config.CITY + ".json"
result_hourly = urllib2.urlopen(wu_url_hourly).read()
data_hourly = json.loads(result_hourly)
temp = data_hourly['hourly_forecast'][0]['temp']['english']
# rain = data['hourly_forecast'][0]['pop']
result_daily = urllib2.urlopen(wu_url_daily).read()
data_daily = json.loads(result_daily)
rain = data_daily['forecast']['simpleforecast']['forecastday'][0]['pop']
icon = data_daily['forecast']['simpleforecast']['forecastday'][0]['icon_url']
return jsonify(temp=temp, rain=rain, icon=icon)
|
StarcoderdataPython
|
3245273
|
<reponame>reanimat0r/isf<gh_stars>100-1000
__author__ = 'fwkz'
|
StarcoderdataPython
|
3571473
|
<filename>lib/rram_NN/test.py
# file: test.py
# Author : <NAME>
# Date : 05/11/2017
# Project : RRAM training NN
import tensorflow as tf
import numpy as np
import random
import matplotlib.pyplot as plt
from rram_NN.config import cfg
from rram_NN.rram_modeling import addDefects
def eval_net(network, dataset, weights):
"""
Evaluates the accuracy of a trained net.
"""
accuracy = network.accuracy
saver = tf.train.Saver()
sess = tf.InteractiveSession()
saver.restore(sess, weights)
netAccuracy = accuracy.eval(feed_dict={
network.x: dataset.test.images,
network.y_: dataset.test.labels,
network.keep_prob : 1.0})
print 'Model test accuracy : {}'.format(netAccuracy)
return netAccuracy
# def test_net(network, dataset, weights):
# """
# tests a network. inputs:
# network as tensorflow graph, dataset, pretrained weights
# """
# accuracy = network.accuracy
# saver = tf.train.Saver()
# sess = tf.InteractiveSession()
# saver.restore(sess, weights)
# print("Original Model test accuracy : %g"%accuracy.eval(feed_dict={
# network.x: dataset.test.images,
# network.y_: dataset.test.labels,
# network.keep_prob : 1.0}))
# allParameters = [v.eval() for v in tf.trainable_variables()]
# allparameter_tensors = [v for v in tf.trainable_variables()]
# netAccuracy = []
# for exp in range(50):
# percentVar = exp*5.0
# truncatedParams = readVariation(allParameters, percentVar)
# for i in range(len(allparameter_tensors)):
# allparameter_tensors[i].load(truncatedParams[i], sess)
# netAccuracy.append(accuracy.eval(feed_dict={
# network.x: dataset.test.images,
# network.y_: dataset.test.labels,
# network.keep_prob : 1.0}))
# x = np.arange(0. , 50*5.0 , 5.0)
# plt.plot(x , netAccuracy, 'xb-')
# plt.ylabel('Accuracy')
# plt.xlabel('Write Variation %')
# plt.show()
# netAccuracy = []
# for exp in range(50):
# percentVar = exp*5.0
# netOut = []
# for i in range(len(dataset.test.images)):
# readVarParams = readVariation(truncatedParams, percentVar)
# for j in range(len(allparameter_tensors)):
# allparameter_tensors[j].load(readVarParams[j], sess)
# netOut.append(accuracy.eval(feed_dict={
# network.x : [dataset.test.images[i,:,:,:]],
# network.y_ : [dataset.test.labels[i,:]],
# network.keep_prob : 1.0}))
# netAccuracy.append(sum(netOut)/len(netOut))
# print 'Model accuracy with read variation {} : {}'.format(percentVar , sum(netOut)/len(netOut))
#
# x = np.arange(0. , 50*5.0 , 5.0)
# plt.plot(x , netAccuracy, 'xb-')
# plt.ylabel('accuracy')
# plt.xlabel('% variation')
# plt.show()
|
StarcoderdataPython
|
8070372
|
<reponame>BennettDixon/holbertonschool-higher_level_programming
#!/usr/bin/python3
class BaseGeometry():
"""for use with shapes. Super class.
"""
def area(self):
"""instance method to calculate area of shape
"""
raise Exception("area() is not implemented")
def integer_validator(self, name, value):
"""validates integer input
"""
if type(value) != int:
raise TypeError(name + " must be an integer")
elif value <= 0:
raise ValueError(name + " must be greater than 0")
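# Illustrative subclass (not part of the original exercise file): shows how a
# shape would use integer_validator() and override area().
class Square(BaseGeometry):
    """hypothetical square built on top of BaseGeometry"""
    def __init__(self, size):
        self.integer_validator("size", size)
        self.__size = size
    def area(self):
        """area of the square"""
        return self.__size * self.__size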
|
StarcoderdataPython
|
6534205
|
<reponame>lqill/PlatKendaraan
import pygame
import pygame.freetype
import pygame.sprite
import pygame.image
import pygame.font
import pygame.time
import pygame.event
import pygame.display
import pygame_gui
from pygame_gui.elements import UIButton
from pygame_gui.windows import UIFileDialog
from component import Plat, Conveyor, Press
from time import process_time as time
class Pabrik:
def __init__(self) -> None:
super().__init__()
pygame.init()
self.FONT = pygame.freetype.Font("sprites/UniversCondensed.ttf", 65)
self.SIZE = self.WIDTH, self.HEIGHT = 800, 600
pygame.display.set_caption('Plat Kendaraan')
self.window_surface = pygame.display.set_mode(self.SIZE)
self.ui_manager = pygame_gui.UIManager(self.SIZE)
self.background = pygame.Surface(self.SIZE)
self.background.fill(pygame.Color("#f0f0f0"))
self.load_button = UIButton(pygame.Rect((20, 20), (100, 50)),
text="PILIH FILE",
manager=self.ui_manager)
self.start_button = UIButton(pygame.Rect((20, 90), (100, 50)),
text="TURN ON",
manager=self.ui_manager)
self.add_button = UIButton(pygame.Rect((20, 150), (100, 50)),
text="TAMBAH",
manager=self.ui_manager)
self.add_button.disable()
self.file_dialog = None
self.file_csv = None
self.list_plat = []
self.font = pygame.font.Font("freesansbold.ttf", 32)
self.time = time()
self.clock = pygame.time.Clock()
self.is_running = True
self.belt = Conveyor()
self.press = Press()
self.group = pygame.sprite.Group(self.belt)
self.hold = (pygame.image.load("sprites/hold.png"),
pygame.image.load("sprites/hold.png").get_rect().move(220, -130))
self.plats = []
self.sekali = True
self.satusatu = True
self.print = False
# self.plats.append(Plat(self.belt, "BG 1029 AY", self.FONT))
def run(self):
while self.is_running:
time_delta = self.clock.tick(10)/1000.0
            # Automatically feed the next plate from the loaded file
if self.start_button.text == "TURN OFF" and not self.sekali and self.satusatu:
self.plats.append(
Plat(self.belt, self.list_plat.pop(), self.FONT))
self.satusatu = False
for plat in self.plats:
if plat.pos.left == 261 and not self.press.working and self.sekali:
self.belt.switch()
for plat2 in self.plats:
plat2.switch()
self.press.switch()
self.satusatu = True
self.sekali = False
break
elif plat.pos.left == 261 and self.print and self.press.working:
plat.setPlat()
self.print = False
break
if not self.press.working and not self.sekali:
self.belt.switch()
for plat in self.plats:
plat.switch()
self.sekali = True
            # Buttons and events
for event in pygame.event.get():
# QUIT
if event.type == pygame.QUIT:
self.is_running = False
                # File picker ("PILIH FILE") button
if (event.type == pygame.USEREVENT and
event.user_type == pygame_gui.UI_BUTTON_PRESSED and
event.ui_element == self.load_button):
self.file_dialog = UIFileDialog(pygame.Rect(160, 50, 300, 400),
self.ui_manager,
window_title="Pilih list plat kendaraan",
allow_existing_files_only=True)
self.load_button.disable()
                # Start button
if (event.type == pygame.USEREVENT and
event.user_type == pygame_gui.UI_BUTTON_PRESSED and
event.ui_element == self.start_button):
self.belt.switch()
for plat in self.plats:
plat.switch()
if self.start_button.text == "TURN ON":
self.start_button.set_text("TURN OFF")
else:
self.start_button.set_text("TURN ON")
                # Add button
if (event.type == pygame.USEREVENT and
event.user_type == pygame_gui.UI_BUTTON_PRESSED and
event.ui_element == self.add_button):
self.plats.append(
Plat(self.belt, self.list_plat.pop(), self.FONT))
pass
                # File selected event
if (event.type == pygame.USEREVENT and
event.user_type == pygame_gui.UI_WINDOW_CLOSE and
event.ui_element == self.file_dialog):
self.load_button.enable()
self.add_button.enable()
try:
self.file_csv = self.file_dialog.current_file_path.as_posix()
if self.file_csv[-3:] == "csv":
self.file_dialog = None
with open(self.file_csv, 'r') as f:
self.list_plat = [
i for i in f.read().split("\n")]
else:
pop_up = pygame_gui.windows.UIMessageWindow(pygame.Rect(
200, 300, 200, 300), manager=self.ui_manager, window_title="Peringatan", html_message="Pilih file dengan format CSV!")
except AttributeError:
pop_up = pygame_gui.windows.UIMessageWindow(pygame.Rect(
200, 300, 200, 300), manager=self.ui_manager, window_title="Peringatan", html_message="Pilih filenya dahulu!")
break
self.ui_manager.process_events(event)
# Render Window
self.ui_manager.update(time_delta)
self.window_surface.blit(self.background, (0, 0))
self.group.update()
self.group.draw(self.window_surface)
for plat in self.plats:
plat.move()
self.window_surface.blit(plat.image, plat.pos)
self.window_surface.blit(plat.teks, plat.pos.move(13, 30))
self.print = self.press.update()
self.window_surface.blit(self.press.image, self.press.pos)
self.window_surface.blit(self.hold[0], self.hold[1])
self.ui_manager.draw_ui(self.window_surface)
pygame.display.update()
if __name__ == "__main__":
app = Pabrik()
app.run()
|
StarcoderdataPython
|
1925602
|
<gh_stars>1-10
import numpy as np
import pandas as pd
import py2neo
import sys
from scipy import sparse
# connect to the database
if __name__ == "__main__":
outputfile = sys.argv[1]
username = "neo4j"
password = "<PASSWORD>"
uri = "bolt://127.0.0.1:7687"
graph = py2neo.Graph(bolt=True, host="localhost", user=username, password=password)
query = """
MATCH (j:Journal)<-[:published_from]-(p)
return ID(j) as id, count(p) as pcount, p.Year as year
"""
pcount = graph.run(query).to_data_frame()
pcount.to_csv(outputfile, sep="\t")
#with open("data/networks/pcount.pickle", "wb") as f:
# pickle.dump(pcount, f)
|
StarcoderdataPython
|
3548770
|
<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata as intGrid
from random import randint
class Particle:
def __init__(self,position,isdead=False):
self.XY = np.array(position)
self.isDed = isdead
return None
def whereami(self):
return self.XY
def wheretogo(self,VelField):
self.v = np.array([float(intGrid(VelField.XY,VelField.Vx,self.XY)),\
float(intGrid(VelField.XY,VelField.Vy,self.XY))])
return self.v
def move(self,deltaT):
self.XY = self.XY + self.v*deltaT
return None
class Swarm:
def __init__(self,nombre,size=0):
self.swarm = list()
self.X = list()
self.Y = list()
self.size = 0
self.name = str(nombre)
return None
def appendParticle(self,pepa):
self.swarm.append(pepa)
self.X.append(pepa.XY[0])
self.Y.append(pepa.XY[1])
self.size = len(self.swarm)
return None
def moveSwarm(self,VelField,dT):
for pepa in self.swarm:
pepa.wheretogo(VelField)
pepa.move(dT)
self.refreshXY()
def refreshXY(self):
del self.X
del self.Y
self.X = list()
self.Y = list()
for pepa in self.swarm:
self.X.append(pepa.XY[0])
self.Y.append(pepa.XY[1])
class Mesh:
def __init__(self,size):
#self.X = np.arange(-size,size,10)
#self.Y = np.arange(-size,size,10)
#self.poorXY = list()
#for i in range(len(self.X)):
# self.poorXY.append([self.X[i],self.Y[i]])
#self.XY = np.array(self.poorXY)
#del self.poorXY
self.XY = np.array([[-size,-size],[size,-size],[-size,size],[size,size]])
self.Vx = np.array([5,0,0,-5])
self.Vy = np.array([0,5,-5,0])
return None
part1 = Particle(np.array([99,0]),False)
mesh1 = Mesh(150.0)
swarm1 = Swarm(nombre="Holi??")
for t in range(200):
#if t<11:
wh = np.array([randint(-99,99),randint(-99,99)])
#wh = np.array([90.,-90.])
swarm1.appendParticle(Particle(wh,False))
swarm1.moveSwarm(mesh1,4)
plt.scatter(wh[0],wh[1],s=5,c='red',alpha=0.5)
plt.scatter(swarm1.X,swarm1.Y,s=5,c='blue',alpha=0.1)
plt.ylim(-110,110)
plt.xlim(-110,110)
plt.show()
#print(swarm1.X)
|
StarcoderdataPython
|
8143320
|
<reponame>thejerrytan/CarND-Behavioural-Cloning-P3
import csv
import cv2
import numpy as np
import os
import sklearn
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Use this to train track2
TRAINING_DIR = "harder_data"
# Use this to train track1
# TRAINING_DIR = "data"
# Model filename for track1
# MODEL_FILENAME = "model.h5"
# Model filename for track2
MODEL_FILENAME = "model_hard.h5"
# Steering angle correction factor for left and right cameras
CORRECTION_FACTOR = 0.20
samples = []
def load_data():
with open('./{TRAINING_DIR!s}/driving_log.csv'.format(**globals())) as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
images = []
measurements = []
return train_test_split(samples, test_size=0.2)
def visualize_data():
# Shows the distribution of steering angles among the full unaugmented dataset
angles = []
for line in samples:
angles.append(float(line[3]))
angles.append(float(line[3]) + CORRECTION_FACTOR)
angles.append(float(line[3]) - CORRECTION_FACTOR)
hist, bin_edges = np.histogram(angles, bins=32)
plt.hist(angles, bins=32)
plt.show()
return hist, bin_edges
def random_shift(img, angle, shift_range):
STEERING_ANGLE_SHIFT_PER_PIXEL = 0.002
dx = np.random.uniform(-shift_range, shift_range, 1)
M = np.float32([[1, 0, dx], [0, 1, 0]])
dst = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
return (dst, STEERING_ANGLE_SHIFT_PER_PIXEL * dx + angle)
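# Illustrative note (not from the original project): with the per-pixel correction of
# 0.002 above, a horizontal shift of dx pixels changes the steering label by 0.002 * dx,
# so the shift_range of 10 pixels used below alters the label by at most +/- 0.02.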
def generator(samples, batch_size=32):
num_samples = len(samples)
    new_batch_size = batch_size // 9 # each sample yields 9 images: 3 cameras x (original + flipped + shifted)
while 1: # Loop forever so the generator never terminates
sklearn.utils.shuffle(samples)
for offset in range(0, num_samples, new_batch_size):
batch_samples = samples[offset:offset+new_batch_size]
images = []
angles = []
for batch_sample in batch_samples:
measurement = float(batch_sample[3])
preprocess_data('./{TRAINING_DIR!s}/IMG/'.format(**globals()) + batch_sample[0], measurement, images, angles)
# Add left camera
measurement_left = measurement + CORRECTION_FACTOR
preprocess_data('./{TRAINING_DIR!s}/IMG/'.format(**globals()) + batch_sample[1], measurement_left, images, angles)
# Add right camera
measurement_right = measurement - CORRECTION_FACTOR
preprocess_data('./{TRAINING_DIR!s}/IMG/'.format(**globals()) + batch_sample[2], measurement_right, images, angles)
# trim image to only see section with road
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
# Data augmentation
saved = False
def preprocess_data(source_path, measurement, images, angles):
global saved
image = cv2.imread('./{TRAINING_DIR!s}/IMG/'.format(**globals()) + source_path.split(os.sep)[-1])
b,g,r = cv2.split(image) # get b,g,r
image = cv2.merge([r,g,b]) # switch it to rgb
image = cv2.resize(image, (0,0), fx=0.5, fy=0.5) # resize image by half
images.append(image)
angles.append(measurement)
# Flip horizontally
measurement_flipped = -measurement
flipped = np.fliplr(image)
images.append(flipped)
angles.append(measurement_flipped)
# Translate horizontally
translated, new_steering_angle = random_shift(image, measurement, 10)
images.append(translated)
angles.append(new_steering_angle)
if not saved:
from scipy.misc import imsave # imsave saves images in R,G,B order
# imsave("./report/track1_normal.jpg", image)
# imsave("./report/track1_flipped.jpg", flipped)
# imsave("./report/track1_translated.jpg", translated)
imsave("./report/track2_normal.jpg", image)
imsave("./report/track2_flipped.jpg", flipped)
imsave("./report/track2_translated.jpg", translated)
saved = True
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam
from keras.layers import Cropping2D
from keras.models import load_model
def main():
train_samples, validation_samples = load_data()
visualize_data()
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
# Compile and train model
if os.path.isfile(MODEL_FILENAME):
model = load_model(MODEL_FILENAME)
else:
model = Sequential()
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(80,160,3)))
model.add(Cropping2D(cropping=((35,13), (0,0))))
model.add(Convolution2D(6,5,5, activation='relu'))
model.add(MaxPooling2D())
model.add(Convolution2D(6,5,5, activation='relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.5))
model.add(Flatten(input_shape=(37,160,3)))
model.add(Dense(120))
model.add(Dense(84))
model.add(Dense(1))
adam = Adam(lr=0.0001)
model.compile(loss='mse', optimizer=adam)
model.fit_generator(train_generator, nb_epoch=20, samples_per_epoch=len(train_samples), validation_data=validation_generator, \
nb_val_samples=len(validation_samples))
model.save(MODEL_FILENAME)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
88824
|
<gh_stars>1-10
import copy
from urllib.parse import quote_plus
from cryptojwt import KeyJar
from cryptojwt.key_jar import init_key_jar
from idpyoidc.impexp import ImpExp
def add_issuer(conf, issuer):
res = {}
for key, val in conf.items():
if key == "abstract_storage_cls":
res[key] = val
else:
_val = copy.copy(val)
_val["issuer"] = quote_plus(issuer)
res[key] = _val
return res
class OidcContext(ImpExp):
parameter = {"keyjar": KeyJar, "issuer": None}
def __init__(self, config=None, keyjar=None, entity_id=""):
ImpExp.__init__(self)
if config is None:
config = {}
self.keyjar = self._keyjar(keyjar, conf=config, entity_id=entity_id)
def _keyjar(self, keyjar=None, conf=None, entity_id=""):
if keyjar is None:
if "keys" in conf:
keys_args = {k: v for k, v in conf["keys"].items() if k != "uri_path"}
_keyjar = init_key_jar(**keys_args)
elif "key_conf" in conf and conf["key_conf"]:
keys_args = {k: v for k, v in conf["key_conf"].items() if k != "uri_path"}
_keyjar = init_key_jar(**keys_args)
else:
_keyjar = KeyJar()
if "jwks" in conf:
_keyjar.import_jwks(conf["jwks"], "")
if "" in _keyjar and entity_id:
# make sure I have the keys under my own name too (if I know it)
_keyjar.import_jwks_as_json(_keyjar.export_jwks_as_json(True, ""), entity_id)
_httpc_params = conf.get("httpc_params")
if _httpc_params:
_keyjar.httpc_params = _httpc_params
return _keyjar
else:
return keyjar
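# Minimal usage sketch (assumed, not part of the original module): with an empty config
# and no externally supplied keyjar, OidcContext simply builds a fresh KeyJar. The
# entity id below is hypothetical.
def _example_empty_context():
    ctx = OidcContext(config={}, entity_id="https://rp.example.org")
    return ctx.keyjar  # a fresh KeyJar created by _keyjar()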
|
StarcoderdataPython
|
12858165
|
<reponame>turing4ever/illustrated-python-3-course
# place super_test.py code here
# place keyword_test.py code here
|
StarcoderdataPython
|
178977
|
#!/usr/bin/python3
from zoo.serving.server import ClusterServing
serving = ClusterServing()
print("Cluster Serving has been properly set up.")
|
StarcoderdataPython
|
11386131
|
'''----------------------------------------------------------------------------------
Tool Name: WriteFeaturesFromTextFile
Source Name: WriteFeaturesFromTextFile.py
Version: ArcGIS 9.1
Author: Environmental Systems Research Institute Inc.
Required Arguments: An input feature class
An output text file
An input decimal separator character that indicates what character
should be used to separate the whole number from its decimal.
Description: Writes the features of a feature class out to a text file.
----------------------------------------------------------------------------------'''
import string, os, sys, locale, arcgisscripting
gp = arcgisscripting.create()
gp.overwriteoutput = 1
msgNotEnoughParams = "Incorrect number of input parameters."
msgUseValidDecimalPointSep = "Please use one of the valid decimal point separators."
try:
if len(sys.argv) < 4: raise Exception, msgNotEnoughParams
inputFC = sys.argv[1]
outFile = open(sys.argv[2], "w")
arg3poss = ['default python output', 'locale decimal point', 'comma', 'period', '$sep$']
if sys.argv[3].lower() not in arg3poss: raise Exception, msgUseValidDecimalPointSep
if sys.argv[3].lower() == arg3poss[1]:
locale.setlocale(locale.LC_ALL, '')
sepchar = locale.localeconv()['decimal_point']
elif sys.argv[3].lower() == arg3poss[2]: sepchar = ','
elif sys.argv[3].lower() == arg3poss[3]: sepchar = '.'
elif sys.argv[3].lower() == arg3poss[4]: sepchar = '$SEP$'
elif sys.argv[3].lower() == arg3poss[0]: sepchar = ""
inDesc = gp.describe(inputFC)
inRows = gp.searchcursor(inputFC)
inRow = inRows.next()
outFile.write(inDesc.ShapeType + "\n")
while inRow:
feat = inRow.GetValue(inDesc.ShapeFieldName)
if inDesc.ShapeType.lower() == "point":
pnt = feat.getpart()
outLine = str(inRow.GetValue(inDesc.OIDFieldName)) + " " + str(pnt.x) + " " + str(pnt.y) + " " + str(pnt.z) + " " + str(pnt.m) + "\n"
if sepchar == "": outFile.write(outLine)
else: outFile.write(outLine.replace(".", sepchar))
elif inDesc.ShapeType.lower() == "multipoint":
partnum = 0
partcount = feat.partcount
outFile.write(str(inRow.GetValue(inDesc.OIDFieldName)) + " " + str(partnum) + "\n")
while partnum < partcount:
pnt = feat.getpart(partnum)
outLine = str(partnum) + " " + str(pnt.x) + " " + str(pnt.y) + " " + str(pnt.z) + " " + str(pnt.m) + "\n"
if sepchar == "": outFile.write(outLine)
else: outFile.write(outLine.replace(".", sepchar))
partnum += 1
else:
partnum = 0
partcount = feat.partcount
while partnum < partcount:
outFile.write(str(inRow.GetValue(inDesc.OIDFieldName)) + " " + str(partnum) + "\n")
part = feat.getpart(partnum)
part.reset()
pnt = part.next()
pnt_count = 0
while pnt:
outLine = str(pnt_count) + " " + str(pnt.x) + " " + str(pnt.y) + " " + str(pnt.z) + " " + str(pnt.m) + "\n"
if sepchar == "": outFile.write(outLine)
else: outFile.write(outLine.replace(".", sepchar))
pnt = part.next()
pnt_count += 1
if not pnt:
pnt = part.next()
if pnt:
outFile.write("InteriorRing\n")
partnum += 1
inRow = inRows.next()
outFile.write("END")
outFile.flush()
outFile.close()
except Exception, ErrorDesc:
gp.AddError(ErrorDesc[0])
if outFile: outFile.close()
gp.AddError(gp.getmessages(2))
|
StarcoderdataPython
|
1863623
|
from panda import Panda
panda = Panda()
panda.set_safety_mode(Panda.SAFETY_ELM327)
panda.can_clear(0)
print(panda.can_recv())
# Note: `global` at module level has no effect; kept from the original script.
global kmsgs
while 1:
    kmsgs = panda.can_recv()
    nmsgs = []
    #print(kmsgs)
    for i in range(len(kmsgs)):
        # The first element of each received message is the CAN arbitration ID;
        # only messages with ID 1042 are printed.
        if kmsgs[i][0] == 1042:
            print(kmsgs[i])
    # nmsgs is never populated, so this line effectively clears kmsgs each iteration.
    kmsgs = nmsgs[-256:]
|
StarcoderdataPython
|
1971970
|
# -*- coding: utf-8 -*-
# Future work
|
StarcoderdataPython
|
140078
|
<filename>core-python/Core_Python/exception/ExceptionMethods.py
''' Use the exception instance (e.) and all of its methods/attributes. '''
''' Also try different exceptions, e.g. file, array, string and number-format errors as in Java. '''
''' User-defined exceptions are another option. '''
try:
    raise Exception('spam', 'eggs')
except Exception as inst:
print("Type of instance : ",type(inst)) # the exception instance
print("Arguments of instance : ",inst.args) # arguments stored in .args
print("Instance print : ",inst) # __str__ allows args to be printed directly,but may be overridden in exception subclasses
a,b = inst.args # unpack args
print("a : ",a)
print("b : ",b)
|
StarcoderdataPython
|
208128
|
"""create DOEs and execute design workflow
Caution:
This module requires fa_pytuils and delismm!
Please contatct the developers for these additional packages.
"""
import os
from collections import OrderedDict
import datetime
import numpy as np
import matplotlib.pyplot as plt
from delismm.model.doe import LatinizedCentroidalVoronoiTesselation, DOEfromFile
from delismm.model.samplecalculator import getY
from delismm.model.customsystemfunction import BoundsHandler, AbstractTargetFunction
from fa_pyutils.service.systemutils import getRunDir
from tankoh2.control_sf import createWindingDesign
from tankoh2 import programDir, log, pychain
from tankoh2.service import indent
dome = 'circle' # isotensoid circle
safetyFactor = 1 # 2.25
lb = OrderedDict([('r', 500.), ('lzylByR', 0.01), ('dp', 0.13 * safetyFactor)]) # [mm, - , MPa]
ub = OrderedDict([('r', 1600.), ('lzylByR', 12.), ('dp', 0.5 * safetyFactor)])
useFibreFailure = False
numberOfSamples = 201
class TankWinder(AbstractTargetFunction):
""""""
name = 'tank winder'
def __init__(self, lb, ub, runDir):
""""""
resultNames = ['frpMass', 'volume', 'area', 'lzylinder', 'numberOfLayers', 'angles', 'hoopLayerShifts']
AbstractTargetFunction.__init__(self, lb, ub, resultNames=resultNames)
self.doParallelization = []
self.runDir = runDir
self.allowFailedSample = True
def _call(self, parameters):
"""call function for the model"""
runDir = getRunDir(basePath=os.path.join(self.runDir), useMilliSeconds=True)
r, lzyl, burstPressure = parameters
result = createWindingDesign(dzyl=r * 2, lzylByR=lzyl, burstPressure=burstPressure,
minPolarOpening=r / 10, runDir=runDir,
domeType=pychain.winding.DOME_TYPES.ISOTENSOID if dome == 'isotensoid' else pychain.winding.DOME_TYPES.CIRCLE,
useFibreFailure = useFibreFailure)
return result
volumeFunc = lambda r, lzylByR: (4 / 3 * np.pi * r ** 3 + r * lzylByR * np.pi * r ** 2)
"""[m**3]"""
def plotGeometryRange(radii, lzylByRs, plotDir='', show=False, samples=None):
"""
:param radii: tuple with min and max radius [mm]
:param lzylByRs: tuple with min and max lzylByR [-]
:return: None
"""
radii = np.array(radii) / 1e3 # convert to m
if samples is not None:
samplesR, samplesLzylByR = samples[:2, :]
samplesR = samplesR / 1e3
fig = plt.figure(figsize=(15,6))
axes = [fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)]
axes[1].set_yscale("log")
for ax in axes:
ax.set_title("Parameter bounds")
ax.set_xlabel('Radius [m]')
ax.set_ylabel('Volume [m^3]')
color = 'tab:blue'
for lzylByR in lzylByRs:
x = np.linspace(*radii,11)
volumes = [volumeFunc(r, lzylByR) for r in x]
ax.plot(x, volumes, color=color, label=f'lzylByR={lzylByR}')
color = 'tab:orange'
ax.legend()
if samples is not None:
volumes = volumeFunc(samplesR, samplesLzylByR)
ax.scatter(samplesR, volumes, label=f'samples')
if plotDir:
plt.savefig(plotDir+'/geometryRange.png')
if show:
plt.show()
def main():
sampleFile = '' + 'C:/PycharmProjects/tankoh2/tmp/doe_circle_20210520_135237_cvt/sampleX.txt'
startTime = datetime.datetime.now()
names = list(lb.keys())
runDir = getRunDir(f'doe_{dome}_{"puckff" if useFibreFailure else "puckiff"}',
basePath=os.path.join(programDir, 'tmp'))
winder = TankWinder(lb, ub, runDir)
if sampleFile:
lcvt = DOEfromFile(sampleFile)
else:
lcvt = LatinizedCentroidalVoronoiTesselation(numberOfSamples, len(names))
sampleX = BoundsHandler.scaleToBoundsStatic(lcvt.sampleXNormalized, list(lb.values()), list(ub.values()))
plotGeometryRange([lb['r'], ub['r']],[lb['lzylByR'], ub['lzylByR']], plotDir=runDir, samples=sampleX)
lcvt.xToFile(os.path.join(runDir, 'sampleX.txt'))
lcvt.xToFileStatic(os.path.join(runDir, 'sampleX_bounds.txt'), sampleX)
sampleY = getY(sampleX, winder, verbose=True, runDir=runDir)
# store samples
lcvt.yToFile(os.path.join(runDir, 'sampleY.txt'), winder, sampleY)
# lcvt.xyToFile(os.path.join(runDir, 'full_doe2.txt'), winder, sampleY, True)
allSamples = [names + winder.resultNames]
for inputSample, outputSample in zip(sampleX.T, sampleY):
if hasattr(outputSample, '__iter__'):
allSamples.append(list(inputSample) + list(outputSample))
else:
allSamples.append(list(inputSample) + list([outputSample]))
with open(os.path.join(runDir, 'full_doe.txt'), 'w') as f:
f.write(indent(allSamples, hasHeader=True))
duration = datetime.datetime.now() - startTime
log.info(f'runtime {duration.seconds} seconds')
if __name__ == '__main__':
if 1:
main()
else:
plotGeometryRange([lb['r'], ub['r']],[lb['lzylByR'], ub['lzylByR']], show=True)
|
StarcoderdataPython
|
9799982
|
from os import path
from setuptools import setup, find_packages
from qmap import __version__
directory = path.dirname(path.abspath(__file__))
with open(path.join(directory, 'requirements.txt')) as f:
required = f.read().splitlines()
# Get the long description from the README file
with open(path.join(directory, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='qmap',
version=__version__,
description='Manage job executions in a cluster',
long_description=long_description,
long_description_content_type='text/x-rst',
url="https://github.com/bbglab/qmap",
author="Barcelona Biomedical Genomics Lab",
author_email="<EMAIL>",
license="Apache Software License 2.0",
packages=find_packages(),
install_requires=required,
include_package_data=True,
entry_points={
'console_scripts': [
'qmap = qmap.main:cli',
]
},
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
],
)
|
StarcoderdataPython
|
89347
|
from __future__ import print_function
import os
import sys
from distutils.core import setup, Extension
# Need an 'open' function that supports the 'encoding' argument:
if sys.version_info[0] < 3:
from codecs import open
## Command-line argument parsing
# --with-zlib: use zlib for compressing and decompressing
# --without-zlib: ^ negated
# --with-zlib=<dir>: path to zlib if needed
# --with-libmemcached=<dir>: path to libmemcached package if needed
cmd = None
use_zlib = True
pkgdirs = [] # incdirs and libdirs get these
libs = ["memcached"]
defs = []
incdirs = []
libdirs = []
def append_env(L, e):
v = os.environ.get(e)
if v and os.path.exists(v):
L.append(v)
append_env(pkgdirs, "LIBMEMCACHED")
append_env(pkgdirs, "ZLIB")
# Hack up sys.argv, yay
unprocessed = []
for arg in sys.argv[1:]:
if arg == "--with-zlib":
use_zlib = True
continue
elif arg == "--without-zlib":
use_zlib = False
continue
elif arg == "--with-sasl2":
libs.append("sasl2")
continue
elif arg == "--gen-setup":
cmd = arg[2:]
elif "=" in arg:
if arg.startswith("--with-libmemcached=") or \
arg.startswith("--with-zlib="):
pkgdirs.append(arg.split("=", 1)[1])
continue
unprocessed.append(arg)
sys.argv[1:] = unprocessed
for pkgdir in pkgdirs:
incdirs.append(os.path.join(pkgdir, "include"))
libdirs.append(os.path.join(pkgdir, "lib"))
if use_zlib:
libs.append("z")
defs.append(("USE_ZLIB", None))
## OS X non-PPC workaround
# Apple OS X 10.6 with Xcode 4 have Python compiled with PPC but they removed
# support for compiling with that arch, so we have to override ARCHFLAGS.
if sys.platform == "darwin" and not os.environ.get("ARCHFLAGS"):
compiler_dirn = "/usr/libexec/gcc/darwin"
if os.path.exists(compiler_dirn):
dir_items = os.listdir(compiler_dirn)
if "ppc" not in dir_items:
print("enabling osx-specific ARCHFLAGS/ppc hack", file=sys.stderr)
os.environ["ARCHFLAGS"] = "-arch i386 -arch x86_64"
# There's a bug in <py3 with Py_True/False that will propagate with GCC's
# strict aliasing rules. Let's skip this flag for now.
cflags = ["-fno-strict-aliasing", "-std=c99"]
# Extension definitions
pylibmc_ext = Extension("_pylibmc", ["src/_pylibmcmodule.c"],
libraries=libs, include_dirs=incdirs,
library_dirs=libdirs, define_macros=defs,
extra_compile_args=cflags)
# Hidden secret: generate Setup file for statically compiling the extension.
if cmd == "gen-setup":
line = " ".join((
pylibmc_ext.name,
" ".join("-l" + lib for lib in pylibmc_ext.libraries),
" ".join("-I" + incdir for incdir in pylibmc_ext.include_dirs),
" ".join("-L" + libdir for libdir in pylibmc_ext.library_dirs),
" ".join("-D" + name + ("=" + str(value), "")[value is None] for (name, value) in pylibmc_ext.define_macros)))
with open("Setup", "w") as s:
s.write(line + "\n")
sys.exit(0)
with open("README.rst", "U", encoding="utf-8") as r:
readme_text = r.read()
with open("src/pylibmc-version.h", "U", encoding="utf-8") as r:
version = r.read().strip().split("\"")[1]
setup(
name="pylibmc",
version=version,
url="http://sendapatch.se/projects/pylibmc/",
author="<NAME>",
author_email="<EMAIL>",
license="3-clause BSD <http://www.opensource.org/licenses/bsd-license.php>",
description="Quick and small memcached client for Python",
long_description=readme_text,
ext_modules=[pylibmc_ext],
package_dir={'': 'src'},
packages=['pylibmc'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
|
StarcoderdataPython
|
4957781
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2021 The HERA Collaboration
# Licensed under the MIT License
"""Utilities for comparing k-space covered by different surveys."""
import numpy as np
import matplotlib.pyplot as plt
from astropy import constants as const
from matplotlib.patches import Rectangle
from astropy.cosmology import FlatLambdaCDM
from .line import Line
from .util import line_registry
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
survey_registry = {
"hera": {
"target_lines": ["HI"],
"rval": 1533.0,
"hemisphere": "S",
"freq_range": [50.0, 200.0],
"angular_res": 0.00727,
},
"spherex": {
"target_lines": ["Ha", "Hb", "Lya", "OII", "OIII"],
"rval": 150.0,
"hemisphere": "B",
"lambda_range": [0.75, 5.0],
"angular_res": 3.0058 * 1e-5,
},
"fyst": {
"target_lines": ["CII", "CO21", "CO32", "CO43", "CO54"],
"rval": 100.0,
"hemisphere": "S",
"freq_range": [220000.0, 405000.0],
"angular_res": 0.000193925,
},
"tim": {
"target_lines": ["CII", "CO32", "CO43", "CO54"],
"rval": 250.0,
"hemisphere": "S",
"freq_range": [240000.0, 420000.0],
"angular_res": 5.8178 * 1e-5,
},
"concerto": {
"target_lines": ["CII", "CO21", "CO32", "CO43", "CO54"],
"rval": 200.0,
"hemisphere": "S",
"freq_range": [200000.0, 360000.0],
"angular_res": 5.8178 * 1e-5,
},
"roman": {
"target_lines": ["Lya"],
"rval": 461, # Really 461 * wave, with `wave` in microns.
"hemisphere": "S",
"lambda_range": [1, 1.93],
"angular_res": 5.333e-07, # 0.11 arcsec/pixel
},
"euclid": {
"target_lines": ["Lya"],
"rval": 250,
"hemisphere": "S",
"lambda_range": [0.92, 2.0],
"angular_res": 4.848e-07, # 0.1 arcsec/pixel
},
}
survey_registry["ccatp"] = survey_registry["fyst"]
class Survey(object):
"""
Define an object for handling surveys.
Parameters
----------
survey_name : str
        The name of the survey. The registered surveys are: hera, spherex, fyst
        (also registered as ccatp), tim, concerto, roman, and euclid.
target_lines : list of str
The lines to include in the survey. Ignored if `survey_name` matches one
of the known surveys.
freq_range : 2-element list of float
The range of frequencies covered by the survey, in MHz. Ignored if
`survey_name` matches one of the known surveys. Should not be specified
if `lambda_range` is specified.
lambda_range : 2-element list of float
The range of wavelengths covered by the survey, in µm. Ignored if
`survey_name` matches one of the known surveys. Should not be specified
if `freq_range` is specified.
rval : float
The resolving power "R" of the experiment. Ignored if `survey_name`
matches one of the known surveys.
hemisphere : str
The hemisphere the survey measures. Should be one of: "N" (north), "S"
(south), or "B" (both).
angular_res : float
The angular resolution of the experiment, in radians. Ignored if
`survey_name` matches one of the known surveys.
Raises
------
ValueError
This is raised if `survey_name` is not one of the known surveys, and
either both `freq_range` and `lambda_range` are specified or neither is
specified.
"""
def __init__(
self,
survey_name,
target_lines=None,
freq_range=None,
lambda_range=None,
rval=None,
hemisphere=None,
angular_res=None,
):
"""Initialize a survey."""
self.survey_name = survey_name
if survey_name in survey_registry.keys():
reg = survey_registry[survey_name]
# More elegant way to do this but I'm lazy
if "freq_range" in reg:
freq_range = reg["freq_range"]
if "lambda_range" in reg:
lambda_range = reg["lambda_range"]
if "angular_res" in reg:
angular_res = reg["angular_res"]
if "target_lines" in reg:
target_lines = [
Line(line, **line_registry[line]) for line in reg["target_lines"]
]
if "rval" in reg:
rval = reg["rval"]
if freq_range is None and lambda_range is None:
raise ValueError("Must specify either wavelength or frequency range")
if freq_range is not None and lambda_range is not None:
raise ValueError("Cannot specify freq_range and lambda_range")
else:
if freq_range is None:
self.lambda_range = np.array(lambda_range)
# Dividing m/s by microns gives frequencies in MHz
self.freq_range = const.c.to_value() / self.lambda_range
else:
self.freq_range = np.array(freq_range)
# Dividing m/s by MHz gives wavelengths in microns
self.lambda_range = const.c.to_value() / self.freq_range
self.angular_res = angular_res
self.hemisphere = hemisphere
self.target_lines = target_lines
self.redshift_ranges = self.calc_redshift_ranges()
self.rval = rval
kpara_max = self.calc_kpara_max()
kpara_min = self.calc_kpara_min()
self.kpara_range = {}
for line in self.target_lines:
line_name = line.get_line_name()
self.kpara_range[line_name] = np.array(
[kpara_min[line_name], kpara_max[line_name]]
)
kperp_max = self.calc_kperp_max()
self.kperp_range = {}
for line in self.target_lines:
line_name = line.get_line_name()
self.kperp_range[line_name] = np.array([0.01, kperp_max[line_name]])
def get_kperp_range(self):
"""
Get the k_perpendicular range covered by a survey.
Parameters
----------
None
Returns
-------
dict of 2-element list of float
The minimum and maximum k_perpendicular values, in units of
1/Mpc. The keys of the dictionary correspond to target_lines for the
survey and the values are the lists of ranges.
"""
return self.kperp_range
def get_hemisphere(self):
"""
Get the hemisphere covered by a survey.
Parameters
----------
None
Returns
-------
str
The hemisphere covered by the survey.
"""
return self.hemisphere
def get_kpara_range(self):
"""
Get the k_parallel range covered by a survey.
Parameters
----------
None
Returns
-------
dict of 2-element list of float
The minimum and maximum k_parallel values, in units of 1/Mpc. The
keys of the dictionary correspond to target_lines for the survey and
the values are the lists of ranges.
"""
return self.kpara_range
def calc_kperp_max(self):
"""
Calculate the maximum k_perpendicular modes for a survey.
Parameters
----------
None
Returns
-------
        dict of float
            The maximum k_perpendicular value for a survey, in units of
            1/Mpc. The keys correspond to the target_lines of the survey.
"""
kperp_max = {}
base_factors = np.pi / (self.angular_res * cosmo.h)
for line in self.target_lines:
min_z, max_z = self.redshift_ranges[line.get_line_name()]
kperp_max[line.get_line_name()] = (
base_factors / cosmo.comoving_distance(max_z).to_value()
)
return kperp_max
def calc_kpara_max(self):
"""
Calculate the maximum k_parallel modes for a survey.
Parameters
----------
None
Returns
-------
dict of float
The maximum k_parallel value for a survey, in units of 1/Mpc. The
keys correspond to the target_lines of the survey.
"""
kpara_max = {}
base_factors = (
10 ** 3
* self.rval
* np.pi
* cosmo.H0.to_value()
/ (const.c.to_value() * cosmo.h)
)
for line in self.target_lines:
min_z, max_z = self.redshift_ranges[line.get_line_name()]
kpara_max[line.get_line_name()] = (
base_factors * cosmo.efunc(min_z) / (1 + min_z)
)
return kpara_max
def calc_kpara_min(self):
"""
Calculate the minimum k_parallel modes for a survey.
Parameters
----------
None
Returns
-------
dict of float
The minimum k_parallel values for a survey, in units of 1/Mpc. The
keys correspond to the target_lines of the survey.
"""
kpara_min = {}
for line in self.target_lines:
min_z, max_z = self.redshift_ranges[line.get_line_name()]
rpara_range = cosmo.comoving_distance(max_z) - cosmo.comoving_distance(
min_z
)
rpara_range = rpara_range.to_value()
kpara_min[line.get_line_name()] = 2.0 * np.pi / (rpara_range * cosmo.h)
return kpara_min
def get_survey_name(self):
"""
Get the name of a survey.
Parameters
----------
None
Returns
-------
str
The survey name.
"""
return self.survey_name
def get_target_lines(self):
"""
Get the targeted transition lines for a survey.
Parameters
----------
None
Returns
-------
list of str
The names of the lines.
"""
return [line.get_line_name() for line in self.target_lines]
def get_freq_range(self):
"""
Get the frequency range for a survey.
Parameters
----------
None
Returns
-------
2-element list of float
The minimum and maximum frequencies of the survey, in MHz.
"""
return self.freq_range
def get_lambda_range(self):
"""
Get the wavelength range for a survey.
Parameters
----------
None
Returns
-------
2-element list of float
            The minimum and maximum wavelengths of the survey, in µm.
"""
return self.lambda_range
def calc_redshift_ranges(self):
"""
Calculate a dictionary with the line name and the redshift range.
Parameters
----------
None
Returns
-------
dict of 2-element 1d-arrays of float
The minimum and maximum redshift values for a survey. Dictionary
keys correspond to target_lines, and values are the redshift range.
"""
redshift_ranges = {}
for line in self.target_lines:
min_redshift = np.max([line.freq_to_z(np.max(self.freq_range)), 0.0])
max_redshift = np.max([line.freq_to_z(np.min(self.freq_range)), 0.0])
redshift_ranges[line.get_line_name()] = np.array(
[min_redshift, max_redshift]
)
return redshift_ranges
def get_redshift_ranges(self, lines=None):
"""
Get the redshift range for a survey for a particular line.
Parameters
----------
lines : list of str, optional
The list of lines to get redshifts for. If `None`, then all redshift
ranges are returned.
Returns
-------
dict of 2-element 1d-arrays of float
The minimum and maximum redshift values for a survey. Dictionary
keys correspond to target_lines, and values are the redshift range.
"""
if lines is None:
return self.redshift_ranges
else:
redshift_ranges = {}
for line in lines:
redshift_ranges[line.get_line_name()] = self.redshift_ranges[
line.get_line_name()
]
return redshift_ranges
def plot_coverage_k(self, ax=None, fig=1, **kwargs):
"""
Plot in (k_perp, k_para) space for each line targeted by the survey.
Parameters
----------
ax : matplotlib axis object, optional
The axis to add the plot to. If None, create a new axis.
fig : int, optional
The figure number to attach to.
kwargs : dict
The options to pass through to `matplotlib.pyplot.fill_between`.
Returns
-------
ax : matplotlib axis object
The axis the figure was plotted to.
"""
had_ax = True
if ax is None:
fig, ax = plt.subplots(1, 1, num=fig)
had_ax = False
kpara = self.get_kpara_range()
kperp = self.get_kperp_range()
for line in self.get_target_lines():
rect = Rectangle(
(kperp[line][0], kpara[line][0]),
np.diff(kperp[line])[0],
np.diff(kpara[line])[0],
**kwargs
)
ax.add_patch(rect)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlim(1e-3, 1e2)
ax.set_ylim(1e-3, 1e2)
if not had_ax:
ax.set_xlabel(r"$k_\perp$ [$h$Mpc$^{-1}$]")
ax.set_ylabel(r"$k_\parallel$ [$h$Mpc$^{-1}$]")
return ax
def plot_coverage_z(self, ax=None, fig=1, use_labels=True, start=0, **kwargs):
"""
Plot the redshift coverage for each line targeted by a survey.
This method plots a horizontal line for each emission line targeted by
survey vs. redshift on the x-axis.
Parameters
----------
ax : matplotlib axis object, optional
The axis to add the plot to. If None, create a new axis.
fig : int, optional
The figure number to attach to.
use_labels : bool, optional
Whether to add labels to the plot.
start : int, optional
Which line to start plotting with.
kwargs : dict
The keyword arguments passed to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib axis object
The axis the figure was plotted to.
"""
had_ax = True
if ax is None:
fig, ax = plt.subplots(1, 1, num=fig)
had_ax = False
line_zs = self.get_redshift_ranges()
line_names = self.get_target_lines()
for i, line in enumerate(line_names):
label = "{} {}".format(self.survey_name, line) if use_labels else None
ax.plot(line_zs[line], [start + i] * 2, label=label, **kwargs)
ax.set_xlim(0, 15)
ax.set_yticklabels([])
ax.legend()
if not had_ax:
ax.set_xlabel(r"$z$")
return ax
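# Minimal usage sketch (assumed, not part of the original module): build one of the
# registered surveys and inspect its coverage using only the public methods above.
def _example_survey_usage():
    hera = Survey("hera")
    print(hera.get_target_lines())        # names of the targeted lines
    print(hera.get_redshift_ranges())     # redshift range per targeted line
    ax = hera.plot_coverage_k(color="C0", alpha=0.3)  # kwargs go to the Rectangle patches
    return ax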
|
StarcoderdataPython
|
1890792
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('index.html', views.index),
path('about.html', views.about),
path('search.html', views.search),
path('results.html', views.results),
path('login.html', views.login),
path('signup.html', views.signup),
path('activate/<uidb64>/<str:token>/', views.activate, name='activate'),
path('account_activation_sent.html', views.account_activation_sent, name='account_activation_sent'),
path('account_activation_invalid.html', views.account_activation_invalid, name='account_activation_invalid'),
path('account_activation_email.html', views.signup),
path('items/<str:item_name>/',views.render_items, name='item'),
]
|
StarcoderdataPython
|
4852849
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that provides the API entrypoint for the py2log python package.
"""
import logging.config
import sys
try:
import colorlog
except ImportError:
colorlog = False
TRACE = 5
def configure(config=None, filepath=None, force=True, level=None, name=None):
"""
Function that configures the python standard library logging package.
    Args:
        config (dict, NoneType, optional): dictConfig-compatible logging configuration;
            if omitted, a default console + file configuration is used.
        filepath (str, NoneType, optional): Path of the log file (defaults to "py2log.log").
        force (bool, optional): Reconfigure logging even if root handlers already exist.
        level (int, NoneType, optional): Minimum log level (defaults to the custom TRACE level).
        name (str, NoneType, optional): Name of the logger to return.
    """
level = level or TRACE
sys.excepthook = log_excepthook
if force or not logging.root.handlers:
configure_trace()
logging_config = (
config
if config
else {
"disable_existing_loggers": False,
"formatters": {
"console": {
"()": "colorlog.ColoredFormatter",
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": "%(log_color)s%(lineno)-4d %(filename)-11s %(name)-17s %(funcName)-27s %(message)s",
"log_colors": {
"CRITICAL": "white,bold,bg_red",
"DEBUG": "cyan",
"ERROR": "red",
"INFO": "white",
"WARNING": "yellow",
"TRACE": "black,bold,bg_cyan",
},
},
"file": {
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": "%(levelname)-8s %(lineno)-4d %(filename)-11s %(name)-17s %(funcName)-27s %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": 10,
},
"file": {
"class": "logging.FileHandler",
"filename": filepath or "py2log.log",
"formatter": "file",
"level": 10,
"mode": "w",
},
},
"root": {"handlers": ["console", "file"], "level": 10},
"version": 1,
}
)
if not colorlog:
logging_config["formatters"].pop("console", None)
logging_config["handlers"]["console"]["formatter"] = "file"
logging_config["handlers"]["console"]["level"] = level
logging_config["handlers"]["file"]["level"] = level
logging_config["root"]["level"] = level
logging.config.dictConfig(logging_config)
logger = logging.getLogger(name)
logger.info("Successfully configured logging!")
return logger
def configure_trace():
"""
Function that configures a new TRACE log level.
"""
logging.addLevelName(TRACE, "TRACE")
logging.TRACE = TRACE
logging.getLoggerClass().trace = log_trace_to_class
logging.trace = log_trace_to_root
def log_excepthook(exc_type, exc_value, exc_traceback):
"""
Function that automatically logs raised exceptions.
Args:
exc_type (type):
exc_value (BaseException):
exc_traceback (traceback):
"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logging.critical(
"Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback)
)
def log_trace_to_class(logger, message, *args, **kwargs):
"""
Function that provides trace logging to a Logger class.
Args:
logger (logging.Logger):
message (str): Log message.
*args (list, optional): Variable length argument list.
**kwargs (dict, optional): Arbitrary keyword arguments.
"""
if logger.isEnabledFor(TRACE):
# pylint: disable=protected-access
logger._log(TRACE, message, args, **kwargs)
def log_trace_to_root(message, *args, **kwargs):
"""
Function that provides trace logging to the root logger.
Args:
message (str): Log message.
*args (list, optional): Variable length argument list.
**kwargs (dict, optional): Arbitrary keyword arguments.
"""
logging.log(TRACE, message, *args, **kwargs)
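# Minimal usage sketch (assumed, not shipped with this module): configure logging and use
# the custom TRACE level added by configure_trace(). The log file name is arbitrary.
def _example_usage():
    logger = configure(filepath="example-py2log.log", name="example")
    logger.debug("debug message")
    logger.trace("trace message")  # .trace() is patched onto the Logger class above
    return logger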
|
StarcoderdataPython
|
3476913
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
from builtins import object, str
import mock
from pants.binaries.binary_util import (BinaryRequest, BinaryToolFetcher, BinaryToolUrlGenerator,
BinaryUtil)
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_open
from pants_test.test_base import TestBase
logger = logging.getLogger(__name__)
class ExternalUrlGenerator(BinaryToolUrlGenerator):
def generate_urls(self, version, host_platform):
return ['https://example.com/some-binary', 'https://example.com/same-binary']
# Make the __str__ deterministic, for testing exception messages.
def __str__(self):
return 'ExternalUrlGenerator(<example __str__()>)'
# TODO(cosmicexplorer): test requests with an archiver!
class BinaryUtilTest(TestBase):
"""Tests binary_util's binaries_baseurls handling."""
class MapFetcher(object):
"""Class which pretends to be a pants.net.http.Fetcher, but is actually a dictionary."""
def __init__(self, read_map):
self._map = read_map
def download(self, url, path_or_fd=None, **kwargs):
if not url in self._map:
raise IOError("404: Virtual URL '{}' does not exist.".format(url))
if not path_or_fd:
raise AssertionError("Expected path_or_fd to be set")
path_or_fd.write(self._map[url])
return path_or_fd
def keys(self):
return list(self._map.keys())
def values(self):
return list(self._map.values())
def __getitem__(self, key):
return self._map[key] # Vanilla internal map access (without lambda shenanigans).
@classmethod
def _fake_base(cls, name):
return 'fake-url-{name}'.format(name=name)
@classmethod
def _fake_url(cls, binaries, base, binary_key):
binary_util = cls._gen_binary_util()
supportdir, version, name = binaries[binary_key]
binary_request = binary_util._make_deprecated_binary_request(supportdir, version, name)
binary_path = binary_request.get_download_path(binary_util._host_platform())
return '{base}/{binary}'.format(base=base, binary=binary_path)
@classmethod
def _gen_binary_tool_fetcher(cls, bootstrap_dir='/tmp', timeout_secs=30, fetcher=None,
ignore_cached_download=True):
return BinaryToolFetcher(
bootstrap_dir=bootstrap_dir,
timeout_secs=timeout_secs,
fetcher=fetcher,
ignore_cached_download=ignore_cached_download)
@classmethod
def _gen_binary_util(cls, baseurls=[], path_by_id=None, allow_external_binary_tool_downloads=True,
uname_func=None, **kwargs):
return BinaryUtil(
baseurls=baseurls,
binary_tool_fetcher=cls._gen_binary_tool_fetcher(**kwargs),
path_by_id=path_by_id,
allow_external_binary_tool_downloads=allow_external_binary_tool_downloads,
uname_func=uname_func)
@classmethod
def _read_file(cls, file_path):
with open(file_path, 'rb') as result_file:
return result_file.read()
def test_timeout(self):
fetcher = mock.create_autospec(Fetcher, spec_set=True)
timeout_value = 42
binary_util = self._gen_binary_util(baseurls=['http://binaries.example.com'],
timeout_secs=timeout_value,
fetcher=fetcher)
self.assertFalse(fetcher.download.called)
fetch_path = binary_util.select_script(supportdir='a-binary', version='v1.2', name='a-binary')
logger.debug("fetch_path: {}".format(fetch_path))
fetcher.download.assert_called_once_with('http://binaries.example.com/a-binary/v1.2/a-binary',
listener=mock.ANY,
path_or_fd=mock.ANY,
timeout_secs=timeout_value)
def test_no_base_urls_error(self):
"""Tests exception handling if build support urls are improperly specified."""
binary_util = self._gen_binary_util()
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_script("supportdir", "version", "name")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.NoBaseUrlsError.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir=supportdir, version=version, "
"name=name, platform_dependent=False, external_url_generator=None, archiver=None): "
"--binaries-baseurls is empty.")
self.assertIn(expected_msg, the_raised_exception_message)
def test_support_url_multi(self):
"""Tests to make sure existing base urls function as expected."""
bootstrap_dir = '/tmp'
with temporary_dir() as invalid_local_files, temporary_dir() as valid_local_files:
binary_util = self._gen_binary_util(
baseurls=[
'BLATANTLY INVALID URL',
'https://dl.bintray.com/pantsbuild/bin/reasonably-invalid-url',
invalid_local_files,
valid_local_files,
'https://dl.bintray.com/pantsbuild/bin/another-invalid-url',
],
bootstrap_dir=bootstrap_dir)
binary_request = binary_util._make_deprecated_binary_request(
supportdir='bin/protobuf',
version='2.4.1',
name='protoc')
binary_path = binary_request.get_download_path(binary_util._host_platform())
contents = b'proof'
with safe_open(os.path.join(valid_local_files, binary_path), 'wb') as fp:
fp.write(contents)
binary_path_abs = os.path.join(bootstrap_dir, binary_path)
self.assertEqual(binary_path_abs, binary_util.select(binary_request))
self.assertEqual(contents, self._read_file(binary_path_abs))
def test_support_url_fallback(self):
"""Tests fallback behavior with multiple support baseurls.
Mocks up some dummy baseurls and then swaps out the URL reader to make sure urls are accessed
and others are not.
"""
fake_base, fake_url = self._fake_base, self._fake_url
bases = [fake_base('apple'), fake_base('orange'), fake_base('banana')]
binaries = {t[2]: t for t in (('bin/protobuf', '2.4.1', 'protoc'),
('bin/ivy', '4.3.7', 'ivy'),
('bin/bash', '4.4.3', 'bash'))}
fetcher = self.MapFetcher({
fake_url(binaries, bases[0], 'protoc'): 'SEEN PROTOC',
fake_url(binaries, bases[0], 'ivy'): 'SEEN IVY',
fake_url(binaries, bases[1], 'bash'): 'SEEN BASH',
fake_url(binaries, bases[1], 'protoc'): 'UNSEEN PROTOC 1',
fake_url(binaries, bases[2], 'protoc'): 'UNSEEN PROTOC 2',
fake_url(binaries, bases[2], 'ivy'): 'UNSEEN IVY 2',
})
binary_util = self._gen_binary_util(
baseurls=bases,
fetcher=fetcher)
unseen = [item for item in fetcher.values() if item.startswith('SEEN ')]
for supportdir, version, name in binaries.values():
binary_path_abs = binary_util.select_binary(
supportdir=supportdir,
version=version,
name=name)
expected_content = 'SEEN {}'.format(name.upper())
self.assertEqual(expected_content, self._read_file(binary_path_abs))
unseen.remove(expected_content)
self.assertEqual(0, len(unseen)) # Make sure we've seen all the SEENs.
def test_select_binary_base_path_linux(self):
def uname_func():
return "linux", "dontcare1", "dontcare2", "dontcare3", "amd64"
binary_util = self._gen_binary_util(uname_func=uname_func)
binary_request = binary_util._make_deprecated_binary_request("supportdir", "version", "name")
self.assertEquals("supportdir/linux/x86_64/version/name",
binary_util._get_download_path(binary_request))
def test_select_binary_base_path_darwin(self):
def uname_func():
return "darwin", "dontcare1", "14.9", "dontcare2", "dontcare3",
binary_util = self._gen_binary_util(uname_func=uname_func)
binary_request = binary_util._make_deprecated_binary_request("supportdir", "version", "name")
self.assertEquals("supportdir/mac/10.10/version/name",
binary_util._get_download_path(binary_request))
def test_select_binary_base_path_missing_os(self):
def uname_func():
return "vms", "dontcare1", "999.9", "dontcare2", "VAX9"
binary_util = self._gen_binary_util(uname_func=uname_func)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_binary("supportdir", "version", "name")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.MissingMachineInfo.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir=supportdir, version=version, "
"name=name, platform_dependent=True, external_url_generator=None, archiver=None): "
"Pants could not resolve binaries for the current host: platform 'vms' was not recognized. "
"Recognized platforms are: [u'darwin', u'linux'].")
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_binary_base_path_missing_version(self):
def uname_func():
return "darwin", "dontcare1", "999.9", "dontcare2", "x86_64"
binary_util = self._gen_binary_util(uname_func=uname_func)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_binary("mysupportdir", "myversion", "myname")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.MissingMachineInfo.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir=mysupportdir, version=myversion, "
"name=myname, platform_dependent=True, external_url_generator=None, archiver=None): Pants could not "
"resolve binaries for the current host. Update --binaries-path-by-id to find binaries for "
"the current host platform (u\'darwin\', u\'999\').\\n--binaries-path-by-id was:")
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_script_missing_version(self):
def uname_func():
return "darwin", "dontcare1", "999.9", "dontcare2", "x86_64"
binary_util = self._gen_binary_util(uname_func=uname_func)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_script("mysupportdir", "myversion", "myname")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.MissingMachineInfo.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir=mysupportdir, version=myversion, "
# platform_dependent=False when doing select_script()
"name=myname, platform_dependent=False, external_url_generator=None, archiver=None): Pants "
"could not resolve binaries for the current host. Update --binaries-path-by-id to find "
"binaries for the current host platform (u\'darwin\', u\'999\').\\n--binaries-path-by-id was:")
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_binary_base_path_override(self):
def uname_func():
return "darwin", "dontcare1", "100.99", "dontcare2", "t1000"
binary_util = self._gen_binary_util(uname_func=uname_func,
path_by_id={('darwin', '100'): ['skynet', '42']})
binary_request = binary_util._make_deprecated_binary_request("supportdir", "version", "name")
self.assertEquals("supportdir/skynet/42/version/name",
binary_util._get_download_path(binary_request))
def test_external_url_generator(self):
binary_util = self._gen_binary_util(baseurls=[])
binary_request = BinaryRequest(
supportdir='supportdir',
version='version',
name='name',
platform_dependent=False,
external_url_generator=ExternalUrlGenerator(),
# TODO: test archiver!
archiver=None)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select(binary_request)
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryToolFetcher.BinaryNotFound.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir=supportdir, version=version, "
"name=name, platform_dependent=False, "
"external_url_generator=ExternalUrlGenerator(<example __str__()>), archiver=None): "
"Failed to fetch name binary from any source: (Failed to fetch binary from "
"https://example.com/some-binary: Fetch of https://example.com/some-binary failed with "
"status code 404, Failed to fetch binary from https://example.com/same-binary: Fetch of "
"https://example.com/same-binary failed with status code 404)'")
self.assertIn(expected_msg, the_raised_exception_message)
def test_disallowing_external_urls(self):
binary_util = self._gen_binary_util(baseurls=[], allow_external_binary_tool_downloads=False)
    binary_request = BinaryRequest(
supportdir='supportdir',
version='version',
name='name',
platform_dependent=False,
external_url_generator=ExternalUrlGenerator(),
# TODO: test archiver!
archiver=None)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select(binary_request)
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.NoBaseUrlsError.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir=supportdir, version=version, "
"name=name, platform_dependent=False, "
"external_url_generator=ExternalUrlGenerator(<example __str__()>), archiver=None): "
"--binaries-baseurls is empty.")
self.assertIn(expected_msg, the_raised_exception_message)
|
StarcoderdataPython
|
9708489
|
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.text import Truncator
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField
from django.conf import settings
from polymorphic import PolymorphicModel
from bluebottle.bb_follow.models import Follow
from .managers import ReactionManager, WallpostManager
WALLPOST_TEXT_MAX_LENGTH = getattr(settings, 'WALLPOST_TEXT_MAX_LENGTH', 300)
WALLPOST_REACTION_MAX_LENGTH = getattr(settings, 'WALLPOST_REACTION_MAX_LENGTH', 300)
class Wallpost(PolymorphicModel):
"""
The Wallpost base class. This class will never be used directly because the content of a Wallpost is always defined
in the child classes.
Implementation Note: Normally this would be an abstract class but it's not possible to make this an abstract class
and have the polymorphic behaviour of sorting on the common fields.
"""
# The user who wrote the wall post. This can be empty to support wall posts without users (e.g. anonymous
# TextWallposts, system Wallposts for donations etc.)
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), related_name="%(class)s_wallpost", blank=True, null=True)
editor = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True, null=True, help_text=_("The last user to edit this wallpost."))
# The metadata for the wall post.
created = CreationDateTimeField(_('created'))
updated = ModificationDateTimeField(_('updated'))
deleted = models.DateTimeField(_('deleted'), blank=True, null=True)
ip_address = models.IPAddressField(_('IP address'), blank=True, null=True, default=None)
# Generic foreign key so we can connect it to any object.
content_type = models.ForeignKey(ContentType, verbose_name=_('content type'), related_name="content_type_set_for_%(class)s")
object_id = models.PositiveIntegerField(_('object ID'))
content_object = generic.GenericForeignKey('content_type', 'object_id')
email_followers = models.BooleanField(default=True)
# Manager
objects = WallpostManager()
class Meta:
ordering = ('created',)
def __unicode__(self):
return str(self.id)
class MediaWallpost(Wallpost):
# The content of the wall post.
title = models.CharField(max_length=60)
text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH, blank=True, default='')
video_url = models.URLField(max_length=100, blank=True, default='')
def __unicode__(self):
return Truncator(self.text).words(10)
# FIXME: See how we can re-enable this
# def save(self, *args, **kwargs):
# super(MediaWallpost, self).save(*args, **kwargs)
#
# # Mark the photos as deleted when the MediaWallpost is deleted.
# if self.deleted:
# for photo in self.photos.all():
# if not photo.deleted:
# photo.deleted = self.deleted
# photo.save()
class MediaWallpostPhoto(models.Model):
mediawallpost = models.ForeignKey(MediaWallpost, related_name='photos', null=True, blank=True)
photo = models.ImageField(upload_to='mediawallpostphotos')
deleted = models.DateTimeField(_('deleted'), blank=True, null=True)
ip_address = models.IPAddressField(_('IP address'), blank=True, null=True, default=None)
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), related_name="%(class)s_wallpost_photo", blank=True, null=True)
editor = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True, null=True, help_text=_("The last user to edit this wallpost photo."))
class TextWallpost(Wallpost):
# The content of the wall post.
text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH)
def __unicode__(self):
return Truncator(self.text).words(10)
class SystemWallpost(Wallpost):
# The content of the wall post.
text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH, blank=True)
# Generic foreign key so we can connect any object to it.
related_type = models.ForeignKey(ContentType, verbose_name=_('related type'))
related_id = models.PositiveIntegerField(_('related ID'))
related_object = generic.GenericForeignKey('related_type', 'related_id')
def __unicode__(self):
return Truncator(self.text).words(10)
class Reaction(models.Model):
"""
A user reaction or comment to a Wallpost. This model is based on the Comments model from django.contrib.comments.
"""
# Who posted this reaction. User will need to be logged in to make a reaction.
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), related_name='wallpost_reactions')
editor = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True, null=True, related_name='+', help_text=_("The last user to edit this reaction."))
# The reaction text and the wallpost it's a reaction to.
text = models.TextField(_('reaction text'), max_length=WALLPOST_REACTION_MAX_LENGTH)
wallpost = models.ForeignKey(Wallpost, related_name='reactions')
# Metadata for the reaction.
created = CreationDateTimeField(_('created'))
updated = ModificationDateTimeField(_('updated'))
deleted = models.DateTimeField(_('deleted'), blank=True, null=True)
ip_address = models.IPAddressField(_('IP address'), blank=True, null=True, default=None)
# Manager
objects = ReactionManager()
objects_with_deleted = models.Manager()
class Meta:
ordering = ('created',)
verbose_name = _('Reaction')
verbose_name_plural = _('Reactions')
def __unicode__(self):
s = "{0}: {1}".format(self.author.get_full_name(), self.text)
return Truncator(s).words(10)
# Import the signals for sending mails, in case they are present
if 'apps.tasks' in settings.INSTALLED_APPS and 'apps.projects' in settings.INSTALLED_APPS:
import mails
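# Minimal usage sketch (assumed, not part of the original app): a TextWallpost can be
# attached to any model instance through the generic foreign key. `some_object` and
# `some_user` are hypothetical and would come from the surrounding project.
def _example_create_text_wallpost(some_object, some_user):
    post = TextWallpost(author=some_user, text="Nice work!")
    post.content_object = some_object  # fills content_type and object_id
    post.save()
    return post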
|
StarcoderdataPython
|
224785
|
<gh_stars>1-10
# Fatorial (factorial) calculator: reads a number and prints its factorial (prompts are in Portuguese).
print('\033[33m-=-\033[m' * 20)
print('\033[33m************* Fatorial *************\033[m')
print('\033[33m-=-\033[m' * 20)
v = float(input('Insira um valor: '))
c = 1
f = 1
while c <= v:
f = f * c
c += 1
print('O fatorial de {} é {}' .format(v, f))
|
StarcoderdataPython
|
287404
|
# Copyright (c) 2020 Foundries.io
# SPDX-License-Identifier: Apache-2.0
import yaml
import os
from helpers import status
def normalize_keyvals(params: dict, prefix=''):
"""Handles two types of docker-app params:
1) traditional. eg:
key: val
returns data as is
2) docker app nested. eg:
shellhttp:
port: 80
returns dict: {shell.httpd: 80}
"""
normalized = {}
for k, v in params.items():
assert type(k) == str
if type(v) == str:
normalized[prefix + k] = v
elif type(v) == int:
normalized[prefix + k] = str(v)
elif type(v) == dict:
sub = normalize_keyvals(v, prefix + k + '.')
normalized.update(sub)
else:
raise ValueError('Invalid parameter type for: %r' % v)
return normalized
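# Worked example (illustrative): nested docker-app parameters are flattened into dotted
# keys and every value becomes a string, e.g.
#   normalize_keyvals({"shellhttpd": {"port": 80}, "tag": "latest"})
#   -> {"shellhttpd.port": "80", "tag": "latest"}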
def convert_docker_app(path: str):
"""Take a .dockerapp file to directory format.
The file format isn't supported by docker-app 0.8 and beyond. Just
split a 3 sectioned yaml file into its pieces
"""
with open(path) as f:
_, compose, params = yaml.safe_load_all(f)
if params is None:
params = {}
os.unlink(path)
path, _ = os.path.splitext(path)
try:
# this directory might already exist. ie the user has:
# shellhttpd.dockerapp
# shellhttpd/Dockerfile...
os.rename(path, path + '.orig') # just move out of the way
except FileNotFoundError:
pass
os.mkdir(path)
# We need to try and convert docker-app style parameters to parameters
# that are compatible with docker-compose
params = normalize_keyvals(params)
compose_str = yaml.dump(compose)
for k, v in params.items():
# there are two things we have to replace:
# 1) Things with no defaults - ie ports: 8080:${PORT}
# We need to make this ${PORT-<default>} if we can
compose_str = compose_str.replace('${%s}' % k, '${%s-%s}' % (k, v))
# 2) Things that are nested - ie foo.bar=12
# We have to make this foo_bar so compose will accept it
if '.' in k:
safek = k.replace('.', '_')
status('Replacing parameter "%s" with "%s" for compatibility' % (
k, safek))
compose_str = compose_str.replace('${' + k, '${' + safek)
with open(os.path.join(path, 'docker-compose.yml'), 'w') as f:
f.write(compose_str)
status('Converted docker-compose for %s\n' % compose_str)
def convert_docker_apps():
"""Loop through all the .dockerapp files in a directory and convert them
to directory-based docker-compose.yml friendly representations.
"""
for p in os.listdir():
if not p.endswith('.dockerapp'):
continue
if os.path.isfile(p):
status('Converting .dockerapp file for: ' + p)
convert_docker_app(p)
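# Hedged usage sketch (editorial addition): running the conversion over the
# current working directory, which is how this helper is typically driven.
# Assumes the directory contains one or more legacy *.dockerapp files; with
# none present the call is a harmless no-op.
if __name__ == "__main__":
    convert_docker_apps()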
|
StarcoderdataPython
|
5040007
|
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
from config import ANALYSIS as cfg
import analysis.utils as utils
import portion as P
import pickle
import os
from analysis.transcript_parsing import parse
def seg_invalid(row):
"""
This functions specifies what makes a segment invalid
Input: row defining an audio segment with the following columns:
- ['meeting_id', 'part_id', 'chan', 'start', 'end', 'length', 'type', 'laugh_type']
"""
# If the length is shorter than min_length passed to detection algorithm, mark invalid
# - empirically tested -> this doesn't apply to many segments
return (
row["length"] < cfg["model"]["min_length"]
or row["laugh_type"] == "breath-laugh"
)
def append_to_index(index, row, meeting_id, part_id):
"""
Append the segment as time-interval (Portion) to the passed index
The segment is defined by the passed dataframe-row
"""
start = utils.to_frames(row["start"])
end = utils.to_frames(row["end"])
seg_as_interval = P.openclosed(start,end)
# Append to existing intervals or create new dict entry
if part_id in index[meeting_id].keys():
index[meeting_id][part_id] = index[meeting_id][part_id] | seg_as_interval
else:
index[meeting_id][part_id] = seg_as_interval
seg_len = utils.to_sec(utils.p_len(seg_as_interval))
index[meeting_id]["tot_len"] += seg_len
index[meeting_id]["tot_events"] += 1
return index
def create_laugh_index(df, invalid_index):
"""
Creates a laugh_index with all transcribed laughter events per participant per meeting
Invalid index needs to be passed because invalid laughter segments will be added to it
The segments are stored as disjunction of closed intervals (using portion library)
dict structure:
{
meeting_id: {
tot_len: INT,
tot_events: INT,
part_id: P.openclosed(start,end) | P.openclosed(start,end),
part_id: P.openclosed(start,end)| P.openclosed(start,end)
}
...
}
"""
laugh_index = {}
meeting_groups = df.groupby(["meeting_id"])
for meeting_id, meeting_df in meeting_groups:
laugh_index[meeting_id] = {}
laugh_index[meeting_id]["tot_len"] = 0
laugh_index[meeting_id]["tot_events"] = 0
# Ensure rows are sorted by 'start'-time in ascending order
part_groups = meeting_df.sort_values("start").groupby(["part_id"])
for part_id, part_df in part_groups:
laugh_index[meeting_id][part_id] = P.empty()
for _, row in part_df.iterrows():
# If segment is invalid, append to invalid segments index
if seg_invalid(row):
invalid_index = append_to_index(
invalid_index, row, meeting_id, part_id
)
continue
# If segment is valid, append to laugh segments index
laugh_index = append_to_index(laugh_index, row, meeting_id, part_id)
return laugh_index
def create_index_from_df(df):
"""
Creates an index with all segments defined by the passed dataframe
The segments are stored as disjunction of closed intervals (using portion library) per participant per meeting
dict structure (same as laugh_index):
{
meeting_id: {
tot_len: INT,
tot_events: INT,
part_id: P.openclosed(start,end) | P.openclosed(start,end),
part_id: P.openclosed(start,end) | P.openclosed(start,end)
...
}
...
}
"""
index = {}
meeting_groups = df.groupby(["meeting_id"])
for meeting_id, meeting_df in meeting_groups:
index[meeting_id] = {}
index[meeting_id]["tot_len"] = 0
index[meeting_id]["tot_events"] = 0
# Ensure rows are sorted by 'start'-time in ascending order
part_groups = meeting_df.sort_values("start").groupby(["part_id"])
for part_id, part_df in part_groups:
for _, row in part_df.iterrows():
index = append_to_index(index, row, meeting_id, part_id)
return index
def get_seg_from_index(index, meeting_id, part_id):
"""
Return index segment for a specific participant of a specific meeting.
If meeting_id or part_id don't exist in index, return empty interval
"""
if meeting_id in index.keys():
return index[meeting_id].get(part_id, P.empty())
return P.empty()
def create_silence_index(laugh_index, invalid_index, noise_index, speech_index):
# TODO: Not used at the moment
"""
Index of those intervals that contain no transcriptions.
Take whole audio files for each participant for each meeting and subtract all
transcribed segments
dict_structure (same as laugh_index - without tot_events)
{
meeting_id: {
tot_len: INT,
part_id: P.openclosed(start,end) | P.openclosed(start,end),
part_id: P.openclosed(start,end) | P.openclosed(start,end)
}
...
}
"""
silence_index = {}
for _, row in parse.info_df.iterrows():
if row.meeting_id not in silence_index.keys():
silence_index[row.meeting_id] = {}
end_frame = utils.to_frames(row.length)
full_interval = P.openclosed(0, end_frame)
silence_seg = (
full_interval
- get_seg_from_index(laugh_index, row.meeting_id, row.part_id)
- get_seg_from_index(invalid_index, row.meeting_id, row.part_id)
- get_seg_from_index(speech_index, row.meeting_id, row.part_id)
- get_seg_from_index(noise_index, row.meeting_id, row.part_id)
)
silence_index[row.meeting_id][row.part_id] = silence_seg
silence_index[row.meeting_id]["tot_length"] = utils.to_sec(utils.p_len(silence_seg))
return silence_index
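# --- Hedged illustration (editorial addition) ---
# Minimal sketch of the interval arithmetic create_silence_index relies on:
# subtracting transcribed segments from the full recording interval using the
# portion library. The frame values below are made up for illustration.
def _example_interval_subtraction():
    full = P.openclosed(0, 100)                          # whole channel, in frames
    laughs = P.openclosed(10, 20)
    speech = P.openclosed(30, 60) | P.openclosed(80, 90)
    silence = full - laughs - speech
    return silence                                       # (0,10] | (20,30] | (60,80] | (90,100]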
#############################################
# EXECUTED ON IMPORT
#############################################
'''
Load from disk if possible. o/w create indices from scratch
'''
cache_file = ".cache/preprocessed_indices.pkl"
force_recompute = cfg['force_index_recompute']
if not force_recompute and os.path.isfile(cache_file):
print('==========================\nLOADING INDICES FROM DISK\nTo recompute set `force_index_recompute=True` in config.py\n')
with open(cache_file, "rb") as f:
mega_index = pickle.load(f)
invalid_index = mega_index['invalid']
laugh_index = mega_index['laugh']
noise_index = mega_index['noise']
speech_index = mega_index['speech']
silence_index = mega_index['silence']
else:
print('Creating indices from transcripts...')
print('(this can take a while)')
# The following indices are dicts that contain segments of a particular type per participant per meeting
invalid_index = create_index_from_df(parse.invalid_df)
laugh_index = create_laugh_index(parse.laugh_only_df, invalid_index=invalid_index)
speech_index = create_index_from_df(parse.speech_df)
noise_index = create_index_from_df(parse.noise_df)
silence_index = create_silence_index(
laugh_index, invalid_index, noise_index, speech_index
)
mega_index = {
"invalid": invalid_index,
"laugh": laugh_index,
"speech": speech_index,
"noise": noise_index,
"silence": silence_index,
}
# Create .cache dir if it doesn't exist
Path(cache_file).parent.mkdir(parents=True, exist_ok=True)
with open(cache_file, "wb") as f:
pickle.dump(mega_index, f)
|
StarcoderdataPython
|
162667
|
from entityfx.string_manipulation_base import StringManipulationBase
class StringManipulation(StringManipulationBase):
def benchImplementation(self) -> str:
str0_ = "the quick brown fox jumps over the lazy dog"
str1 = ""
i = 0
while i < self._iterrations:
str1 = StringManipulationBase._doStringManipilation(str0_)
i += 1
return str1
|
StarcoderdataPython
|
203774
|
#!/usr/bin/env python3
#
# tmp_multi_cluster.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import sys
from pathlib import Path
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from tmp_cluster import TempCluster
if __name__ == "__main__":
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
This script automatically configures N temporary local clusters on the machine and then
calls a command while these clusters are running. As soon as the command returns, all
configured clusters are killed and all generated data is deleted.
The purpose of this is to support testing a set of integration tests using multiple clusters
(i.e. using the Multi-threaded client).
""",
)
parser.add_argument(
"--build-dir",
"-b",
metavar="BUILD_DIRECTORY",
help="FDB build director",
required=True,
)
parser.add_argument(
"--clusters",
"-c",
metavar="NUM_CLUSTERS",
type=int,
help="The number of clusters to run",
required=True,
)
parser.add_argument("cmd", metavar="COMMAND", nargs="+", help="The command to run")
args = parser.parse_args()
errcode = 1
# spawn all the clusters
base_dir = args.build_dir
num_clusters = args.clusters
build_dir = Path(base_dir)
bin_dir = build_dir.joinpath("bin")
clusters = []
for c in range(1, num_clusters + 1):
# now start the cluster up
local_c = TempCluster(args.build_dir, port="{}501".format(c))
local_c.__enter__()
clusters.append(local_c)
# all clusters should be running now, so run the subcommand
# TODO (bfines): pass through the proper ENV commands so that the client can find everything
cluster_paths = ";".join(
[str(cluster.etc.joinpath("fdb.cluster")) for cluster in clusters]
)
print(cluster_paths)
env = dict(**os.environ)
env["FDB_CLUSTERS"] = env.get("FDB_CLUSTERS", cluster_paths)
errcode = subprocess.run(
args.cmd, stdout=sys.stdout, stderr=sys.stderr, env=env
).returncode
# shutdown all the running clusters
for tc in clusters:
tc.close()
sys.exit(errcode)
|
StarcoderdataPython
|
3218172
|
<filename>universalmutator/java_handler.py
import os
import subprocess
import shutil
def handler(tmpMutantName, mutant, sourceFile, uniqueMutants):
backupName = sourceFile + ".um.backup." + str(os.getpid())
classFile = sourceFile.replace(".java", ".class")
classBackupName = classFile + ".um.backup" + str(os.getpid())
try:
shutil.copy(sourceFile, backupName)
if os.path.exists(classFile):
shutil.copy(classFile, classBackupName)
shutil.copy(tmpMutantName, sourceFile)
with open(".um.mutant_output" + os.getpid(), 'w') as file:
r = subprocess.call(["javac", sourceFile],
stdout=file, stderr=file)
finally:
shutil.copy(backupName, sourceFile)
os.remove(backupName)
if os.path.exists(classBackupName):
shutil.copy(classBackupName, classFile)
os.remove(classBackupName)
if r == 0:
return "VALID"
else:
return "INVALID"
|
StarcoderdataPython
|
1699493
|
<reponame>jamhocken/aoc-2021
import regex as re
import collections
def process_input(file_contents):
lines_stripped = [line.strip() for line in file_contents]
scanners = dict()
scanner_pattern = re.compile(r"(\d+)")
beacon_pattern = re.compile(r"(-?\d+),(-?\d+),(-?\d+)")
for line in lines_stripped:
if len(line)>0 and line[1] == "-":
scanner_match = re.search(scanner_pattern,line)
scanner = int(scanner_match.group(0))
beacons = list()
elif len(line)>0:
beacon_match = re.match(beacon_pattern,line)
beacon = [int(beacon_match.group(i)) for i in [1,2,3]]
beacons.append(beacon)
else:
scanners[scanner] = beacons
scanners[scanner] = beacons
return scanners
def find_distance(scanners):
distance_dict = dict()
for key,value in scanners.items():
distances = dict()
for i in range(len(value)):
for j in range(i+1,len(value)):
distances[(i,j)] = [abs(value[i][k]-value[j][k]) for k in [0,1,2]]
distance_dict[key] = distances
return distance_dict
def find_matches(mapped,unmapped,distances,scanners,scan_translation):
new_unmapped = unmapped.copy()
new_mapped = mapped.copy()
for key_unmapped in unmapped:
# find all distances between beacons that match for both scanners
for key_mapped in mapped:
matches = list()
beacon_unmapped = set()
beacon_mapped = set()
for pair_mapped,distance_mapped in distances[key_mapped].items():
for pair_unmapped,distance_unmapped in distances[key_unmapped].items():
if set(distance_mapped) == set(distance_unmapped):
matches.append([pair_mapped,pair_unmapped])
beacon_unmapped.update(pair_unmapped)
beacon_mapped.update(pair_mapped)
#find the matching pairs of beacons
beacon_mapping = dict()
for beacon in beacon_mapped:
other_beacon = set()
for match in matches:
if beacon in match[0]:
if not other_beacon:
other_beacon = set(match[1])
else:
other_beacon = other_beacon.intersection(match[1])
beacon_mapping[beacon] = other_beacon.pop()
# If there are at least 12 beacon matches
if len(beacon_mapping)>11:
#First we sort out the x,y,z coordinates so that both scanners have the same definition
flagx = sum([1 if distances[key_mapped][matches[j][0]][0] == distances[key_unmapped][matches[j][1]][0] else 0 for j in range(len(matches))])
flagy = sum([1 if distances[key_mapped][matches[j][0]][0] == distances[key_unmapped][matches[j][1]][1] else 0 for j in range(len(matches))])
flagz = sum([1 if distances[key_mapped][matches[j][0]][0] == distances[key_unmapped][matches[j][1]][2] else 0 for j in range(len(matches))])
if flagy >= flagx and flagy >= flagz:
distances[key_unmapped] = {key:[value[1],value[0],value[2]] for key,value in distances[key_unmapped].items()}
scanners[key_unmapped] = [[value[1],value[0],value[2]] for value in scanners[key_unmapped]]
elif flagz >= flagx and flagz >= flagy:
distances[key_unmapped] = {key:[value[2],value[1],value[0]] for key,value in distances[key_unmapped].items()}
scanners[key_unmapped] = [[value[2],value[1],value[0]] for value in scanners[key_unmapped]]
flagy = sum([1 if distances[key_mapped][match[0]][1] == distances[key_unmapped][match[1]][1] else 0 for match in matches])
flagz = sum([1 if distances[key_mapped][match[0]][1] == distances[key_unmapped][match[1]][2] else 0 for match in matches])
if flagz >= flagy:
distances[key_unmapped] = {key:[value[0],value[2],value[1]] for key,value in distances[key_unmapped].items()}
scanners[key_unmapped] = [[value[0],value[2],value[1]] for value in scanners[key_unmapped]]
# Then we see if any x,y,z is inverted
# First though we have to reorder our matching list to keep beacon pairs consistent
temp_matches = list()
for match in matches:
if match[1][0] != beacon_mapping[match[0][0]]:
temp_matches.append([match[0],(match[1][1],match[1][0])])
else:
temp_matches.append(match)
matches = temp_matches
invertx = sum([1 if scanners[key_mapped][match[0][0]][0] - scanners[key_mapped][match[0][1]][0] == scanners[key_unmapped][match[1][0]][0] - scanners[key_unmapped][match[1][1]][0] else -1 for match in matches])
inverty = sum([1 if scanners[key_mapped][match[0][0]][1] - scanners[key_mapped][match[0][1]][1] == scanners[key_unmapped][match[1][0]][1] - scanners[key_unmapped][match[1][1]][1] else -1 for match in matches])
invertz = sum([1 if scanners[key_mapped][match[0][0]][2] - scanners[key_mapped][match[0][1]][2] == scanners[key_unmapped][match[1][0]][2] - scanners[key_unmapped][match[1][1]][2] else -1 for match in matches])
if invertx < 0:
scanners[key_unmapped] = [[-1*value[0],value[1],value[2]] for value in scanners[key_unmapped]]
if inverty < 0:
scanners[key_unmapped] = [[value[0],-1*value[1],value[2]] for value in scanners[key_unmapped]]
if invertz < 0:
scanners[key_unmapped] = [[value[0],value[1],-1*value[2]] for value in scanners[key_unmapped]]
# And finally we shift the scanner to overlap with the other one
translation = [(scanners[key_mapped][match[0][0]][0] - scanners[key_unmapped][match[1][0]][0],scanners[key_mapped][match[0][0]][1] - scanners[key_unmapped][match[1][0]][1],scanners[key_mapped][match[0][0]][2] - scanners[key_unmapped][match[1][0]][2]) for match in matches]
translation = collections.Counter(translation).most_common(1)[0][0]
scanners[key_unmapped] = [[value[0]+translation[0],value[1]+translation[1],value[2]+translation[2]] for value in scanners[key_unmapped]]
if key_unmapped in new_unmapped:
new_unmapped.remove(key_unmapped)
new_mapped.add(key_unmapped)
scan_translation.append(translation)
return new_mapped, new_unmapped
def main():
with open("input.txt",'r') as beacon_file:
beacon_lines = beacon_file.readlines()
scanners = process_input(beacon_lines)
distances = find_distance(scanners)
keys = list(scanners.keys())
mapped = {keys[0]}
unmapped = set(keys[1:])
translation = list()
while unmapped:
(new_mapped, new_unmapped) = find_matches(mapped,unmapped,distances,scanners,translation)
mapped = new_mapped
unmapped = new_unmapped
# Star 1
beacons = set()
for key,value in scanners.items():
for beacon in value:
beacons.add(tuple(beacon))
print(len(beacons))
# Star 2
max_distance = 0
for i in range(len(translation)):
for j in range(i+1,len(translation)):
max_distance = max(max_distance,sum([abs(translation[i][k]-translation[j][k]) for k in [0,1,2]]))
print(max_distance)
main()
|
StarcoderdataPython
|
3231599
|
from flask import Flask
from config import Config
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config.from_object(Config)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
from app import routes, models
|
StarcoderdataPython
|
4934516
|
import requests
from bs4 import BeautifulSoup
filename = "keyword.txt"
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.64"}
def create_soup(url) :
res = requests.get(url, headers=headers)
res.raise_for_status()
soup = BeautifulSoup(res.text, "lxml")
return soup
def scrape_news_keyword(KEYWORD, PAGE):
with open(filename, "w", encoding="utf8") as file:
def scrape_1page(KEYWORD) :
url = "https://search.naver.com/search.naver?where=news&sm=tab_pge&query=" + KEYWORD + "&sort=0&photo=0&field=0&pd=0&ds=&de=&mynews=0&office_type=0&office_section_code=0&news_office_checked=&nso=so:r,p:all,a:all&start=1"
soup = create_soup(url)
news_list = soup.find("ul", attrs={"class":"list_news"}).find_all("li", attrs={"class":"bx"})
for index , news in enumerate(news_list) :
title = news.find("div", attrs={"class":"news_area"}).find("a", attrs={"class":"news_tit"})["title"]
link = news.find("div", attrs={"class":"news_area"}).find("a", attrs={"class":"news_tit"})["href"]
press = []
for press_pick in news.find("div", attrs={"class":"news_area"}).find("div", attrs={"class":"news_info"}).find("div", attrs={"class":"info_group"}).find("a", attrs={"class":"info press"}).stripped_strings :
press.append(press_pick)
file.write("{}. {}".format(index+1, title)+"\n"+" 링크 : {}".format(link) + "\n" + " 언론사 : {}".format(press[0]) + "\n")
if PAGE == 1 :
scrape_1page(KEYWORD)
elif PAGE > 1 :
scrape_1page(KEYWORD)
page_index = 10
for i in range(1, PAGE) :
url = "https://search.naver.com/search.naver?where=news&sm=tab_pge&query=" + KEYWORD + "&sort=0&photo=0&field=0&pd=0&ds=&de=&mynews=0&office_type=0&office_section_code=0&news_office_checked=&nso=so:r,p:all,a:all&start=" + str(i) + "1"
soup = create_soup(url)
news_list = soup.find("ul", attrs={"class":"list_news"}).find_all("li", attrs={"class":"bx"})
for news in news_list :
title = news.find("div", attrs={"class":"news_area"}).find("a", attrs={"class":"news_tit"})["title"]
link = news.find("div", attrs={"class":"news_area"}).find("a", attrs={"class":"news_tit"})["href"]
press = []
for press_pick in news.find("div", attrs={"class":"news_area"}).find("div", attrs={"class":"news_info"}).find("div", attrs={"class":"info_group"}).find("a", attrs={"class":"info press"}).stripped_strings :
press.append(press_pick)
page_index = page_index + 1
file.write("{}. {}".format(page_index, title)+"\n"+" 링크 : {}".format(link) + "\n" + " 언론사 : {}".format(press[0]) + "\n")
|
StarcoderdataPython
|
9771585
|
<filename>vroombaby/middleware.py
class MyMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
response['X-My-Header'] = "my value"
return response
|
StarcoderdataPython
|
4822055
|
<reponame>PasaLab/SparkDQ
class A:
def __str__(self):
return "hhhhh"
if __name__ == "__main__":
a = A()
i = 0
while i < 3 - 1:
print(i)
i += 1
|
StarcoderdataPython
|
5138635
|
import ngram
import random
import json
class MarkovBot(object):
def __init__(self, gramstores=None):
if gramstores is None:
self.gramstores = []
else:
self.gramstores = gramstores
def add_gramstore(self, gramstore):
self.gramstores.append(gramstore)
def _stitch(self, terms):
total = ' '.join(terms).split()
prev = ''
res = []
for idx in total:
if idx != prev:
res.append(idx)
prev = idx
return res
def generate_sentence(self):
rs = []
store = random.choice(self.gramstores)
gram = store.random_gram()
for rnd in range(0, 5):
rs.append(gram)
potential = store.gram_starts_with(gram)
if potential is None:
continue
chgram = random.choice(list(potential[1]))
if isinstance(chgram, set):
chgram = list(chgram)[0]
rs.append(chgram)
if ' ' in chgram:
gram = chgram.split()[-1] # take the last gram for searching
else:
gram = chgram
store = random.choice(self.gramstores)
return self._stitch(rs)
if __name__ == "__main__":
bot = MarkovBot()
data = {}
g1, g2, g3 = ngram.NGram(size=1), ngram.NGram(size=2), ngram.NGram(size=3)
with open('dump', 'r') as f:
data = json.load(f)
for tweet in data['tweet']:
g1.index(tweet)
g2.index(tweet)
g3.index(tweet)
bot.add_gramstore(g1)
bot.add_gramstore(g2)
bot.add_gramstore(g3)
print(bot.generate_sentence())
|
StarcoderdataPython
|
250452
|
<filename>AIMachine.py
from StateMachine import TRANSITIONS
from transitions.core import State
EVALUATING_POLICY = "evaluating_policy"
SEARCHING = "searching"
VALUATION = "valuation"
IDLE = "idle"
STATES = [
State(IDLE),
State(EVALUATING_POLICY),
State(SEARCHING),
State(VALUATION)
]
# Triggers
VALUE_DETERMINED = "value_determined"
POLICY_CHOICE = "policy_choice"
SEARCH_COMPLETE = "search_complete"
BOARD_DECISION = "board_decision"
EXIT_CONDITION_REACHED = "exit_condition_reached"
TRANSITIONS = [
{
"trigger": VALUE_DETERMINED,
"source": VALUATION,
"dest": EVALUATING_POLICY
},
{
"trigger": POLICY_CHOICE,
"source": EVALUATING_POLICY,
"dest": SEARCHING,
},
{
"trigger": SEARCH_COMPLETE,
"source": SEARCHING,
"dest": VALUATION
},
{
"trigger": EXIT_CONDITION_REACHED,
"source": "*", # Any
"dest": IDLE
}
]
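# --- Hedged usage sketch (editorial addition) ---
# Wiring STATES and TRANSITIONS into a transitions.Machine. The AI stand-in
# class and the chosen initial state are illustrative assumptions; the real
# project presumably attaches these definitions to its own model class.
def _example_build_machine():
    from transitions import Machine
    class AI:
        pass
    ai = AI()
    Machine(model=ai, states=STATES, transitions=TRANSITIONS, initial=VALUATION)
    ai.value_determined()          # valuation -> evaluating_policy
    ai.policy_choice()             # evaluating_policy -> searching
    ai.exit_condition_reached()    # any state -> idle
    return ai.state                # 'idle'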
|
StarcoderdataPython
|
11356468
|
<gh_stars>0
import socket, time
if __name__ == "__main__":
try:
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('', 50000))
print('listening')
serversocket.listen(5)
while 1:
(clientsocket, address) = serversocket.accept()
clientsocket.close()
except KeyboardInterrupt:
pass
|
StarcoderdataPython
|
4969523
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
from os import walk
from os import sep
from functools import partial
def fullpath(root, dirpath, fname):
if dirpath[len(dirpath) - 1] != sep:
dirpath += sep
if root[len(root) - 1] != sep:
root += sep
sub_dirpath = dirpath[dirpath.find(root) + len(root):]
return (sub_dirpath + fname, dirpath + fname)
def template_pages_map(root):
result = {}
for (dirpath, dirnames, filenames) in walk(root):
dirpathise = partial(fullpath, root, dirpath)
fullnames = map(dirpathise, filenames)
for (name, fullname) in fullnames:
result[fullname] = name
return result
AUTHOR = u'The Physiome Project'
SITENAME = u'The Physiome Journal'
SITEURL = ''
STATIC_PATHS = ['pdfs', 'models', 'archive-descriptions', 'data']
PATH = 'content'
TIMEZONE = 'GMT'
# Use filesystem date if not otherwise specified.
DEFAULT_DATE = "fs"
DEFAULT_LANG = u'en'
THEME = './themes/pj'
TEMPLATE_PAGES = template_pages_map(
PATH + sep + 'custompages') # Create a list of template pages to generate in content/custompages
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
CATEGORY_SAVE_AS = '{slug}/index.html'
CATEGORY_URL = '{slug}/index.html'
PAGE_SAVE_AS = '{slug}.html'
PAGE_URL = '{slug}.html'
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = 20
ARTICLE_URL = '{category}/{slug}.html'
ARTICLE_SAVE_AS = '{category}/{slug}.html'
FILENAME_METADATA = '(?P<title>.*)'
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
# Google Analytics tracking ID
GOOGLE_ANALYTICS = "UA-200859312-1"
|
StarcoderdataPython
|
6630169
|
import torch
import torch.nn as nn
import math
import numpy as np
class MultivarMLP(nn.Module):
def __init__(self, input_dims, hidden_dims, output_dims, extra_dims, actfn, pre_layers=None):
"""
Module for stacking N neural networks in parallel for more efficient evaluation. In the context
of ENCO, we stack the neural networks of the conditional distributions for all N variables on top
of each other to parallelize it on a GPU.
Parameters
----------
input_dims : int
Input dimensionality for all networks (in ENCO, size of embedding)
hidden_dims : list[int]
Hidden dimensionalities to use in the hidden layer. Length of list determines
the number of hidden layers to use.
output_dims : int
Output dimensionality of all networks (in ENCO, max. number of categories)
extra_dims : list[int]
Number of neural networks to have in parallel (in ENCO, number of variables).
Can have multiple dimensions if needed.
actfn : function -> nn.Module
Activation function to use in between hidden layers
pre_layers : list[nn.Module] / nn.Module
Any modules that should be applied before the actual MLP. This can include
an embedding layer and/or masking operation.
"""
super().__init__()
self.extra_dims = extra_dims
layers = []
if pre_layers is not None:
if not isinstance(pre_layers, list):
layers += [pre_layers]
else:
layers += pre_layers
hidden_dims = [input_dims] + hidden_dims
for i in range(len(hidden_dims)-1):
if not layers or not isinstance(layers[-1], EmbedLayer):  # After an embedding layer, we directly apply a non-linearity
layers += [MultivarLinear(input_dims=hidden_dims[i],
output_dims=hidden_dims[i+1],
extra_dims=extra_dims)]
layers += [actfn()]
layers += [MultivarLinear(input_dims=hidden_dims[-1],
output_dims=output_dims,
extra_dims=extra_dims)]
self.layers = nn.ModuleList(layers)
def forward(self, x, mask=None):
for l in self.layers:
if isinstance(l, (EmbedLayer, InputMask)):
x = l(x, mask=mask)
else:
x = l(x)
return x
@property
def device(self):
return next(iter(self.parameters())).device
class MultivarLinear(nn.Module):
def __init__(self, input_dims, output_dims, extra_dims):
"""
Linear layer with the same properties as MultivarMLP. It effectively applies N independent
linear layers in parallel.
Parameters
----------
input_dims : int
Number of input dimensions per network.
output_dims : int
Number of output dimensions per network.
extra_dims : list[int]
Number of networks to apply in parallel. Can have multiple dimensions if needed.
"""
super().__init__()
self.input_dims = input_dims
self.output_dims = output_dims
self.extra_dims = extra_dims
self.weight = nn.Parameter(torch.zeros(*extra_dims, output_dims, input_dims))
self.bias = nn.Parameter(torch.zeros(*extra_dims, output_dims))
nn.init.kaiming_uniform_(self.weight, nonlinearity='relu')
def forward(self, x):
# Shape preparation
x_extra_dims = x.shape[1:-1]
if len(x_extra_dims) > 0:
for i in range(len(x_extra_dims)):
assert x_extra_dims[-(i+1)] == self.extra_dims[-(i+1)], \
"Shape mismatch: X=%s, Layer=%s" % (str(x.shape), str(self.extra_dims))
for _ in range(len(self.extra_dims)-len(x_extra_dims)):
x = x.unsqueeze(dim=1)
# Unsqueeze
x = x.unsqueeze(dim=-1)
weight = self.weight.unsqueeze(dim=0)
bias = self.bias.unsqueeze(dim=0)
# Linear layer
out = torch.matmul(weight, x).squeeze(dim=-1)
out = out + bias
return out
def extra_repr(self):
# For printing
return 'input_dims={}, output_dims={}, extra_dims={}'.format(
self.input_dims, self.output_dims, str(self.extra_dims)
)
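# --- Hedged shape-check sketch (editorial addition) ---
# MultivarLinear applies N independent linear maps in parallel: for an input of
# shape (batch, N, input_dims) the output is (batch, N, output_dims). The sizes
# below are illustrative assumptions.
def _example_multivar_linear():
    layer = MultivarLinear(input_dims=8, output_dims=4, extra_dims=[10])
    x = torch.randn(32, 10, 8)          # batch of 32, 10 parallel networks
    out = layer(x)
    assert out.shape == (32, 10, 4)
    return out.shape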
class InputMask(nn.Module):
def __init__(self, input_mask):
"""
Module for handling to mask the input. Needed to simulate different parent sets.
Parameters
----------
input_mask : torch.Tensor/None
If a tensor, it is assumed to be a fixed mask for all forward passes.
If None, it is required to pass the mask during every forward pass.
"""
super().__init__()
if isinstance(input_mask, torch.Tensor):
self.register_buffer('input_mask', input_mask.float(), persistent=False)
else:
self.input_mask = input_mask
def forward(self, x, mask=None, mask_val=0):
"""
Forward pass.
Parameters
----------
x : torch.Tensor
Input that should be masked.
mask : torch.FloatTensor/None
If self.input_mask is None, this tensor must be not none. Will be used
to mask the input. A value of 1.0 means that an element is not masked,
and 0.0 that it will be masked. Is broadcasted over dimensions with x.
mask_val : float
Value to set for masked elements.
"""
# Check if mask is passed or should be taken constant
if mask is None:
assert self.input_mask is not None, "No mask was given in InputMask module."
mask = self.input_mask
if len(mask.shape) > len(x.shape):
x = x.reshape(x.shape[:1] + (1,)*(len(mask.shape)-len(x.shape)) + x.shape[1:])
if len(x.shape) > len(mask.shape):
mask = mask.reshape((1,)*(len(x.shape)-len(mask.shape)) + mask.shape)
mask = mask.to(x.dtype)
if mask_val != 0.0:
x = x * mask + (1 - mask) * mask_val
else:
x = x * mask
return x
class EmbedLayer(nn.Module):
def __init__(self, num_vars, num_categs, hidden_dim, input_mask, sparse_embeds=False):
"""
Embedding layer to represent categorical inputs in continuous space. For efficiency, the embeddings
of different inputs are summed in this layer instead of stacked. This is equivalent to stacking the
embeddings and applying a linear layer, but is more efficient with slightly more parameter cost.
Masked inputs are represented by a zero embedding tensor.
Parameters
----------
num_vars : int
Number of variables that are input to each neural network.
num_categs : int
Max. number of categories that each variable can take.
hidden_dim : int
Output dimensionality of the embedding layer.
input_mask : InputMask
Input mask module to use for masking possible inputs.
sparse_embeds : bool
If True, we sparsify the embedding tensors before summing them together in the
forward pass. This is more memory efficient and can give a considerable speedup
for networks with many variables, but can be slightly slower for small networks.
It is recommended to set it to True for graphs with more than 50 variables.
"""
super().__init__()
self.num_vars = num_vars
self.hidden_dim = hidden_dim
self.input_mask = input_mask
self.sparse_embeds = sparse_embeds
self.num_categs = num_categs
# For each of the N networks, we have num_vars*num_categs possible embeddings to model.
# Sharing embeddings across all N networks can limit the expressiveness of the networks.
# Instead, we share them across 10-20 variables for large graphs to reduce memory.
self.num_embeds = self.num_vars*self.num_vars*self.num_categs
if self.num_embeds > 1e7:
self.num_embeds = int(math.ceil(self.num_embeds / 20.0))
self.shortend = True
elif self.num_embeds > 1e6:
for s in range(11, -1, -1):
if self.num_vars % s == 0:
self.num_embeds = self.num_embeds // s
break
self.shortend = True
else:
self.shortend = False
self.embedding = nn.Embedding(num_embeddings=self.num_embeds,
embedding_dim=hidden_dim)
self.embedding.weight.data.mul_(2./math.sqrt(self.num_vars))
self.bias = nn.Parameter(torch.zeros(num_vars, self.hidden_dim))
# Tensor for mapping each input to its corresponding embedding range in self.embedding
pos_trans = torch.arange(self.num_vars**2, dtype=torch.long) * self.num_categs
self.register_buffer("pos_trans", pos_trans, persistent=False)
def forward(self, x, mask):
# For very large x tensors during graph fitting, it is more efficient to split it
# into multiple sub-tensors before running the forward pass.
num_chunks = int(math.ceil(np.prod(mask.shape) / 256e5))
if self.training or num_chunks == 1:
return self.embed_tensor(x, mask)
else:
x = x.chunk(num_chunks, dim=0)
mask = mask.chunk(num_chunks, dim=0)
x_out = []
for x_l, mask_l in zip(x, mask):
out_l = self.embed_tensor(x_l, mask_l)
x_out.append(out_l)
x_out = torch.cat(x_out, dim=0)
return x_out
def embed_tensor(self, x, mask):
assert x.shape[-1] == self.num_vars
if len(x.shape) == 2: # Add variable dimension
x = x.unsqueeze(dim=1).expand(-1, self.num_vars, -1)
else:
assert x.shape[-2] == self.num_vars
# Number of variables
pos_trans = self.pos_trans.view((1,)*(len(x.shape)-2) + (self.num_vars, self.num_vars))
x = x + pos_trans
if self.sparse_embeds:
# Selects the non-zero embedding tensors and stores them in a separate tensor instead of masking.
# Lower memory consumption and faster for networks with many variables.
flattened_mask = mask.flatten(0, 1).long()
num_neighbours = flattened_mask.sum(dim=-1)
max_neighbours = num_neighbours.max()
x_sparse = torch.masked_select(x, mask == 1.0)
if self.shortend:
x_sparse = x_sparse % self.num_embeds
x_sparse = self.embedding(x_sparse)
x_sparse = torch.cat([x_sparse.new_zeros(x_sparse.shape[:-2]+(1,)+x_sparse.shape[-1:]), x_sparse], dim=-2)
idxs = flattened_mask.cumsum(dim=-1)
idxs[1:] += num_neighbours[:-1].cumsum(dim=-1)[..., None]
idxs = (idxs * flattened_mask).sort(dim=-1, descending=True)[0]
# Determine how many embeddings to sum per variable. Needed to construct the sparse tensor.
sort_neighbours, sort_indices = num_neighbours.sort(dim=0)
_, resort_indices = sort_indices.sort(dim=0)
pos = 1+torch.arange(num_neighbours.shape[0], device=num_neighbours.device, dtype=torch.long)
comp_cost = sort_neighbours * pos + max_neighbours * (num_neighbours.shape[0] - pos)
min_cost, argmin_cost = comp_cost.min(dim=0)
mid_neighbours = sort_neighbours[argmin_cost]
# More efficient: split tensor into two, one half with the variables with the least and the other
# with the most embeddings to sum. This prevents large computational costs if we have a few outliers.
idxs = idxs[sort_indices]
idxs = idxs[:, :max_neighbours]
if mid_neighbours > 0:
x_new_1 = x_sparse.index_select(index=idxs[:argmin_cost+1, :mid_neighbours].reshape(-1), dim=0)
x_1 = x_new_1.reshape(-1, mid_neighbours, x_sparse.shape[-1]).sum(dim=-2)
else:
x_1 = x_sparse.new_zeros(argmin_cost+1, x_sparse.shape[-1])
x_new_2 = x_sparse.index_select(index=idxs[argmin_cost+1:, :max_neighbours].reshape(-1), dim=0)
x_2 = x_new_2.reshape(-1, max_neighbours, x_sparse.shape[-1]).sum(dim=-2)
# Bring tensors back in order
x = torch.cat([x_1, x_2], dim=0)[resort_indices]
x = x.reshape(mask.shape[0], mask.shape[1], x.shape[-1])
else:
if self.shortend:
x = x % self.num_embeds
x = self.embedding(x)
x = self.input_mask(x, mask=mask[..., None], mask_val=0.0)
if len(x.shape) > 3:
x = x.sum(dim=-2)
bias = self.bias.view((1,)*(len(x.shape)-2) + self.bias.shape)
x = x + bias
return x
def get_activation_function(actfn):
"""
Returns an activation function based on a string description.
"""
if actfn is None or actfn == 'leakyrelu':
def create_actfn(): return nn.LeakyReLU(0.1, inplace=True)
elif actfn == 'gelu':
def create_actfn(): return nn.GELU()
elif actfn == 'relu':
def create_actfn(): return nn.ReLU()
elif actfn == 'swish' or actfn == 'silu':
def create_actfn(): return nn.SiLU()
else:
raise Exception('Unknown activation function ' + str(actfn))
return create_actfn
def create_model(num_vars, num_categs, hidden_dims, actfn=None):
"""
Method for creating a full multivariable MLP as used in ENCO.
"""
num_outputs = max(1, num_categs)
num_inputs = num_vars
actfn = get_activation_function(actfn)
mask = InputMask(None)
if num_categs > 0:
pre_layers = EmbedLayer(num_vars=num_vars,
num_categs=num_categs,
hidden_dim=hidden_dims[0],
input_mask=mask,
sparse_embeds=(num_vars >= 50))
num_inputs = pre_layers.hidden_dim
pre_layers = [pre_layers, actfn()]
else:
pre_layers = mask
mlps = MultivarMLP(input_dims=num_inputs,
hidden_dims=hidden_dims,
output_dims=num_outputs,
extra_dims=[num_vars],
actfn=actfn,
pre_layers=pre_layers)
return mlps
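# --- Hedged usage sketch (editorial addition) ---
# Builds the stacked MLP for a small categorical graph and runs one forward
# pass. The sizes and the all-ones adjacency mask are illustrative assumptions,
# not values from the original ENCO experiments.
def _example_create_model():
    num_vars, num_categs, batch = 4, 3, 2
    model = create_model(num_vars=num_vars, num_categs=num_categs,
                         hidden_dims=[16])
    x = torch.randint(num_categs, size=(batch, num_vars))   # category IDs per variable
    mask = torch.ones(batch, num_vars, num_vars)            # all parents allowed
    out = model(x, mask=mask)
    assert out.shape == (batch, num_vars, num_categs)
    return out.shape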
|
StarcoderdataPython
|
6481354
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffyilm(RPackage):
"""affyILM is a preprocessing tool which estimates gene
expression levels for Affymetrix Gene Chips. Input from
physical chemistry is employed to first background subtract
intensities before calculating concentrations on behalf
of the Langmuir model."""
homepage = "https://www.bioconductor.org/packages/affyILM/"
git = "https://git.bioconductor.org/packages/affyILM.git"
version('1.28.0', commit='<PASSWORD>3ebc599e0ea4a1d6fa8d5511ccf8bef7de')
depends_on('[email protected]:3.4.9', when='@1.28.0')
depends_on('r-gcrma', type=('build', 'run'))
depends_on('r-affxparser', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
|
StarcoderdataPython
|
3332958
|
<gh_stars>0
# First step
import cv2
filename = '../../video_for_training/validation.txt'
with open(filename) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
array = []
listImage = []
# print(content)
for element in content:
check = element[-2:]
# print(check)
if str(check) == ' 0':
array.append(element)
if str(element[-4:]) == '.png':
listImage.append(element)
# print(listImage)
# print(array)
for i in range(len(listImage)):
path = '/home/duy/Documents/video_for_training/images/' + listImage[i][-16:]
pathSave = '/home/duy/Documents/mmdetection/2bbox/'
img = cv2.imread(path)
bbox = array[i].split(' ')
bbox[0] = round(float(bbox[0]))
bbox[1] = round(float(bbox[1]))
bbox[2] = round(float(bbox[2]))
bbox[3] = round(float(bbox[3]))
y1 = int(bbox[0]) + int(bbox[2])
y2 = int(bbox[1]) + int(bbox[3])
image = cv2.rectangle(img, (bbox[0], bbox[1]), (y1, y2), (0, 0, 255), thickness=5)
cv2.imwrite(pathSave + listImage[i][-16:], image)
i += 1
if i % 30 == 0:
print('-------------------->', i)
cv2.waitKey(0)
|
StarcoderdataPython
|
3531909
|
from functools import partial
import aiometer
from app import models, schemas
from app.factories.dns_record import DnsRecordFactory
from app.services.whois import Whois
class DomainFactory:
@staticmethod
async def from_hostname(hostname: str) -> schemas.Domain:
tasks = [
partial(Whois.lookup, hostname),
partial(DnsRecordFactory.from_hostname, hostname),
partial(models.Snapshot.find_by_hostname, hostname),
]
whois, records, snapshots = await aiometer.run_all(tasks)
return schemas.Domain(
hostname=hostname, whois=whois, dns_records=records, snapshots=snapshots
)
|
StarcoderdataPython
|
11283673
|
from .MCP3008 import MCP3008
|
StarcoderdataPython
|
368707
|
#!/bin/python
import sys
assert len(sys.argv) > 1
f = open(sys.argv[1], "r")
prefix = sys.argv[1]
rTimes = list()
#print(prefix + ",benchmark,solve mem,solve time,drat kb,drat sec,lrat kb,lrat sec,restarts,decisions,conflicts,propagations,mark proof sec,dump lrat sec, ana sec, anamem mb")
for l in f:
data = l.split(",")
rTimes.append((float(data[3]), float(data[6]), float(data[8])))
rTimes.sort(key=lambda tup: tup[0])
for i in range(0,len(rTimes)):
print(str(i+1) + "," + str(rTimes[i][0]) + "," + str(rTimes[i][1]) + "," + str(rTimes[i][2]))
|
StarcoderdataPython
|
6583252
|
<filename>Models/Resnet34V2.py
# from google.colab import drive
# drive.mount('/content/drive/')
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import numpy as np
import pathlib
import os
import cv2 as cv
import pydotplus
from tensorflow.python.keras import layers
from tensorflow.keras import models
# from tensorflow.keras.utils import np_utils
from sklearn import metrics
from tensorflow.keras.utils import plot_model
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
num_class = 5
IMAGE_SIZE = 124
train_data = pathlib.Path('/content/drive/My Drive/MozSnake/Training/')
def resnt34( num_class):
stride =1;
Channel_axis=3;
def comom_bloxk(x):
x = layers.BatchNormalization(axis=Channel_axis)(x)
x = tf.nn.relu(x)
return x
def residual_block(x_in, filters):
x_tmp = x_in
x_tmp = comom_bloxk(x_tmp)
x_tmp = layers.Conv2D(filters, kernel_size=(3, 3), strides=stride, padding='same')(x_tmp)
x_tmp = comom_bloxk(x_tmp)
x_tmp = layers.Conv2D(filters, kernel_size=(3, 3), strides=stride, padding='same')(x_tmp)
x_shortcut = layers.Conv2D(filters, kernel_size=(2, 2), strides=stride, padding='same')(x_in)
x = layers.add([x_tmp, x_shortcut])
x = comom_bloxk(x)
return x
# Conv 1
img_input = layers.Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
x = layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same')(img_input)
x = comom_bloxk(x)
x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
# Conv2
for i in range(3):
x = residual_block(x, 64)
# Conv2
for i in range(4):
x = residual_block(x, 128)
# Conv3
for i in range(6):
x = residual_block(x, 256)
# Conv4
for i in range(3):
x = residual_block(x, 512)
x = layers.GlobalAveragePooling2D()(x)
# x = layers.Dropout(0.25)(x)
x= layers.Flatten()(x)
output = layers.Dense(num_class, activation=tf.nn.softmax)(x)
model = models.Model(inputs=img_input, outputs=output, name='ResNet34')
return model
model = resnt34(num_class)
print(model.summary())
# plot_model(model, show_shapes=True, to_file='resnet34.png')
# def load_data(data_directory):
#
# directories = [d for d in os.listdir(data_directory)
# if os.path.isdir(os.path.join(data_directory, d))]
# labels = []
# images = []
# for d in directories:
# label_directory = os.path.join(data_directory, d)
# file_names = [os.path.join(label_directory, f)
# for f in os.listdir(label_directory)]
# for f in file_names:
# img = cv.imread(f)
# img = cv.resize(img,(IMAGE_SIZE,IMAGE_SIZE))
# # cv.imwrite(f,img)
# images.append(img)
# labels.append(int(d))
# return images, labels
#
# imagens , labels = load_data(train_data)
#
#
#
#
# kf = StratifiedKFold(n_splits=10)
#
# for train, test in kf.split(imagens, labels):
# imgs_train = []
# labs_train = []
# imgs_test = []
# labs_test = []
# # print("Train::", train, "Test::", test)
#
# for i in train:
# imgs_train.append(imagens[i])
# labs_train.append(labels[i])
#
# for i in test:
# imgs_test.append(imagens[i])
# labs_test.append(labels[i])
#
#
# imgs_train = np.asarray(imgs_train)
# imgs_test = np.asarray(imgs_test)
#
# # imgs_train = imgs_train.astype('float32')/np.float32(255)
# # imgs_test = imgs_test.astype('float32')/np.float32(255)
#
#
# labs_train, labs_test = np.array(labs_train), np.array(labs_test)
# # labs_train = labs_train.astype(np.int32)
# # labs_test = labs_test.astype(np.int32)
#
# labs_train = tf.keras.utils.to_categorical(labs_train, num_class)
#
# print(imgs_train.shape)
# print(labs_train.shape)
#
# model = resnt34(num_class)
# model.compile(loss=tf.keras.losses.categorical_crossentropy,
# optimizer=tf.keras.optimizers.Adam(),
# metrics=['accuracy'])
#
# model.fit(imgs_train, labs_train, verbose=2, epochs=70)
# # model.load_weights('my_modelRN.h5')
#
# x_predict = model.predict(imgs_test)
# x_predict = np.argmax(x_predict, axis=1)
#
# accuracy = metrics.accuracy_score(labs_test, x_predict)
# f1 = metrics.f1_score(labs_test, x_predict, average="micro")
# precision = metrics.precision_score(labs_test, x_predict, average="micro")
# recall = metrics.recall_score(labs_test, x_predict, average="micro")
#
# print('Acurracy: %f' % accuracy)
# print('F1: %f' % f1)
# print('Precision: %f' % precision)
# print('Recall: %f' % recall)
|
StarcoderdataPython
|
11263258
|
<filename>examples/003_pandapower_modelexchange/pandapower/7EFC7D_pandapower.py
from pymodelica import compile_fmu
fmu_name = compile_fmu("pandapower", "pandapower.mo",compiler_log_level="d",
version="2.0", target="me",
compiler_options={'extra_lib_dirs':["C:\\Users\\DRRC\\Desktop\\Joscha\\SimulatorToFMU\\simulatortofmu\\parser\\libraries\\modelica"]})
|
StarcoderdataPython
|
5148079
|
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Transformer models: encoder, decoder, language model, and encoder-decoder.
The "Transformer" name and network architecture were introduced in the paper
[Attention Is All You Need](https://arxiv.org/abs/1706.03762).
"""
from trax import layers as tl
# Defaults used across Transformer variants.
MODE = 'train'
D_MODEL = 512
D_FF = 2048
N_LAYERS = 6
N_HEADS = 8
MAX_SEQUENCE_LENGTH = 2048
DROPOUT_RATE = .1
DROPOUT_SHARED_AXES = None
FF_ACTIVATION_TYPE = tl.Relu
def TransformerEncoder(vocab_size,
n_classes=10,
d_model=D_MODEL,
d_ff=D_FF,
n_layers=N_LAYERS,
n_heads=N_HEADS,
max_len=MAX_SEQUENCE_LENGTH,
dropout=DROPOUT_RATE,
dropout_shared_axes=DROPOUT_SHARED_AXES,
mode=MODE,
ff_activation=FF_ACTIVATION_TYPE):
"""Returns a Transformer encoder suitable for N-way classification.
This model maps tokenized text to N-way (``n_classes``) activations:
- input: Array representing a batch of text strings via token IDs plus
padding markers; shape is (batch_size, sequence_length), where
sequence_length <= ``max_len``. Array elements are integers in
``range(vocab_size)``, and 0 values mark padding positions.
- output: Array representing a batch of raw (non-normalized) activations
over ``n_classes`` categories; shape is (batch_size, ``n_classes``).
Args:
vocab_size: Input vocabulary size -- each element of the input array
should be an integer in ``range(vocab_size)``. These integers typically
represent token IDs from a vocabulary-based tokenizer.
n_classes: Last/innermost dimension of output arrays, suitable for N-way
classification.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each encoder block.
n_layers: Number of encoder blocks. Each block includes attention, dropout,
residual, layer-norm, feedforward (:py:class:`Dense`), and activation
layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder blocks. The same rate is also
used for attention dropout in encoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each encoder block will include dropout; else, it
will pass all values through unaltered.
ff_activation: Type of activation function at the end of each encoder
block; must be an activation-type subclass of :py:class:`Layer`.
Returns:
A Transformer model that maps strings (conveyed by token IDs) to
raw (non-normalized) activations over a range of output classes.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _EncBlock():
return _EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
return tl.Serial(
tl.Branch([], tl.PaddingMask()), # Creates masks from copy of the tokens.
tl.Embedding(vocab_size, d_model),
_Dropout(),
tl.PositionalEncoding(max_len=max_len),
[_EncBlock() for _ in range(n_layers)],
tl.Select([0], n_in=2), # Drops the masks.
tl.LayerNorm(),
tl.Mean(axis=1),
tl.Dense(n_classes),
)
def TransformerDecoder(vocab_size=None,
d_model=D_MODEL,
d_ff=D_FF,
n_layers=N_LAYERS,
n_heads=N_HEADS,
max_len=MAX_SEQUENCE_LENGTH,
dropout=DROPOUT_RATE,
dropout_shared_axes=DROPOUT_SHARED_AXES,
mode=MODE,
ff_activation=FF_ACTIVATION_TYPE):
"""Returns a Transformer decoder.
This model maps sequential inputs to sequential outputs:
- input if ``vocab_size`` is specified: array representing a batch
of text strings via token IDs plus padding markers; shape is
(batch_size, sequence_length). The tensor elements are integers in
``range(vocab_size)``, and 0 values mark padding positions.
- input if ``vocab_size`` is ``None``: 3-D array representing a batch of
sequences of activation vectors; shape is (batch_size, sequence_length,
``d_model``).
- output: 3-D array with shape (batch_size, sequence_length, ``d_model``).
The model uses causal attention and does *not* shift the input to the right.
Thus, the output for position `t` is based on inputs up to and including
position `t`.
Args:
vocab_size: If specified, gives the input vocabulary size -- each element
of the input tensor should be an integer in ``range(vocab_size)``.
If ``None``, indicates that the model expects as input sequences of
floating point vectors, each with ``d_model`` components.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each encoder block.
n_layers: Number of decoder blocks. Each block includes attention, dropout,
residual, layer-norm, feedforward (:py:class:`Dense`), and activation
layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within decoder blocks. The same rate is also
used for attention dropout in decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each encoder block will include dropout; else, it
will pass all values through unaltered.
ff_activation: Type of activation function at the end of each encoder
block; must be an activation-type subclass of :py:class:`Layer`.
Returns:
If ``vocab_size`` is defined: a Transformer model that maps strings
(conveyed by token IDs) to sequences of activation vectors.
If ``vocab_size`` is ``None``: a Transformer model that maps sequences of
activation vectors to sequences of activation vectors.
"""
def _EmbeddingOrDense():
return (tl.Embedding(vocab_size, d_model) if vocab_size is not None
else tl.Dense(d_model))
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _DecBlock():
return _DecoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
return tl.Serial(
_EmbeddingOrDense(),
_Dropout(),
tl.PositionalEncoding(max_len=max_len),
[_DecBlock() for _ in range(n_layers)],
tl.LayerNorm(),
)
def TransformerLM(vocab_size,
d_model=D_MODEL,
d_ff=D_FF,
n_layers=N_LAYERS,
n_heads=N_HEADS,
max_len=MAX_SEQUENCE_LENGTH,
dropout=DROPOUT_RATE,
dropout_shared_axes=DROPOUT_SHARED_AXES,
mode=MODE,
ff_activation=FF_ACTIVATION_TYPE):
"""Returns a Transformer language model.
This model performs autoregressive language modeling:
- input: Array representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). Array
elements are integers in ``range(vocab_size)``, and 0 values mark padding
positions.
- output: 3-D array of raw activations with last/innermost dimension of
``vocab_size``, suitable for decoding into a batch of token strings;
shape is (batch_size, sequence_length, ``vocab_size``).
This model uses only the decoder part of the overall Transformer.
Args:
vocab_size: Input vocabulary size -- each element of the input array
should be an integer in ``range(vocab_size)``. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each encoder block.
n_layers: Number of decoder blocks. Each block includes attention, dropout,
residual, layer-norm, feedforward (:py:class:`Dense`), and activation
layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within decoder blocks. The same rate is also
used for attention dropout in decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'predict'``, use fast inference. If ``'train'``, each decoder
block will include dropout; else, it will pass all values through
unaltered.
ff_activation: Type of activation function at the end of each encoder
block; must be an activation-type subclass of :py:class:`Layer`.
Returns:
A Transformer language model that maps strings (represented as token ID
sequences) to sequences of raw (non-normalized) activation vectors; each
vector in the sequence can be mapped (e.g., by `argmax`) to a token ID.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _DecBlock():
return _DecoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
return tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(vocab_size, d_model),
_Dropout(),
tl.PositionalEncoding(max_len=max_len, mode=mode),
[_DecBlock() for _ in range(n_layers)],
tl.LayerNorm(),
tl.Dense(vocab_size),
)
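# --- Hedged usage sketch (editorial addition, not part of the original trax source) ---
# Wrapped in a function so it does not run at import time (the decoder blocks it
# relies on are defined further down in this module). The vocabulary size,
# model dimensions, and batch shape below are illustrative assumptions.
def _example_transformer_lm():
    import numpy as np
    from trax import shapes
    model = TransformerLM(vocab_size=256, d_model=64, d_ff=128,
                          n_layers=2, n_heads=2, max_len=128, mode='eval')
    tokens = np.zeros((2, 16), dtype=np.int32)   # batch of 2 padded token sequences
    model.init(shapes.signature(tokens))
    logits = model(tokens)                       # expected shape: (2, 16, 256)
    return logits.shape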
def Transformer(input_vocab_size,
output_vocab_size=None,
d_model=D_MODEL,
d_ff=D_FF,
n_encoder_layers=N_LAYERS,
n_decoder_layers=N_LAYERS,
n_heads=N_HEADS,
max_len=MAX_SEQUENCE_LENGTH,
dropout=DROPOUT_RATE,
dropout_shared_axes=DROPOUT_SHARED_AXES,
mode=MODE,
ff_activation=FF_ACTIVATION_TYPE):
"""Returns a full Transformer model.
This model is an encoder-decoder that performs tokenized string-to-string
("source"-to-"target") transduction:
- inputs (2):
- source: Array representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length),
where sequence_length <= ``max_len``. Array elements are integers in
``range(input_vocab_size)``, and 0 values mark padding positions.
- target: Array representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length),
where sequence_length <= ``max_len``. Array elements are integers in
``range(output_vocab_size)``, and 0 values mark padding positions.
- output: 3-D array of raw activations with last/innermost dimension of
``output_vocab_size``, suitable for decoding into a batch of token
strings; shape is (batch_size, sequence_length, ``vocab_size``).
An example use would be to translate (tokenized) sentences from English to
German.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in ``range(vocab_size)``. These integers typically
represent token IDs from a vocabulary-based tokenizer.
output_vocab_size: If specified, gives the vocabulary size for the targets;
if ``None``, then input and target integers (token IDs) are assumed to
come from the same vocabulary.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each encoder block.
n_encoder_layers: Number of encoder blocks.
n_decoder_layers: Number of decoder blocks.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder/decoder blocks. The same rate is
also used for attention dropout in encoder/decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'predict'``, use fast inference. If ``'train'``, each
encoder/decoder block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each
encoder/decoder block; must be an activation-type subclass of
:py:class:`Layer`.
Returns:
A Transformer model as a layer that maps from a source-target tokenized
text pair to activations over a vocab set.
"""
# Avoid 'predict' mode in encoder, since encoder doesn't run stepwise.
encoder_mode = 'eval' if mode == 'predict' else mode
# Share embedding weights if no separate output vocab size.
in_embedder = tl.Embedding(input_vocab_size, d_model)
if output_vocab_size is None:
out_embedder = in_embedder
output_vocab_size = input_vocab_size
else:
out_embedder = tl.Embedding(output_vocab_size, d_model)
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _EncBlock():
return _EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
def _Encoder():
encoder = tl.Serial(
in_embedder,
_Dropout(),
tl.PositionalEncoding(max_len=max_len, mode=encoder_mode),
[_EncBlock() for _ in range(n_encoder_layers)],
tl.LayerNorm(),
)
return tl.Cache(encoder) if mode == 'predict' else encoder
def _EncDecBlock():
return _EncoderDecoderBlock(d_model, d_ff, n_heads, dropout,
dropout_shared_axes, mode, ff_activation)
# Input to model is encoder-side tokens and decoder-side tokens: tok_d, tok_e
# Model output is decoder-side vectors and decoder-side tokens: vec_d tok_d
return tl.Serial(
tl.Select([0, 1, 1]), # Copies decoder tokens for use in loss.
# Encode.
tl.Branch([], tl.PaddingMask()), # tok_e masks tok_d tok_d
_Encoder(),
# Decode.
tl.Select([2, 1, 0]), # Re-orders inputs: tok_d masks vec_e .....
tl.ShiftRight(mode=mode),
out_embedder,
_Dropout(),
tl.PositionalEncoding(max_len=max_len, mode=mode),
tl.Branch([], tl.EncoderDecoderMask()), # vec_d masks ..... .....
[_EncDecBlock() for _ in range(n_decoder_layers)],
tl.LayerNorm(),
tl.Select([0], n_in=3), # Drops masks and encoding vectors.
# Map vectors to match output vocab size.
tl.Dense(output_vocab_size),
)
def _EncoderBlock(d_model,
d_ff,
n_heads,
dropout,
dropout_shared_axes,
mode,
ff_activation):
"""Returns a list of layers that implements a Transformer encoder block.
The input to the block is a pair (activations, mask) where the mask was
created from the original source tokens to prevent attending to the padding
part of the input. The block's outputs are the same type/shape as its inputs,
so that multiple blocks can be chained together.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder blocks. The same rate is also used
for attention dropout in encoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that act in series as a (repeatable) encoder block.
"""
def _Attention():
return tl.Attention(d_model, n_heads=n_heads, dropout=dropout, mode=mode)
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _FFBlock():
return _FeedForwardBlock(d_model, d_ff, dropout, dropout_shared_axes, mode,
ff_activation)
return [
tl.Residual(
tl.LayerNorm(),
_Attention(),
_Dropout(),
),
tl.Residual(
tl.LayerNorm(),
_FFBlock(),
_Dropout(),
),
]
def _DecoderBlock(d_model,
d_ff,
n_heads,
dropout,
dropout_shared_axes,
mode,
ff_activation):
"""Returns a list of layers that implements a Transformer decoder block.
The input to the block is a pair (activations, mask) where the mask encodes
causal connections, preventing attention to future positions in the sequence.
The block's outputs are the same type/shape as its inputs, so that multiple
blocks can be chained together.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within decoder blocks. The same rate is also used
for attention dropout in decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that act in series as a (repeatable) decoder block.
"""
def _CausalAttention():
return tl.CausalAttention(d_model, n_heads=n_heads, dropout=dropout,
mode=mode)
def _FFBlock():
return _FeedForwardBlock(d_model, d_ff, dropout, dropout_shared_axes, mode,
ff_activation)
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
return [
tl.Residual(
tl.LayerNorm(),
_CausalAttention(),
_Dropout(),
),
tl.Residual(
tl.LayerNorm(),
_FFBlock(),
_Dropout(),
),
]
def _EncoderDecoderBlock(d_model,
d_ff,
n_heads,
dropout,
dropout_shared_axes,
mode,
ff_activation):
"""Returns a list of layers implementing a Transformer encoder-decoder block.
The block input is a triple (decoder_activations, mask, encoder_activations)
where the mask was created from the original input token IDs to prevent
attending to padding positions for that input.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder/decoder blocks. The same rate is
also used for attention dropout in encoder/decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that act in series as a (repeatable) encoder-decoder
block.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _AttentionQKV():
return tl.AttentionQKV(d_model, n_heads=n_heads, dropout=dropout,
mode=mode, cache_KV_in_predict=True)
def _CausalAttention():
return tl.CausalAttention(d_model, n_heads=n_heads, mode=mode)
def _FFBlock():
return _FeedForwardBlock(d_model, d_ff, dropout, dropout_shared_axes, mode,
ff_activation)
return [ # vec_d masks vec_e
tl.Residual(
tl.LayerNorm(),
_CausalAttention(),
_Dropout(),
),
tl.Residual(
tl.LayerNorm(),
tl.Select([0, 2, 2, 1, 2]), # vec_d vec_e vec_e masks vec_e
_AttentionQKV(), # vec_d masks vec_e
_Dropout(),
),
tl.Residual(
tl.LayerNorm(),
_FFBlock(),
_Dropout(),
),
]
def _FeedForwardBlock(d_model,
d_ff,
dropout,
dropout_shared_axes,
mode,
activation):
"""Returns a list of layers that implements a feedforward block.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that maps vectors to vectors.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
return [
tl.Dense(d_ff),
activation(),
_Dropout(),
tl.Dense(d_model),
]
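# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It assumes this file is trax's transformer definition, i.e. `trax` is
# installed and `tl` plus the constants used above are imported at the top of
# the file. The vocabulary size, layer count and token shapes below are
# arbitrary example values.
if __name__ == '__main__':
    import numpy as np
    from trax import shapes

    lm = TransformerLM(vocab_size=1000, n_layers=2, mode='eval')
    tokens = np.zeros((2, 16), dtype=np.int32)   # (batch, sequence_length)
    lm.init(shapes.signature(tokens))            # initialize weights/state
    logits = lm(tokens)                          # -> shape (2, 16, 1000)
    print(logits.shape)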
|
StarcoderdataPython
|
6583112
|
#/***********************************************************************
# * Licensed Materials - Property of IBM
# *
# * IBM SPSS Products: Statistics Common
# *
# * (C) Copyright IBM Corp. 1989, 2020
# *
# * US Government Users Restricted Rights - Use, duplication or disclosure
# * restricted by GSA ADP Schedule Contract with IBM Corp.
# ************************************************************************/
# Construct a dataset listing the variables and selected properties for a collection of data files
# 05-23-2008 Original version - JKP
# 04-29-2009 Add file handle support
# 11-16-2009 Protect against UP converting escape sequences with "\" characters
#12-16-2009 Enable translation
__version__ = "1.2.1"
__author__ = "JKP, SPSS"
#try:
#import wingdbstub
#except:
#pass
import spss, os, re, locale
import spssaux
from extension import Template, Syntax
try:
from extension import processcmd
except:
print("""This command requires a newer version of extension.py. Please download it from
SPSS Developer Central and replace the existing file""")
raise
class DataStep(object):
def __enter__(self):
"""initialization for with statement"""
try:
spss.StartDataStep()
except:
spss.Submit("EXECUTE")
spss.StartDataStep()
return self
def __exit__(self, type, value, tb):
spss.EndDataStep()
return False
# The following block of code is for using the gather function as an Extension command.
def Run(args):
"""Execute the GATHERMD command"""
###print args #debug
args = args[list(args.keys())[0]]
helptext=r"""GATHERMD
Create and activate a dataset whose cases are variable names and labels
and, optionally, selected attributes from one or more data files.
GATHERMD list-of-specifications
[/OPTIONS [FILETYPES=*spss sas stata]
[DSNAME=name]
[FILENAMEPATTERN="pattern expression"]]
[ATTRLENGTH=value]
[/ATTRIBUTES list-of-attribute-names]
[HELP].
list-of-specifications is a list of one or more filenames, optionally with paths, and/or directories.
For directories, all appropriate files in the directory and its subdirectories are searched. With version 18
or later, the file specifications can include PASW Statistics file handles.
FILETYPES defaults to SPSS files (.sav and .por).
sas files are .sas7bdat, .sd7, .sd2, .ssd01, and .xpt
stata files are .dta
Files with any of the specified types found in the directories specified are searched. Since
these files are opened in SPSS, if the same file is already open in SPSS, it will be reopened
without saving any changes that may have been made.
DSNAME optionally specifies a dataset name to be assigned to the output dataset.
FILENAMEPATTERN can be specified as a quoted literal containing a regular expression pattern
to be used as a filter on filenames. For example, FILENAMEPATTERN="car" would limit the
files searched to those whose name starts with "car". FILENAMEPATTERN=".*car" would accept
any filenames containing "car". These are not the same as filename wildcards found in many operating systems.
For example, "abc*" will match any name starting with ab: it means literally ab followed by zero or more c's.
The regular expression is not case sensitive, and it is applied to the name of the
file without the extension. For a full explanation of regular expressions, one good source is
http://www.amk.ca/python/howto/regex/
/ATTRIBUTES list-of-names
specifies a list of custom variable attributes to be included in the output dataset. The variable
names will be the attribute names except if they conflict with the built-in variables source,
VariableName, and VariableLabel. If the attribute is not present, the value will be blank.
If the attribute is an array, only the first value is included.
Attribute variables in the output dataset are truncated to the length specified in ATTRLENGTH,
which defaults to 256
/HELP displays this text and does nothing else.
Examples:
GATHERMD "c:/spss17/samples".
gathermd "c:/temp/firstlevel" "c:/spss16/samples/voter.sav" /options filetypes=spss sas
dsname=gathered.
"""
# define the command syntax and enable translation
oobj = Syntax([
Template("", subc="", var="files", ktype="literal", islist=True),
Template("FILETYPES", subc="OPTIONS", var="filetypes", ktype="str", islist=True),
Template("FILENAMEPATTERN", subc="OPTIONS", var="filenamepattern", ktype="literal"),
Template("DSNAME", subc="OPTIONS", var="dsname", ktype="varname"),
Template("ATTRLENGTH", subc="OPTIONS", var="attrlength", ktype="int", vallist=(1, 32767)),
Template("", subc="ATTRIBUTES", var="attrlist", ktype="varname", islist=True)])
global _
try:
_("---")
except:
def _(msg):
return msg
if "HELP" in args:
#print helptext
helper()
else:
processcmd(oobj, args, gather)
#oobj.parsecmd(args, vardict = spssaux.VariableDict())
#gather(**oobj.parsedparams)
def helper():
"""open html help in default browser window
The location is computed from the current module name"""
import webbrowser, os.path
path = os.path.splitext(__file__)[0]
helpspec = "file://" + path + os.path.sep + \
"markdown.html"
# webbrowser.open seems not to work well
browser = webbrowser.get()
if not browser.open_new(helpspec):
print(("Help file not found:" + helpspec))
try: #override
from extension import helper
except:
pass
def gather(files, filetypes=["spss"], filenamepattern=None, dsname=None,attrlist=[], attrlength=256):
"""Create SPSS dataset listing variable names, variable labels, and source files for selected files. Return the name of the new dataset.
files is a list of files and/or directories. If an item is a file, it is processed; if it is a directory, the files and subdirectories
it contains are processed.
filetypes is a list of filetypes to process. It defaults to ["spss"] which covers sav and por. It can also include
"sas" for sas7bdat, sd7, sd2, ssd01, and xpt, and "stata" for dta
filenamepattern is an optional parameter that can contain a regular expression to be applied to the filenames to filter the
datasets that are processed. It is applied to the filename itself, omitting any directory path and file extension. The expression
is anchored to the start of the name and ignores case.
dsname is an optional name to be assigned to the new dataset. If not specified, a name will be automatically generated.
If dsname is specified, it will become the active dataset; otherwise, it need not be the active dataset.
attrlist is an optional list of custom attributes to be included in the output. For array attributes, only the first item is
recorded. The value is blank if the attribute is not present for the variable. Attribute variables are
strings of size attrlength bytes, truncated appropriately.
The output is just a dataset. It must be saved, if desired, after this function has completed.
Its name is the return value of this function.
Exception is raised if any files not found.
Examples:
gathermetadata.gather(["c:/temp/firstlevel", "c:/spss16/samples/voter.sav"], ["spss", "sas"])
searches spss and sas files in or under the temp/firstlevel directory plus the voter file.
gathermetadata.gather(["c:/temp/firstlevel"], filenamepattern="car")
searches the firstlevel directory for spss files whose names start with "car".
"""
encoding = locale.getlocale()[1]
filetypes = [f.lower() for f in filetypes]
for ft in filetypes:
if not ft in ["spss", "sas", "stata"]:
raise ValueError(_("Filetypes must be one or more of spss, sas, and stata."))
dsvars = {"source":"source", "variablename":"VariableName", "variablelabel":"variableLabel"}
with DataStep():
ds = spss.Dataset(name=None)
dsn = ds.name
varlist = ds.varlist
varlist.append("source",200)
varlist["source"].label=_("File containing the variable")
varlist.append("variableName", 64)
varlist["variableName"].label = _("Variable Name")
varlist.append("variableLabel", 256)
varlist["variableLabel"].label = _("Variable Label")
attrindexes = {}
for i, aname in enumerate(attrlist):
anamemod = addunique(dsvars, aname)
varlist.append(dsvars[anamemod], attrlength)
attrindexes[aname.lower()] = i
addvarinfo = makeaddinfo(dsn, filetypes, filenamepattern, dsvars, attrindexes, attrlength) #factory function
files = [fixescapes(f) for f in files] #UP is converting escape characters :-)
# walk the list of files and directories and open
try: # will fail if spssaux is prior to version 2.3
fh = spssaux.FileHandles()
except:
pass
notfound = []
for item in files:
try:
item = fh.resolve(item)
except:
pass
if os.path.isfile(item):
addvarinfo(item)
elif os.path.isdir(item):
for dirpath, dirnames, fnames in os.walk(item):
for f in fnames:
try:
addvarinfo(os.path.join(dirpath, f))
except EnvironmentError as e:
notfound.append(e.args[0])
else:
if not isinstance(item, str):
item = str(item, encoding)
notfound.append(_("Not found: %s") % item)
spss.Submit("DATASET ACTIVATE %s." % dsn)
if not dsname is None:
spss.Submit("DATASET NAME %s." % dsname)
dsn = dsname
if notfound:
raise ValueError("\n".join(notfound))
return dsn
def makeaddinfo(dsname, filetypes, filenamepattern, dsvars, attrindexes, attrlength):
"""create a function to add variable information to a dataset.
dsname is the dataset name to append to.
filetypes is the list of file types to include.
filenamepattern is a regular expression to filter filename roots.
dsvars is a special dictionary of variables and attributes. See function addunique.
attrindexes is a dictionary with keys of lower case attribute names and values as the dataset index starting with 0.
attrlength is the size of the attribute string variables"""
ftdict = {"spss":[".sav", ".por"], "sas":[".sas7bdat",".sd7",".sd2",".ssd01",".xpt"], "stata":[".dta"]}
spsscmd = {"spss":"GET FILE='%s'.", "sas": "GET SAS DATA='%s'.", "stata": "GET STATA FILE='%s'."}
if filenamepattern:
try:
pat = re.compile(filenamepattern, re.IGNORECASE)
except:
raise ValueError(_("Invalid filenamepattern: %s") % filenamepattern)
else:
pat = None
ll = len(dsvars)
includeAttrs = ll > 3
blanks = (ll-3) * [" "]
def addinfo(filespec):
"""open the file if appropriate type, extract variable information, and add it to dataset dsname.
filespec is the file to open
dsname is the dataset name to append to
filetypes is the list of file types to include."""
fnsplit = os.path.split(filespec)[1]
fn, ext = os.path.splitext(fnsplit)
for ft in filetypes:
if ext in ftdict[ft]:
if pat is None or pat.match(fn):
try:
spss.Submit(spsscmd[ft] % filespec)
spss.Submit("DATASET NAME @__GATHERMD__.")
except:
if not isinstance(filespec, str):
filespec = str(filespec, locale.getlocale()[1])
raise EnvironmentError(_("File could not be opened, skipping: %s") % filespec)
break
else:
return addinfo
with DataStep():
ds = spss.Dataset(name=dsname) # not the active dataset
dssource = spss.Dataset(name="*") # The dataset to examine
numvars = spss.GetVariableCount() # active dataset
variables = dssource.varlist
for v in range(numvars):
lis = [filespec.replace("\\","/"), spss.GetVariableName(v), spss.GetVariableLabel(v)]
lis.extend(blanks)
lis = [item+ 256*" " for item in lis]
ds.cases.append(lis)
#ds.cases.append([filespec.replace("\\","/"), spss.GetVariableName(v), spss.GetVariableLabel(v), *blanks])
if includeAttrs:
attrs = variables[v].attributes.data
for a in attrs:
if a.lower() in attrindexes:
ds.cases[-1, attrindexes[a.lower()]+ 3] = attrs[a][0] + attrlength * " "# allow for standard variables
spss.Submit("DATASET CLOSE @__GATHERMD__.")
return addinfo
def addunique(dsdict, key):
"""Add modified version of key to dictionary dsdict. Return generated key.
dsdict is a dictionary whose keys will be lower case strings and whose values are unique SPSS variable names.
duplicate keys are ignored.
keys are automatically prefixed with "*" to separate them from variable names that could be identical."""
key1 = "*" + key.lower()
if key1 in dsdict:
return key1
# make a version of key that is unique in the dictionary values and a legal variable name length
i=0
keymod = spssaux.truncatestring(key, 64)
while keymod.lower() in [k.lower() for k in list(dsdict.values())]:
keymod = spssaux.truncatestring(key, 59) + "_" + str(i)
i += 1
dsdict[key1] = keymod
return key1
escapelist = [('\a', r'\a'), ('\b', r'\b'), ('\f', r'\f'), ('\n', r'\n'), ('\r', r'\r'), ('\t',r'\t'),('\v', r'\v')]
def fixescapes(item):
for esc, repl in escapelist:
item = item.replace(esc, repl)
return item
# Example.
'''dsname = gather(["c:/temp/firstlevel"], filetypes=['spss','sas'], attrlist=['importance', 'relevance', 'VariableLabel'])
spss.Submit(r"""DATASET ACTIVATE %s.
SAVE OUTFILE='c:/temp2/gathered.sav'.""" % dsname)
dsname=gather(["c:/spss16/samples/employee data.sav"])'''
|
StarcoderdataPython
|
1876457
|
<filename>api/core/middleware/tests/test_cache_control.py
from core.middleware.cache_control import NeverCacheMiddleware
from django.http import HttpResponse
def test_NoCacheMiddleware_adds_cache_control_headers(mocker):
# Given
a_response = HttpResponse()
mocked_get_response = mocker.MagicMock(return_value=a_response)
mock_request = mocker.MagicMock()
middleware = NeverCacheMiddleware(mocked_get_response)
# When
response = middleware(mock_request)
# Then
assert (
response.headers["Cache-Control"]
== "max-age=0, no-cache, no-store, must-revalidate, private"
)
assert response.headers["Pragma"] == "no-cache"
|
StarcoderdataPython
|
1648175
|
"""
run.py (batch_geocode)
======================
Geocode any row-delimited json data, with columns corresponding
to a city/town/etc and country.
"""
import logging
import os
import pandas as pd
import s3fs # not called but required import to read from s3://
from nesta.packages.geo_utils.country_iso_code import country_iso_code_dataframe
from nesta.packages.geo_utils.geocode import geocode_batch_dataframe
from nesta.core.orms.geographic_orm import Geographic
from nesta.core.orms.orm_utils import db_session, get_mysql_engine
def run():
batch_file = os.environ['BATCHPAR_batch_file']
db = os.environ['BATCHPAR_db_name']
bucket = os.environ['BATCHPAR_bucket']
# database setup
engine = get_mysql_engine('BATCHPAR_config', 'mysqldb', db)
# collect data
target = f"s3://{bucket}/{batch_file}"
df = pd.read_json(target, orient='records')
logging.info(f"{len(df)} locations to geocode")
# append country iso codes and continent
df = country_iso_code_dataframe(df)
logging.info("Country ISO codes appended")
# geocode, appending latitude and longitude columns, using the q= query method
df = geocode_batch_dataframe(df, query_method='query_only')
logging.info("Geocoding complete")
# remove city and country columns and append done column
df = df.drop(['city', 'country'], axis=1)
df['done'] = True
# convert to list of dict and output to database
rows = df.to_dict(orient='records')
logging.info(f"Writing {len(rows)} rows to database")
with db_session(engine) as session:
session.bulk_update_mappings(Geographic, rows)
logging.warning("Batch task complete")
if __name__ == '__main__':
log_stream_handler = logging.StreamHandler()
logging.basicConfig(handlers=[log_stream_handler, ],
level=logging.INFO,
format="%(asctime)s:%(levelname)s:%(message)s")
if False:
environ = {"BATCHPAR_done": "False",
"BATCHPAR_batch_file" : "geocoding_batch_15597590150867765.json",
"BATCHPAR_config": "/home/ec2-user/nesta/nesta/core/config/mysqldb.config",
"BATCHPAR_bucket": "nesta-production-intermediate",
"BATCHPAR_test": "True",
"BATCHPAR_db_name": "dev"}
for k, v in environ.items():
os.environ[k] = v
run()
|
StarcoderdataPython
|
1607805
|
<reponame>insequor/webpy-graphql
from .utils import props
from inspect import isclass
class InitSubclassMeta(type):
def __init__(self, classname, baseclasses, attrs):
_Meta = getattr(self, "GraphQLMeta", None)
_meta_props = {}
if _Meta:
if isinstance(_Meta, dict):
_meta_props = _Meta
elif isclass(_Meta):
_meta_props = props(_Meta)
else:
raise Exception("Meta have to be either a class or a dict. Received {}".format(_Meta))
# Apply the GraphQLMeta properties to the newly created class; updating the
# ``attrs`` dict here would have no effect because the class already exists.
for name, value in _meta_props.items(): setattr(self, name, value)
|
StarcoderdataPython
|
3390242
|
<reponame>linksapprentice1/dys
# -*- coding: utf-8 -*-
import sys
from Tkinter import *
from tkFileDialog import *
def printGameTable(days, left_or_right):
print """<table class=\"tableizer-table\" style=\"float:""" + left_or_right +"""\">
<tbody>
<tr class=\"tableizer-firstrow\">
<th>DAY</th>
<th>DATE</th>
<th> TIME </th>
<th> HOME TEAM </th>
<th> AWAY TEAM </th>
</tr> """
for i, day in enumerate(days):
day_line = day.strip().split("\n")
print """ <tr>
<td>""" + day_line[0] + """</td>
<td>""" + day_line[1] + """</td>
<td style="border:none"> </td>
<td style="border:none"> </td>
<td style="border:none"> </td>
</tr>"""
for j, game in enumerate(day_line[2:]):
game_parts = game.strip().split()
print """<tr>
<td style="border:none"> </td>
<td style="border:none"> </td>
<td>""" + game_parts[0] + """</td>
<td>""" + game_parts[1] + """</td>
<td>""" + game_parts[2] + """</td>
</tr>"""
# if i < len(days)-1:
# print """<tr>
# <td> </td>
# <td> </td>
# <td> </td>
# <td> </td>
# <td> </td>
# </tr>"""
print """</tbody>
</table>"""
def printCoachTable(coaches):
print """<table class=\"tableizer-table\">
<tbody>
<tr class=\"tableizer-firstrow\">
<th colspan="12"> Coaches </th>
</tr>
<tr> """
for index, coach in enumerate(coaches):
print """<td>""" + str(index) + """</td>"""
print """<td>""" + coach + """</td>"""
print """</tr>
</tbody>
</table> """
#GUI
root = Tk()
root.wm_title("Generate Tables")
w = Label(root, text="Select template text file.")
fileName = askopenfilename(parent=root, title="Select template text file")
#Enter data
input = open(fileName).read()
sections = [x for x in input.split('~')]
time, age_group = sections[0].strip().split("\n")[:2]
days = sections[1:len(sections)-2]
coaches = sections[len(sections)-2].strip().replace("\n", ":").strip().split(":")[1::2]
director = sections[len(sections)-1].strip().replace("\n", ":").strip().split(":")[1]
import base64
file_name_string = base64.urlsafe_b64encode(time + "_" + age_group)[:23]
sys.stdout = open(file_name_string + ".txt", "w")
print """<p> </p>
<h1>""" + time + """ """ + age_group + """</h1>
<h2>GAMES AT RUSTCRAFT</h2>
<p><br />
<style type=\"text/css\">
h1,h2,p{
text-align:center;
}
h2{
margin-bottom: 0px;
}
table.tableizer-table {
margin: 0 auto;
border: 1px solid #CCC; font-family: Arial, Helvetica, sans-serif
font-size: 12px;
}
.tableizer-table td {
text-align: center;
padding: 4px;
margin: 3px;
border: 1px solid #ccc;
}
.tableizer-table th {
background-color: #B33333;
color: #FFF;
font-weight: bold;
}
</style>
</p>"""
print """<div style=\"margin: 0 auto;display: table;\">"""
printGameTable(days[:len(days)/2], "left")
print """<div style=\"float:left;width:10px\"> </div>"""
printGameTable(days[len(days)/2:], "right")
print """</div><div style=\"clear:both;height:10px\"> </div>"""
printCoachTable(coaches)
print """<p> </p><h1>Director: """ + director + """</h1>"""
sys.stdout.flush()
import shutil
shutil.copy2(file_name_string + ".txt", file_name_string + ".html")
import webbrowser
import time
webbrowser.open(file_name_string + ".html")
time.sleep(2)
import subprocess
subprocess.call("cscript notepad.vbs " + file_name_string + ".txt", shell=True)
|
StarcoderdataPython
|
4887657
|
<reponame>AntoineGagne/ulaval-notify
"""This module contains the code related to the session handling.
:copyright: (c) 2018 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
from collections import namedtuple
from copy import copy
from threading import Lock, Timer
from requests import Request
from .constants import API_URL, BASE_URL
#: Immutable type that contains information about the session token
Token = namedtuple(
'Token', [
'client_id',
'token',
'token_type',
'expiration_date'
]
)
#: Immutable type that contains information about the current user of the API
UserDetails = namedtuple(
'UserDetails', [
'user_id',
'email',
'identification_number',
'first_name',
'last_name',
'username',
'change_number'
]
)
def refresh_periodically(interval, session_manager):
"""Refresh the session held by the session manager every given interval.
:param interval: The number of time before refreshing the session
:param session_manager: The session manager that will refresh the session
"""
session_manager.refresh()
timer = Timer(
interval,
refresh_periodically,
args=(interval, session_manager)
)
timer.start()
def create_token(token_details):
"""Create a token based on the API response.
:param token_details: The API response containing the token details
:returns: The token details
"""
return Token(
client_id=token_details['idClient'],
token=token_details['token'],
token_type=token_details['typeToken'],
expiration_date=token_details['dateExpiration']
)
def create_user_details(user_details):
"""Create a type holding the user's details based on the API response.
:param user_details: The API response containing the user's details
:returns: The user details
"""
return UserDetails(
user_id=user_details['idUtilisateurMpo'],
email=user_details['courrielPrincipal'],
identification_number=user_details['nie'],
first_name=user_details['prenom'],
last_name=user_details['nom'],
username=user_details['pseudonyme'],
change_number=user_details['numeroChangement']
)
class SessionManager:
"""The session manager handles requests to the API related to the session.
:param session: The API session
:param cookie_name: The named of the cookie to set in the session
:param cookie_content: The content of the cookie to set in the session
"""
#: The route used to refresh the session token
refresh_token_route = '{base_url}/auth/rafraichirtoken'.format(
base_url=BASE_URL
)
#: The route used to refresh the session details
refresh_session_route = '{api_url}/refreshsession'.format(
api_url=API_URL
)
def __init__(self, session, cookie_name, cookie_content):
"""Create a new session manager."""
self.__session = session
self.__lock = Lock()
self._cookie_name = cookie_name
self._update_cookies(cookie_content)
#: The details of the API session token
self.token_details = create_token(cookie_content['detailsToken'])
#: The details of the current user of the API
self.user_details = create_user_details(cookie_content['utilisateurMpo'])
def _update_cookies(self, cookie_content):
with self.__lock:
self.__session.cookies.set(
self._cookie_name,
cookie_content,
domain='monportail.ulaval.ca',
path='/public/modules/mpo-client/services/securestorage/cookie/',
secure=True
)
@property
def _session(self):
return copy(self.__session)
def send(self, request):
"""Send the given request with the current session.
:param request: The request to send to the API
:returns: The response of the API
"""
response = None
with self.__lock:
request = self._add_authentication_header(request)
response = self._session.send(
self._session.prepare_request(request)
)
return response.json() if response else response
def _add_authentication_header(self, request):
request.headers['Authorization'] = '{token_type} {token}'.format(
token_type=self.token_details.token_type,
token=self.token_details.token
)
request.headers['Accept'] = 'application/json, text/plain, */*'
return request
def refresh(self):
"""Refresh the session."""
self._refresh_session()
response = self._refresh_token()
self._update_cookies(response)
self.token_details = create_token(response['detailsToken'])
def _refresh_session(self):
with self.__lock:
self.__session.post(SessionManager.refresh_session_route)
def _refresh_token(self):
request = Request(
'POST',
SessionManager.refresh_token_route
)
return self.send(request)
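# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# The cookie payload mirrors the keys read by ``create_token`` and
# ``create_user_details``; every value below, and the cookie name, is a
# made-up placeholder.
if __name__ == '__main__':
    import requests

    cookie_content = {
        'detailsToken': {
            'idClient': 'client-id',
            'token': 'access-token',
            'typeToken': 'Bearer',
            'dateExpiration': '2018-01-01T00:00:00Z',
        },
        'utilisateurMpo': {
            'idUtilisateurMpo': '1234',
            'courrielPrincipal': 'someone@example.com',
            'nie': '111222333',
            'prenom': 'Jane',
            'nom': 'Doe',
            'pseudonyme': 'jdoe',
            'numeroChangement': 1,
        },
    }
    manager = SessionManager(requests.Session(), 'session-cookie', cookie_content)
    print(manager.user_details.username)
    # With a real MonPortail cookie this would keep the token fresh every
    # 15 minutes in a background Timer thread:
    # refresh_periodically(15 * 60, manager)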
|
StarcoderdataPython
|
1939929
|
<gh_stars>1-10
from typing import Union
import numpy as np
import talib
from jesse.indicators.ma import ma
from jesse.helpers import get_candle_source, same_length
from jesse.helpers import slice_candles
from jesse.indicators.mean_ad import mean_ad
from jesse.indicators.median_ad import median_ad
def rvi(candles: np.ndarray, period: int = 10, ma_len: int = 14, matype: int = 1, devtype: int = 0, source_type: str = "close",
sequential: bool = False) -> Union[float, np.ndarray]:
"""
RVI - Relative Volatility Index
:param candles: np.ndarray
:param period: int - default: 10
:param ma_len: int - default: 14
:param matype: int - default: 1
:param source_type: str - default: "close"
:param sequential: bool - default: False
:return: float | np.ndarray
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
if devtype == 0:
dev = talib.STDDEV(source, period)
elif devtype == 1:
dev = mean_ad(source, period, sequential=True)
elif devtype == 2:
dev = median_ad(source, period, sequential=True)
diff = np.diff(source)
diff = same_length(source, diff)
up = np.nan_to_num(np.where(diff <= 0, 0, dev))
down = np.nan_to_num(np.where(diff > 0, 0, dev))
up_avg = ma(up, period=ma_len, matype=matype, sequential=True)
down_avg = ma(down, period=ma_len, matype=matype, sequential=True)
result = 100 * (up_avg / (up_avg + down_avg))
return result if sequential else result[-1]
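# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It assumes jesse's (n, 6) candle layout [timestamp, open, close, high, low,
# volume]; the random-walk close prices exist only to give the indicator a
# series to chew on.
if __name__ == '__main__':
    n = 300
    closes = 100 + np.cumsum(np.random.randn(n))
    candles = np.column_stack([
        np.arange(n) * 60000,         # timestamp (ms)
        closes,                       # open (reuse close for simplicity)
        closes,                       # close
        closes + 1.0,                 # high
        closes - 1.0,                 # low
        np.full(n, 1000.0),           # volume
    ])
    print(rvi(candles))                          # latest RVI value
    print(rvi(candles, sequential=True).shape)   # full series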
|
StarcoderdataPython
|
4807788
|
<filename>pyrobolearn/control/dp.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file describes the Dynamic Programming algorithm
class DP(object):
r"""Dynamic Programming (DP)
Type: model-based
"Dynamic programming usually refers to simplifying a decision by breaking it down into a sequence of decision
steps over time" (Wikipedia)
Bellman's equations:
.. math:: V(s_t) = \max_{a_t} \left[ r(s_t, a_t) + \gamma \, V(s_{t+1}) \right]
References:
[1] "Dynamic Programming", Bellman, 1957
[2] "Reinforcement Learning: An Introduction", Sutton and Barto, 1998 (chap4)
[3] "Optimal Control Theory: An Introduction", Kirk, 1970
[4] "Dynamic Programming and Optimal Control", Bertsekas, 1987
"""
def __init__(self):
pass
def compute(self):
pass
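# ---------------------------------------------------------------------------
# Worked sketch (added for illustration; not part of the original module):
# value iteration on a tiny 2-state, 2-action MDP, i.e. repeatedly applying
# the Bellman optimality backup V(s) <- max_a [ r(s,a) + gamma * sum_s' P(s'|s,a) V(s') ].
# The transition and reward numbers are arbitrary example values.
if __name__ == '__main__':
    # P[a][s][s'] = transition probability, R[s][a] = immediate reward
    P = [[[0.9, 0.1], [0.2, 0.8]],      # action 0
         [[0.5, 0.5], [0.0, 1.0]]]      # action 1
    R = [[1.0, 0.0],                    # rewards in state 0 for actions 0/1
         [0.0, 2.0]]                    # rewards in state 1 for actions 0/1
    gamma, V = 0.9, [0.0, 0.0]
    for _ in range(200):
        V = [max(R[s][a] + gamma * sum(P[a][s][sp] * V[sp] for sp in (0, 1))
                 for a in (0, 1))
             for s in (0, 1)]
    print(V)   # converged state values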
|
StarcoderdataPython
|
139775
|
<filename>python/testData/inspections/PyProtectedMemberInspection/namedTuple.py
from collections import namedtuple
i = namedtuple('Point', ['x', 'y'], verbose=True)
i._replace( **{"a":"a"})
|
StarcoderdataPython
|
3451182
|
<filename>PUG/Demo 1/main.py<gh_stars>100-1000
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.animation import Animation
class DemoApp(App):
"""
The App class is a singleton and creates the base of your application.
"""
def build(self):
""" This method returns the root widget of your application. """
float_layout = FloatLayout()
button = Button(text='Click me',
size_hint=(0.4, 0.2),
pos_hint={'center_x': 0.5, 'center_y': 0.8})
button.bind(on_press=self.button_click)
self.image = Image(source='1.png',
size_hint=(0.2, 0.2),
pos_hint={'right': 1, 'top': 1},
allow_stretch=True)
float_layout.add_widget(self.image)
float_layout.add_widget(button)
return float_layout
def button_click(self, button):
""" Fired, via binding, when the button is clicked. """
anim = Animation(pos_hint={'x': 0, 'y': 0}, duration=1)
anim += Animation(size_hint=(1, 1), duration=1, t='in_bounce')
anim.start(self.image)
if __name__ == "__main__":
DemoApp().run()
|
StarcoderdataPython
|
178570
|
import torch
from IPython.core.display import display
from torch import Tensor
from torch.nn import DataParallel
# noinspection PyProtectedMember
from torch.utils.data import DataLoader
from datasets.deep_fashion import ICRBDataset, ICRBCrossPoseDataloader
from modules.pgpg import PGPG
from train_setup import args, run_locally, exec_device, log_level, datasets_groot, models_groot, in_notebook
from utils.dep_free import get_tqdm
from utils.ifaces import FilesystemDataset
from utils.metrics import GanEvaluator
###################################
### Hyper-parameters settings ###
###################################
# - training
n_epochs = 100
batch_size = 48 if not run_locally else 48
train_test_splits = [90, 10] # for a 90% training - 10% evaluation set split
# - evaluation
metrics_n_samples = 1000 if not run_locally else 2
metrics_batch_size = 32 if not run_locally else 1
f1_k = 3 if not run_locally else 1
# - visualizations / checkpoints steps
display_step = 200
checkpoint_step = 600
metrics_step = 1800 # evaluate model every 3 checkpoints
# - dataset
target_shape = 128
target_channels = 3
skip_pose_norm = True
# - PGPG config file
pgpg_config_id = f'{target_shape}_MSE_256_6_4_5_none_none_1e4_true_false_false' # as proposed in the original paper
###################################
### Dataset Initialization ###
###################################
# - image transforms:
# If target_shape differs from the loaded shape, resize & crop. If target_channels differs from the
# loaded channels, convert to grayscale.
# Update: Now done automatically if you set target_channels, target_shape when instantiating the dataloader.
gen_transforms = ICRBDataset.get_image_transforms(target_shape=target_shape, target_channels=target_channels)
# - the dataloader used to access the training dataset of cross-scale/pose image pairs at every epoch
# > len(dataloader) = <number of batches>
# > len(dataloader.dataset) = <number of total dataset items>
dataloader = ICRBCrossPoseDataloader(dataset_fs_folder_or_root=datasets_groot, batch_size=batch_size,
image_transforms=gen_transforms, skip_pose_norm=skip_pose_norm,
splits=train_test_splits, pin_memory=not run_locally, log_level=log_level)
dataset = dataloader.dataset
# - ensure dataset is fetched locally and unzipped
if isinstance(dataset, FilesystemDataset):
dataset.fetch_and_unzip(in_parallel=False, show_progress=True)
elif hasattr(dataset, 'dataset') and isinstance(dataset.dataset, FilesystemDataset):
dataset.dataset.fetch_and_unzip(in_parallel=False, show_progress=True)
else:
raise TypeError('dataset must implement utils.ifaces.FilesystemDataset in order to be auto-downloaded and unzipped')
# - apply rudimentary tests
assert issubclass(dataloader.__class__, DataLoader)
# noinspection PyTypeChecker
assert len(dataloader) == len(dataset) // batch_size + (1 if len(dataset) % batch_size else 0)
_image_1, _image_2, _dense_pose_2 = next(iter(dataloader))
assert tuple(_image_1.shape) == (batch_size, target_channels, target_shape, target_shape)
assert tuple(_image_2.shape) == (batch_size, target_channels, target_shape, target_shape)
assert tuple(_dense_pose_2.shape) == (batch_size, target_channels, target_shape, target_shape)
###################################
### Models Initialization ###
###################################
# - initialize evaluator instance (used to run GAN evaluation metrics: FID, IS, PRECISION, RECALL, F1 and SSIM)
evaluator = GanEvaluator(model_fs_folder_or_root=models_groot, gen_dataset=dataloader.test_set, target_index=1,
condition_indices=(0, 2), n_samples=10000, batch_size=metrics_batch_size,
device=exec_device, f1_k=f1_k, ssim_c_img=target_channels)
# - initialize model
chkpt_step = args.chkpt_step
try:
if chkpt_step == 'latest':
pgpg_chkpt_step = chkpt_step
elif isinstance(chkpt_step, str) and chkpt_step.isdigit():
pgpg_chkpt_step = int(chkpt_step)
else:
pgpg_chkpt_step = None
except NameError:
pgpg_chkpt_step = None
# noinspection PyTypeChecker
pgpg = PGPG(model_fs_folder_or_root=models_groot, config_id=pgpg_config_id, dataset_len=len(dataset),
chkpt_epoch=pgpg_chkpt_step, evaluator=evaluator, device=exec_device, log_level=log_level)
pgpg.logger.debug(f'Using device: {str(exec_device)}')
pgpg.logger.debug(f'Model initialized. Number of params = {pgpg.nparams_hr}')
# - setup multi-GPU training
if torch.cuda.device_count() > 1:
pgpg.gen = DataParallel(pgpg.gen)
pgpg.info(f'Using {torch.cuda.device_count()} GPUs for PGPG Generator (via torch.nn.DataParallel)')
# - load dataloader state (from model checkpoint)
if 'dataloader' in pgpg.other_state_dicts.keys():
dataloader.set_state(pgpg.other_state_dicts['dataloader'])
pgpg.logger.debug(f'Loaded dataloader state! Current perm_index={dataloader.get_state()["perm_index"]}')
###################################
### Training Loop ###
###################################
# - get the correct tqdm instance
exec_tqdm = get_tqdm()
# - start training loop from last checkpoint's epoch and step
gcapture_ready = True
async_results = None
pgpg.logger.info(f'[training loop] STARTING (epoch={pgpg.epoch}, step={pgpg.initial_step})')
for epoch in range(pgpg.epoch, n_epochs):
image_1: Tensor
image_2: Tensor
pose_2: Tensor
# noinspection PyProtectedMember
d = {
'step': pgpg.step,
'initial_step': pgpg.initial_step,
'epoch': pgpg.epoch,
'_counter': pgpg._counter,
'epoch_inc': pgpg.epoch_inc,
}
# initial_step = pgpg.initial_step % len(dataloader)
pgpg.logger.debug('[START OF EPOCH] ' + str(d))
for image_1, image_2, pose_2 in exec_tqdm(dataloader, initial=pgpg.initial_step):
# Transfer image batches to GPU
image_1 = image_1.to(exec_device)
image_2 = image_2.to(exec_device)
pose_2 = pose_2.to(exec_device)
# Perform a forward + backward pass + weight update on the Generator & Discriminator models
disc_loss, gen_loss = pgpg(image_1=image_1, image_2=image_2, pose_2=pose_2)
# Metrics & Checkpoint Code
if pgpg.step % checkpoint_step == 0:
# Check if another upload is pending
if not gcapture_ready and async_results:
# Wait for previous upload to finish
pgpg.logger.warning('Waiting for previous gcapture() to finish...')
[r.wait() for r in async_results]
pgpg.logger.warning('DONE! Starting new capture now.')
# Capture current model state, including metrics and visualizations
async_results = pgpg.gcapture(checkpoint=True, metrics=pgpg.step % metrics_step == 0, visualizations=True,
dataloader=dataloader, in_parallel=True, show_progress=True,
delete_after=False)
# Visualization code
elif pgpg.step % display_step == 0:
visualization_img = pgpg.visualize()
visualization_img.show() if not in_notebook() else display(visualization_img)
# Check if a pending checkpoint upload has finished
if async_results:
gcapture_ready = all([r.ready() for r in async_results])
if gcapture_ready:
pgpg.logger.info(f'gcapture() finished')
if pgpg.latest_checkpoint_had_metrics:
pgpg.logger.info(str(pgpg.latest_metrics))
async_results = None
# If run locally one pass is enough
if run_locally and gcapture_ready:
break
# If run locally one pass is enough
if run_locally:
break
# noinspection PyProtectedMember
d = {
'step': pgpg.step,
'initial_step': pgpg.initial_step,
'epoch': pgpg.epoch,
'_counter': pgpg._counter,
'epoch_inc': pgpg.epoch_inc,
}
pgpg.logger.debug('[END OF EPOCH] ' + str(d))
# Check if a pending checkpoint exists
if async_results:
([r.wait() for r in async_results])
pgpg.logger.info(f'last gcapture() finished')
if pgpg.latest_checkpoint_had_metrics:
pgpg.logger.info(str(pgpg.latest_metrics))
async_results = None
# Training finished!
pgpg.logger.info('[training loop] DONE')
|
StarcoderdataPython
|
243454
|
# %%
from tensorflow import keras
import numpy as np
import cv2
import os
IMG_SIZE = 50
DATASETDIR = 'D:\\Dataset\\test\\Dog'
def imgPrepera(path):
img_array = cv2.imread(os.path.join(DATASETDIR, path))
img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
return np.array(img_array).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
# %%
model = keras.models.load_model('catdog-64x4')
predictions = model.predict([imgPrepera('images.jpg')])
print(predictions)
# %%
|
StarcoderdataPython
|
6634685
|
<filename>ppmessage/__init__.py
from . import backend
"""
version format, MAIN.SUB.HOTFIX.DEV
1.0.0.0:
Initial to SaaS for ppmessage.cn
2.0.0.0:
Github to SaaS for ppmessage.com
2.0.0.1:
PPCom send<->recv with PPKefu
3.0.0.0:
one main to start all ppmessage components
"""
__version__ = "3.0.0.0"
|
StarcoderdataPython
|
111582
|
import cv2
import numpy as np
import tensorflow as tf
new_model = tf.keras.models.load_model('my_first_model.h5')
cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
while 1:
# get a frame
ret, frame = cap.read()
# show a frame
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.1, 4)
for x, y, w, h in faces:
roi_gray = gray[y:y + h, x:x + w]
roi_color = frame[y:y + h, x:x + w]
# cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
faces = faceCascade.detectMultiScale(roi_gray)
for (ex, ey, ew, eh) in faces:
face_roi = roi_color[ey:ey + eh, ex:ex + ew]
final_image = cv2.resize(face_roi, (224, 224))
final_image = final_image.reshape(-1, 224, 224, 3) # reshape already adds the batch dimension TF expects
final_image = final_image / 255.0 # scale pixel values to [0, 1]
Predictions = new_model.predict(final_image)
print(Predictions)
if (Predictions > 0.5):
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(frame, 'Wearing Mask!', (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 1)
# cv2.putText(img, str,origin,font,size,color,thickness)
else:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.putText(frame, 'No Mask!', (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
# if(Predictions<0.45):
# print("No mask")
# elif(Predictions>0.55):
# print("With mask")
# else:
# print("Can not determine")
cv2.imshow("capture", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
1932912
|
'''
Find Cycle in the List.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def hasCycle(self, head: Optional[ListNode]) -> bool:
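# Floyd's cycle detection (tortoise and hare): advance one pointer by one
# node and the other by two; the two pointers can only meet again if the
# list contains a cycle.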
hare = head
turtle = head
while(turtle and hare and hare.next):
hare = hare.next.next
turtle = turtle.next
if(turtle == hare):
return True
return False
|
StarcoderdataPython
|
6595200
|
from itertools import product
from oop import Customer, Shop
from oop import ProductStock
from oop import Basket
shop = Shop('stock.csv')
rich_customer_order=Customer('customer.csv')
rich_customer_order.calculate_costs(shop.stock)
customer_poor=Customer('OutOfBudget.csv')
shop.shop_info()
shop.shop_cash('stock.csv')
print(shop.names())
custom_active = True
while custom_active:
shop.menu()
answer = input("Option selected: ")
if int(answer) == 1:
print("1 selected")
ProductName = input("Enter product: ")
ProductQuantity = int(input("Enter quantity: "))
CustomerBudget = int(input("Enter your budget: "))
my_basket = Basket(ProductName,ProductQuantity,CustomerBudget)
my_basket.calculate_costs(shop.stock)
print(shop.process_customer_order(my_basket))
elif int(answer) == 2:
shop.shop_info()
shop.shop_cash('stock.csv')
print(shop.names())
elif int(answer) == 3:
print("Option selected: 3")
print("Application finished!")
custom_active = False
elif int(answer) == 4:
print(shop.process_customer_order(rich_customer_order))
elif int(answer) == 5:
customer_poor.calculate_costs(shop.stock)
print(shop.process_customer_order(customer_poor))
elif int(answer) == 6:
shop.wants_too_much_read_csv()
|
StarcoderdataPython
|
9704954
|
<filename>pyluna-core/tests/luna/api/radiologyPreprocessingLibrary/test_app.py
import pytest
from minio import Minio
import pyarrow.parquet as pq
from luna.api.radiologyPreprocessingLibrary import app
@pytest.fixture
def client():
# setup flask api client for testing
app.app.config["OBJECT_URI"] = "mockuri:1000"
app.app.config["OBJECT_USER"] = "mockuser"
app.app.config["OBJECT_PASSWORD"] = "<PASSWORD>"
with app.app.test_client() as client:
yield client
class GetObjectResponse:
metadata = {'Accept-Ranges': 'bytes',
'Content-Security-Policy': 'block-all-mixed-content',
'Content-Type': 'application/xml'}
def test_app_post(client, monkeypatch):
def mock_bucket(*args, **kwargs):
return False
monkeypatch.setattr(Minio, "bucket_exists", mock_bucket)
monkeypatch.setattr(Minio, "make_bucket", mock_bucket)
monkeypatch.setattr(pq, "write_table", mock_bucket)
data = {"paths": ["pyluna-core/tests/luna/api/radiologyPreprocessingLibrary/testdata/1.dcm",
"pyluna-core/tests/luna/api/radiologyPreprocessingLibrary/testdata/2.dcm"],
"width": 512,
"height": 512}
response = client.post('/radiology/images/project_id/scan_id', json=data)
print(response.json)
assert 200 == response.status_code
assert response.json['message'].startswith('Parquet created at ')
def test_app_post_missing_input(client):
response = client.post('/radiology/images/project_id/scan_id')
assert 400 == response.status_code
assert response.json['message'].startswith('Missing ')
def test_app_post_bad_input(client):
data = {"dicom_paths": ["pyluna-core/tests/luna/api/radiologyPreprocessingLibrary/testdata/1.dcm",
"pyluna-core/tests/luna/api/radiologyPreprocessingLibrary/testdata/2.dcm"],
"width": 512}
response = client.post('/radiology/images/project_id/scan_id', json=data)
assert 400 == response.status_code
assert response.json['message'].startswith('Missing ')
def test_app_get(client, monkeypatch):
def mock_get(*args, **kwargs):
return GetObjectResponse()
monkeypatch.setattr(Minio, "fget_object", mock_get)
data = {"output_location": "src/api/tests/api/test.parquet"}
response = client.get('/radiology/images/project_id/scan_id', json=data)
assert 200 == response.status_code
assert response.json['message'].startswith('Downloaded object ')
def test_app_get_missing_input(client):
response = client.get('/radiology/images/project_id/scan_id')
assert 400 == response.status_code
assert 'Missing expected params.' == response.json['message']
def test_app_delete(client, monkeypatch):
def mock_get(*args, **kwargs):
return GetObjectResponse()
monkeypatch.setattr(Minio, "remove_object", mock_get)
response = client.delete('/radiology/images/project_id/scan_id')
assert 200 == response.status_code
assert response.json['message'].startswith('Removed object')
|
StarcoderdataPython
|
6525882
|
<gh_stars>0
from flask_restx import Namespace, fields
class AuthDto:
api = Namespace("Authentication", description="Authenticate and receive tokens.")
user_obj = api.model(
"User object",
{
"id": fields.String,
"first_name": fields.String,
"last_name": fields.String,
"full_name": fields.String,
"date_joined": fields.DateTime,
"email": fields.String,
"is_superuser": fields.Boolean,
"is_active": fields.Boolean,
"is_staff": fields.Boolean,
},
)
auth_login = api.model(
"Login data",
{
"email": fields.String(required=True),
"password": fields.String(required=True),
},
)
auth_refresh = api.parser()
auth_refresh.add_argument('Authorization', type=str, location='headers')
auth_success = api.model(
"Auth success response",
{
"status": fields.Boolean,
"message": fields.String,
"access_token": fields.String,
"refresh_token": fields.String,
"user": fields.Nested(user_obj),
},
)
refresh_success = api.model(
"Refresh success response",
{
"status": fields.Boolean,
"message": fields.String,
"access_token": fields.String,
},
)
|
StarcoderdataPython
|
12829056
|
<gh_stars>0
import discord
import os
import json
import numpy as np
import reddit_functions as rf
import billboard_functions as bf
import st
import activities as act
with open("keys.json") as f:
info = json.load(f)
headers = ['Task', 'Start', 'End']
todolist = np.empty(shape=[0,3])
client = discord.Client()
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
#good news subreddit
if message.content.startswith('!goodnews'):
news, link = rf.good_news()
await message.channel.send("Here's some good news\n\n" +
news + "\n" + link)
#gives movie game or TV show
elif message.content.startswith('!activity '):
answer = ""
genre = message.content.split(' ')
if len(genre) > 0:
genre = genre[1]
if genre == "game":
answer = act.game()
if genre == "TV":
answer = act.television()
if genre == "movie":
answer = act.movie()
await message.channel.send(answer)
return
elif message.content.startswith('!song'):
charts = bf.random_queue()
await message.channel.send(charts)
elif message.content.startswith('!today'):
embedVar = discord.Embed(title="Daily Dashboard", description=" ", color=discord.Color.teal())
rlink = rf.dailypic().url
embedVar.set_thumbnail(url=rlink)
embedVar.add_field(name='Post of the Day', value=rlink, inline=False)
embedVar.add_field(name="Music's Top 5", value=bf.top_five(), inline=False)
embedVar.add_field(name="Self Care Tip of the Day", value=bl.bucketRandom(), inline=False)
embedVar.set_footer(text='Source: https://wholefully.com/self-care-ideas/')
await message.channel.send(embed=embedVar)
elif st.contains(message.content)[0]:
info = st.contains(message.content)
await message.channel.send(st.are_you_okay(info[1]))
###Calendar/To Do List
elif message.content.startswith('!addtask'):
global todolist
args = message.content.split(' ')
task = args[1]
start = args[2]
end = args[3]
item = np.array([task, start, end])
todolist = np.append(todolist, [item], axis=0)
todolist = todolist[todolist[:, 1].argsort()]
print(todolist)
await message.channel.send('Task added')
elif message.content.startswith('!todo'):
await message.channel.send(headers)
for item in todolist:
await message.channel.send(item)
elif message.content.startswith('!done'):
args = message.content.split(' ')[1]
for item in range(len(todolist)):
if args == todolist[item][0]:
await message.channel.send("Congrats on finishing " + args + "!")
todolist = np.delete(todolist, item, axis=0)
client.run(info["discord"]["discord_token"])
|
StarcoderdataPython
|
254260
|
from segmenter.models.FoldWeightFinder import FoldWeightFinder
import os
class OrganizedFoldWeightFinder(FoldWeightFinder):
def __init__(self, directory):
self.directory = os.path.join(directory, "results", "weights")
fold_weights = [
os.path.join(self.directory, d) for d in os.listdir(self.directory)
]
fold_names = [f.split("/")[-1][:-3] for f in fold_weights]
self.folds = dict(zip(fold_names, fold_weights))
def get(self, fold_name):
assert fold_name in self.folds, "Fold {} does not exist in loader".format(
fold_name)
return self.folds[fold_name]
def keys(self):
return self.folds.keys()
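# Usage sketch (added for illustration; the paths and fold name are made-up
# examples based on the layout the constructor expects:
# <directory>/results/weights/<fold_name>.h5):
#
#   finder = OrganizedFoldWeightFinder("/experiments/job42")
#   print(list(finder.keys()))          # e.g. ['fold0', 'fold1', ...]
#   weights_file = finder.get("fold0")  # absolute path to fold0.h5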
|
StarcoderdataPython
|
248099
|
import sys
if sys.version[0]=="3": raw_input=input
|
StarcoderdataPython
|
9748376
|
class NeuralNet():
def __init__(self, game):
pass
def train(self, examples):
pass
def predict(self, board):
pass
def save_checkpoint(self, folder, filename):
pass
def load_checkpoint(self, folder, filename):
pass
|
StarcoderdataPython
|
299515
|
class Producto:
def __init__(self, nombre, descripcion, precio, stock, codigo):
self.nombre = nombre
self.descripcion = descripcion
self.precio = precio
self.stock = stock
self.codigo = codigo
|
StarcoderdataPython
|
9643915
|
<reponame>Dimwest/jsonymize<gh_stars>0
import pyspark
from configparser import ConfigParser
from pathlib import Path
from src.spark import df_join, get_source_ids, anonymize_df, show_examples
from pyspark.sql.functions import input_file_name
if __name__ == '__main__':
# Parse config
cfg = ConfigParser()
cfg.read(f'{Path(__file__).parent}/config.ini')
# Create Spark configuration and SQLContext
spark_cfg = pyspark.SparkConf().setAll([('spark.driver.host', cfg['sparkconfig']['host'])])
sc = pyspark.SparkContext(conf=spark_cfg)
sqlContext = pyspark.SQLContext(sc)
# Create Spark Dataframe listing all user IDs to anonymize data from
user_ids = get_source_ids(sqlContext, cfg)
# Read target events JSON data into Dataframe
events = sqlContext.read.json(cfg['anonymize']['target_dir'],
multiLine=cfg.getboolean('anonymize', 'multiline')
).withColumn("filename", input_file_name())
# Left join both Dataframes and flag events to anonymize
events = df_join(events, user_ids, 'left', cfg)
# Anonymize events data
events = anonymize_df(events, cfg)
show_examples(events, cfg)
|
StarcoderdataPython
|
291457
|
<filename>adc_tmp36.py
import spidev, time

spi = spidev.SpiDev()
spi.open(0, 0)


def analog_read(channel):
    # Read one single-ended ADC channel over SPI (MCP3008-style command/response).
    r = spi.xfer2([1, (8 + channel) << 4, 0])
    adc_out = ((r[1] & 3) << 8) + r[2]
    return adc_out


while True:
    reading = analog_read(0)
    voltage = reading * 3.3 / 1024
    # TMP36: 10 mV per degree C with a 500 mV offset, so C = V * 100 - 50.
    temp_c = voltage * 100 - 50
    temp_f = temp_c * 9.0 / 5.0 + 32
    print("Temp C=%f\t\tTemp f=%f" % (temp_c, temp_f))
    time.sleep(1)
|
StarcoderdataPython
|
3576622
|
import hyperchamber as hc
from hyperchamber import Config
from hypergan.ops import TensorflowOps
from hypergan.gan_component import ValidationException, GANComponent

import os
import hypergan as hg
import tensorflow as tf


class BaseGAN(GANComponent):
    def __init__(self, config=None, inputs=None, device='/gpu:0', ops_config=None, ops_backend=TensorflowOps,
                 batch_size=None, width=None, height=None, channels=None):
        """Initialize a new GAN."""
        self.inputs = inputs
        self.device = device
        self.ops_backend = ops_backend
        self.ops_config = ops_config
        self.created = False
        self.components = []
        self._batch_size = batch_size
        self._width = width
        self._height = height
        self._channels = channels

        if config is None:
            config = hg.Configuration.default()

        # A GAN as a component has a parent of itself
        # gan.gan.gan.gan.gan.gan
        GANComponent.__init__(self, self, config)

    def batch_size(self):
        if self._batch_size:
            return self._batch_size
        if self.inputs is None:
            raise ValidationException("gan.batch_size() requested but no inputs provided")
        return self.ops.shape(self.inputs.x)[0]

    def channels(self):
        if self._channels:
            return self._channels
        if self.inputs is None:
            raise ValidationException("gan.channels() requested but no inputs provided")
        return self.ops.shape(self.inputs.x)[-1]

    def width(self):
        if self._width:
            return self._width
        if self.inputs is None:
            raise ValidationException("gan.width() requested but no inputs provided")
        return self.ops.shape(self.inputs.x)[2]

    def height(self):
        if self._height:
            return self._height
        if self.inputs is None:
            raise ValidationException("gan.height() requested but no inputs provided")
        return self.ops.shape(self.inputs.x)[1]

    def get_config_value(self, symbol):
        if symbol in self.config:
            config = hc.Config(hc.lookup_functions(self.config[symbol]))
            return config
        return None

    def create_component(self, defn, *args, **kw_args):
        if defn is None:
            return None
        if defn['class'] is None:
            raise ValidationException("Component definition is missing 'class': {}".format(defn))
        gan_component = defn['class'](self, defn, *args, **kw_args)
        self.components.append(gan_component)
        return gan_component

    def create(self):
        if self.created:
            raise ValidationException("gan.create already called. Cowardly refusing to create graph twice")
        self.created = True

    def save(self, save_file, step=None):
        if step is not None:
            print("[hypergan] Step {:d}, saving network to {:s}".format(step, save_file))
        else:
            print("[hypergan] Saving network to ", save_file)
        os.makedirs(os.path.expanduser(os.path.dirname(save_file)), exist_ok=True)
        saver = tf.train.Saver()
        saver.save(self.session, save_file, global_step=step)

    def load(self, save_file):
        save_file = os.path.expanduser(save_file)
        if os.path.isfile(save_file) or os.path.isfile(save_file + ".index"):
            print("[hypergan] |= Loading network from " + save_file)
            dir = os.path.dirname(save_file)
            print("[hypergan] |= Loading checkpoint from " + dir)
            ckpt = tf.train.get_checkpoint_state(os.path.expanduser(dir))
            if ckpt and ckpt.model_checkpoint_path:
                saver = tf.train.Saver()
                saver.restore(self.session, save_file)
                loadedFromSave = True
                return True
            else:
                return False
        else:
            return False
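# A rough usage sketch, assuming a subclass that sets up self.session and
# implements create(); the names below are illustrative, not hypergan API:
#
#     gan = MyGAN(inputs=my_inputs, batch_size=32, width=64, height=64, channels=3)
#     gan.create()
#     gan.save('~/checkpoints/mygan/model')
#     gan.load('~/checkpoints/mygan/model')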
|
StarcoderdataPython
|