id (string, lengths 1 to 265) | text (string, lengths 6 to 5.19M) | dataset_id (7 classes)
---|---|---|
1621876 | <filename>hello_world/test_main.py
"""Tests main.py."""
from hello_world.main import message
def test_message() -> None:
"""Tests hello message."""
assert message() == "Hi world"
| StarcoderdataPython |
1627928 | #!/usr/bin/python
import cookielib
import urllib
import urllib2
import urllib3
import time
import json
import sys
import traceback
from atlas import measure_baseclass
login_url = 'https://access.ripe.net'
udm_url = 'https://atlas.ripe.net/atlas/udm.html'
data_url = 'https://atlas.ripe.net/atlas/udmgrid.json'
cookie_filename = '/tmp/ripeatlas.cookie'
login_test_url = '/atlas/user'
new_url = 'https://atlas.ripe.net/atlas/newudm.json'
class Atlas:
def __init__(self, username, password, inputfile, connection_pool_size=10):
self.username = username
self.password = password
self.inputfile = inputfile
self.sleep = 60
self.pool = urllib3.connection_from_url('https://atlas.ripe.net', maxsize=connection_pool_size)
self.headers = [('User-agent', 'Mozilla/5.0'),
('Referer', 'https://atlas.ripe.net/atlas/udm.html'),
('Host', 'atlas.ripe.net'),
('Origin', 'https://atlas.ripe.net'),
('X-Requested-With', 'XMLHttpRequest')]
self.login()
self.target_dict = measure_baseclass.load_input(self.inputfile)
"""
self.target_list = []
f = open(self.inputfile)
for line in f:
line = line.strip()
chunks = line.split()
target = chunks[0]
probes = chunks[1:]
#if target in self.target_list:
# sys.stderr.write('Already saw target %s\n' % target)
# continue
self.target_list.append((target, probes))
f.close()
"""
"""
f = open(self.inputfile)
for line in f:
line = line.strip()
chunks = line.split(' ')
nodeid = chunks[0]
targetip = chunks[1]
self.target_list.append((nodeid, targetip))
f.close()
"""
def login(self):
self.cookiejar = cookielib.LWPCookieJar()
self.opener = urllib2.build_opener(
urllib2.HTTPRedirectHandler(),
urllib2.HTTPHandler(debuglevel=0),
urllib2.HTTPSHandler(debuglevel=0),
urllib2.HTTPCookieProcessor(self.cookiejar))
self.opener.addheaders = self.headers
login_data = urllib.urlencode({
'username' : self.username,
'password' : self.password,
})
self.opener.open(login_url, login_data)
for cookie in self.cookiejar:
if cookie.name == 'crowd.token_key':
self.token = cookie.value
elif cookie.name == 'JSESSIONID':
self.session_id = cookie.value
if self.token and self.session_id:
self.headers.append(('Cookie', 'JSESSIONID='+self.session_id+'; crowd.token_key='+self.token+'; csrftoken='+self.token))
def runall(self, req, description, interval):
timestr = time.strftime('%Y-%m-%d %H:%M:%S')
target_len = len(self.target_dict)
targets = self.target_dict.keys()
i = 0
while i < target_len:
target = targets[i]
probe_list = self.target_dict[target]
url = 'http://'+target+'/'+req
"""
The maximum number of probes per request is 500, so we need to break
this up into several requests.
"""
probe_list_chunks = [probe_list[x:x+500] for x in xrange(0, len(probe_list), 500)]
for probe_list_chunk in probe_list_chunks:
try:
response = self.run(probe_list_chunk, url, description, interval)
except:
traceback.print_exc(file=sys.stderr)
sys.stderr.write('Got some kind of network exception. Sleeping for '+str(self.sleep)+'\n')
time.sleep(self.sleep)
continue
if not response['success']:
if 'Not authenticated or session is expired' in response['errorMessage']:
self.login()
sys.stderr.write(response['errorMessage']+'\n')
sys.stderr.write('Sleeping for '+str(self.sleep)+'\n')
time.sleep(self.sleep)
else:
i += 1
sys.stderr.write(str(i)+'/'+str(target_len)+'\n')
def run(self, probe_list, url, description, interval):
probe_list_str = ','.join(probe_list)
isoneoff = '"oneoff":"on",' if interval == '-1' else ''
if interval == '-1':
interval = '900' #change to something valid
""" Without oneoff
{"types":[{"intvl":"900","method":"method_get","httpver":"httpver11","headbytes":"","useragent":"httpget for atlas.ripe.net","url":"http://fi-hel-as3292.anchors.atlas.ripe.net","public":"1","descr":"http://fi-hel-as3292.anchors.atlas.ripe.net","typeid":"httpget"}],"sources":[{"probesreqlist":[4788,4906],"typeid":"probes"}]}
"""
""" With oneoff
data:{"oneoff":"on","types":[{"intvl":"900","method":"method_get","httpver":"httpver11","headbytes":"","useragent":"httpget for atlas.ripe.net","url":"http://gr-ath-as5408.anchors.atlas.ripe.net","public":"1","descr":"http://gr-ath-as5408.anchors.atlas.ripe.net","typeid":"httpget"}],"sources":[{"probesreqlist":[3775,3992],"typeid":"probes"}]}
"""
data = {}
data['csrfmiddlewaretoken'] = self.token
data['data'] = '{%s"types":[{"intvl":"%s","method":"method_get","httpver":"httpver11","headbytes":"","useragent":"Mozilla","url":"%s","public":"1","descr":"%s","typeid":"httpget"}],"sources":[{"probesreqlist":[%s],"typeid":"probes"}]}' % (isoneoff, interval, url, description, probe_list_str)
response = self.pool.request('POST', new_url, data, self.headers)
response_str = response.data
return json.loads(response_str)
if __name__ == '__main__':
if len(sys.argv) != 7:
sys.stderr.write('Usage: <username> <password> <probeid-ip-file> <request (search?q=dogs)> <collection-identifier> <repeat-interval-secs (-1 for one off)>\n')
sys.exit(1)
user = sys.argv[1]
password = sys.argv[2]
probe_file = sys.argv[3]
req = sys.argv[4]
description = sys.argv[5]
repeat_interval = sys.argv[6]
http = Atlas(user, password, probe_file)
http.runall(req, description, repeat_interval)
| StarcoderdataPython |
116853 | from rest_framework import viewsets, permissions, status
from rest_framework.response import Response
class HealthViewSet(viewsets.ViewSet):
# uncomment this to make the endpoint require authentication
# permission_classes = [permissions.IsAuthenticated]
def list(self, request, *args, **kwargs):
"""Displays "healthy"
"""
return Response({'health': 'healthy'}, status=status.HTTP_200_OK)
| StarcoderdataPython |
1730975 | #!/usr/bin/env python
"""Contains the Data Model for the MAC Address(es) Resource.
Represents the data model for the IEEE MA-L Public Assignments
Listing data. The three-octet MA-L can be used as a lookup
to help identify a provided Organizationally Unique Identifier
(OUI) which identifies a vendor, manufacturer or other org
globally/worldwide.
ref: http://standards.ieee.org/develop/regauth/oui/public.html
"""
__author__ = "<NAME>"
__copyright__ = "IBM Copyright 2015"
__credits__ = ["<NAME>"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
schema = {
'schema': {
'cloudhost': {
'type': 'string',
'default': 'Powered by IBM Bluemix and Python Eve'
},
'base16': {
'type': 'string',
'default': '######'
},
'hex': {
'type': 'string',
'default': '##-##-##'
},
'organization': {
'type': 'string',
'default': 'Doh!MissingOrg'
}
},
'allow_unknown': True
}
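# A record that fits the schema above might look like this (hedged example:
# the OUI digits and organization name below are invented for illustration
# and are not real IEEE registry data):
#   {"cloudhost": "Powered by IBM Bluemix and Python Eve",
#    "base16": "ACDE48",
#    "hex": "AC-DE-48",
#    "organization": "Example Corp"}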
| StarcoderdataPython |
3334053 | import zerorpc
import pandas as pd
import logging
import numpy as np
logging.basicConfig()
def rand_weights(n):
k = np.random.rand(n)
return k / sum(k)
def calcMuSigma(returns):
p = np.asmatrix(np.mean(returns, axis=1))
w = np.asmatrix(rand_weights(len(returns)))
C = np.asmatrix(np.cov(returns))
mu = w * p.T
sigma = np.sqrt(w * C * w.T)
# This recursion reduces outliers to keep plots pretty
if sigma > 2:
return calcMuSigma(returns)
return mu, sigma
class ServerRPC(object):
def getReturns(self, prices):
columns = []
for x in prices['columns']:
columns.append(x['name'])
dataframes = {}
prices = pd.DataFrame(prices['data'],columns=columns)
# Calculate Returns
for item in prices['ticker'].unique():
dataframes[item] = prices.loc[prices['ticker']==item]
dataframes[item]['returns'] = dataframes[item]['close'].pct_change(1)
dataframes[item] = dataframes[item].dropna(subset=['returns'])
return_vec = [
list(dataframes[x]['returns']) for x in dataframes.keys()
]
n_portfolios = 500
means, stds = np.column_stack([
calcMuSigma(return_vec)
for _ in xrange(n_portfolios)
])
prices = {}
means = [
i[0] for i in means
]
stds = [
i[0] for i in stds
]
prices['means'] = means
prices['stds'] = stds
print(prices['stds'])
return prices
s = zerorpc.Server(ServerRPC())
s.bind("tcp://0.0.0.0:4242")
s.run()
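# Client-side sketch (assumption: the server above is reachable on the same
# host/port passed to bind(); 'prices_payload' is a hypothetical dict with the
# 'columns' and 'data' keys that getReturns() expects):
#   client = zerorpc.Client()
#   client.connect("tcp://127.0.0.1:4242")
#   result = client.getReturns(prices_payload)  # {'means': [...], 'stds': [...]}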
| StarcoderdataPython |
3318536 | from .action import default_kwargs, plot_action, generate_arg_and_kwags, get_value, get_subset
from .action import DataSource, AxPlot
def _hist_plotter(df: DataSource, y, *arg, **kwargs):
_y = get_subset()(df, y)
def plot(ax):
ax.hist(_y, **kwargs)
return ax
return plot
def hist(**presetting):
return plot_action(
_hist_plotter,
["y"],
default_kwargs.get("hist")
)(**presetting)
| StarcoderdataPython |
1743858 | from rx.core import Observer, ObservableBase, Disposable
from rx.subjects import Subject
from rx.disposables import AnonymousDisposable
from rx.concurrency import current_thread_scheduler
from rx.core.notification import OnCompleted, OnError, OnNext
class ControlledSubject(ObservableBase, Observer):
def __init__(self, enable_queue=True, scheduler=None):
super(ControlledSubject, self).__init__()
self.subject = Subject()
self.enable_queue = enable_queue
self.queue = [] if enable_queue else None
self.requested_count = 0
self.requested_disposable = Disposable.empty()
self.error = None
self.has_failed = False
self.has_completed = False
self.scheduler = scheduler or current_thread_scheduler
def _subscribe_core(self, observer, scheduler=None):
return self.subject.subscribe(observer, scheduler)
def on_completed(self):
self.has_completed = True
if not self.enable_queue or len(self.queue) == 0:
self.subject.on_completed()
self.dispose_current_request()
else:
self.queue.append(OnCompleted())
def on_error(self, error):
self.has_failed = True
self.error = error
if not self.enable_queue or len(self.queue) == 0:
self.subject.on_error(error)
self.dispose_current_request()
else:
self.queue.append(OnError(error))
def on_next(self, value):
if self.requested_count <= 0:
self.enable_queue and self.queue.append(OnNext(value))
else:
self.requested_count -= 1
if self.requested_count == 0:
self.dispose_current_request()
self.subject.on_next(value)
def _process_request(self, number_of_items):
if self.enable_queue:
while len(self.queue) > 0 and (number_of_items > 0 or self.queue[0].kind != 'N'):
first = self.queue.pop(0)
first.accept(self.subject)
if first.kind == 'N':
number_of_items -= 1
else:
self.dispose_current_request()
self.queue = []
return number_of_items
def request(self, number):
self.dispose_current_request()
def action(scheduler, i):
remaining = self._process_request(i)
stopped = self.has_completed and self.has_failed
if not stopped and remaining > 0:
self.requested_count = remaining
def dispose():
self.requested_count = 0
return AnonymousDisposable(dispose)
# Scheduled item is still in progress. Return a new
# disposable to allow the request to be interrupted
# via dispose.
self.requested_disposable = self.scheduler.schedule(action, state=number)
return self.requested_disposable
def dispose_current_request(self):
if self.requested_disposable:
self.requested_disposable.dispose()
self.requested_disposable = None
| StarcoderdataPython |
1669095 | <filename>courses/migrations/0004_auto_20190414_2045.py<gh_stars>0
# Generated by Django 2.2 on 2019-04-14 19:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0003_auto_20190414_1910'),
]
operations = [
migrations.AlterField(
model_name='content',
name='module',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contents', to='courses.Module'),
),
]
| StarcoderdataPython |
1743512 | <reponame>hritools/faceR<filename>faceR/tracking/dlib_tracking.py<gh_stars>0
"""
A stateful tracker of arbitrary objects
"""
import logging
import dlib
from multiprocessing import Pool
name_list = list()
d_trackers = list()
class ReusablePool:
"""
Manage reusable tracker objects for use by client.
"""
def __init__(self, size):
logging.debug('Creating ' + str(size) + ' trackers!')
self._reusables = [dlib.correlation_tracker() for _ in range(size)]
def acquire(self, frame, bbox):
if len(self._reusables) == 0:
tracker = dlib.correlation_tracker()
logging.debug('No spare trackers! Created new.')
else:
tracker = self._reusables.pop()
tracker.start_track(frame, dlib.rectangle(
bbox[0], bbox[1], bbox[2], bbox[3]))
return tracker
def release(self, reusable):
self._reusables.append(reusable)
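# Sketch of the acquire/release cycle this pool is meant for (assumes `frame`
# is an image array and the bbox is a (left, top, right, bottom) tuple, as
# acquire() above expects):
#   t = tracker_pool.acquire(frame, (10, 20, 110, 120))
#   ... call t.update(frame) on subsequent frames ...
#   tracker_pool.release(t)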
tracker_pool = ReusablePool(0)
def reset_trackers():
"""
Resets trackers.
"""
global d_trackers
for tracker in d_trackers:
tracker_pool.release(tracker)
d_trackers = list()
def start_track(frame, face_pos):
"""
Starts new trackers.
:param frame: frame from which to start
:param face_pos: positions of faces at the image
"""
d_tracker = tracker_pool.acquire(frame, face_pos)
d_trackers.append(d_tracker)
def update(d_tracker, frame):
d_tracker.update(frame)
return d_tracker.get_position()
def track(frame_gen):
"""
Updates trackers.
:param frame_gen: generator feeding frames
"""
for frame in frame_gen:
pos_list = list()
for d_tracker in d_trackers:
d_tracker.update(frame)
pos = d_tracker.get_position()
pos_list.append((int(pos.left()), int(pos.top()),
int(pos.right()), int(pos.bottom())))
yield pos_list
logging.debug('exiting dlib tracking')
# TODO: finalize multiprocessed tracking
def track_multiprocess(frame_gen, conf):
"""
Updates trackers.
:param conf: configs
:param frame_gen: generator feeding frames
"""
pool = Pool(conf['processes'])
for frame in frame_gen:
pos_list = list()
# result = {
# d_tracker: pool.apply_async(lambda x: x.update(frame))
# for d_tracker in d_trackers
# }
results = [pool.apply_async(update, args=(d_tracker, frame)) for d_tracker in d_trackers]
for res in results:
pos = res.get()
# d_tracker.update(frame)
# pos = d_tracker.get_position()
pos_list.append((int(pos.left()), int(pos.top()),
int(pos.right()), int(pos.bottom())))
yield pos_list
logging.debug('exiting dlib tracking')
| StarcoderdataPython |
3349681 | import distutils.util
import cv2
import numpy as np
import paddle
from tqdm import tqdm
def print_arguments(args):
print("----------- Configuration Arguments -----------")
for arg, value in sorted(vars(args).items()):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
def add_arguments(argname, type, default, help, argparser, **kwargs):
type = distutils.util.strtobool if type == bool else type
argparser.add_argument("--" + argname,
default=default,
type=type,
help=help + ' 默认: %(default)s.',
**kwargs)
# Get all image paths from the LFW pair list
def get_lfw_list(pair_list):
with open(pair_list, 'r') as fd:
pairs = fd.readlines()
data_list = []
for pair in pairs:
splits = pair.split()
if splits[0] not in data_list:
data_list.append(splits[0])
if splits[1] not in data_list:
data_list.append(splits[1])
return data_list
# Load an image and preprocess it
def load_image(img_path):
image = cv2.imread(img_path)
if image is None:
return None
image = cv2.resize(image, (112, 112))
image_flip = np.fliplr(image)
image = np.array([image, image_flip], dtype='float32')
image = image.transpose((0, 3, 1, 2))
image = image.astype(np.float32, copy=False)
image = (image - 127.5) / 127.5
return image
# Extract image features with the model
def get_features(model, test_list, batch_size=32):
images = None
features = None
for i, img_path in enumerate(tqdm(test_list)):
image = load_image(img_path)
assert image is not None, '{} 图片错误'.format(img_path)
if images is None:
images = image
else:
images = np.concatenate((images, image), axis=0)
if images.shape[0] % batch_size == 0 or i == len(test_list) - 1:
data = paddle.to_tensor(images, dtype='float32')
output = model(data)
output = output.numpy()
feature_1 = output[0::2]
feature_2 = output[1::2]
feature = np.hstack((feature_1, feature_2))
if features is None:
features = feature
else:
features = np.vstack((features, feature))
images = None
return features
# Pack file paths and the model's output features into a dictionary
def get_feature_dict(test_list, features):
feature_dict = {}
for i, each in enumerate(test_list):
feature_dict[each] = features[i]
return feature_dict
# Compute the cosine similarity
def cosin_metric(x1, x2):
return np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))
# Compute accuracy from the cosine similarity scores
def cal_accuracy(y_score, y_true):
y_score = np.asarray(y_score)
y_true = np.asarray(y_true)
best_accuracy = 0
best_threshold = 0
for i in range(len(y_score)):
threshold = y_score[i]
y_test = (y_score >= threshold)
acc = np.mean((y_test == y_true).astype(int))
if acc > best_accuracy:
best_accuracy = acc
best_threshold = threshold
return best_accuracy, best_threshold
# Compute the similarity of each LFW pair
def test_performance(feature_dict, lfw_data_list):
with open(lfw_data_list, 'r') as fd:
pairs = fd.readlines()
sims = []
labels = []
for pair in pairs:
splits = pair.split()
feature_1 = feature_dict[splits[0]]
feature_2 = feature_dict[splits[1]]
label = int(splits[2])
sim = cosin_metric(feature_1, feature_2)
sims.append(sim)
labels.append(label)
accuracy, threshold = cal_accuracy(sims, labels)
return accuracy, threshold
| StarcoderdataPython |
72882 | from textbox.data.dataloader.abstract_dataloader import AbstractDataLoader
from textbox.data.dataloader.single_sent_dataloader import SingleSentenceDataLoader
from textbox.data.dataloader.paired_sent_dataloader import PairedSentenceDataLoader
from textbox.data.dataloader.attr_sent_dataloader import AttributedSentenceDataLoader
from textbox.data.dataloader.kg_sent_dataloader import KGSentenceDataLoader
from textbox.data.dataloader.wikibio_sent_dataloader import WikiBioSentenceDataLoader
from textbox.data.dataloader.rotowire_sent_dataloader import RotoWireSentenceDataLoader | StarcoderdataPython |
4841815 | <gh_stars>1-10
City = [
{
"热门城市": [
{
"name": "北京",
"key": "热门"
},
{
"name": "上海",
"key": "热门"
},
{
"name": "广州",
"key": "热门"
},
{
"name": "深圳",
"key": "热门"
},
{
"name": "成都",
"key": "热门"
},
{
"name": "重庆",
"key": "热门"
},
{
"name": "天津",
"key": "热门"
},
{
"name": "杭州",
"key": "热门"
},
{
"name": "南京",
"key": "热门"
},
{
"name": "苏州",
"key": "热门"
},
{
"name": "武汉",
"key": "热门"
},
{
"name": "西安",
"key": "热门"
}
],
"A": [
{
"name": "阿坝",
"key": "A"
},
{
"name": "阿拉善",
"key": "A"
},
{
"name": "阿里",
"key": "A"
},
{
"name": "安康",
"key": "A"
},
{
"name": "安庆",
"key": "A"
},
{
"name": "鞍山",
"key": "A"
}
,
{
"name": "安顺",
"key": "A"
}
,
{
"name": "安阳",
"key": "A"
}
,
{
"name": "澳门",
"key": "A"
}
],
"B": [
{
"name": "北京",
"key": "B"
},
{
"name": "白银",
"key": "B"
},
{
"name": "保定",
"key": "B"
},
{
"name": "宝鸡",
"key": "B"
},
{
"name": "保山",
"key": "B"
},
{
"name": "包头",
"key": "B"
},
{
"name": "巴中",
"key": "B"
}
,
{
"name": "北海",
"key": "B"
}
,
{
"name": "蚌埠",
"key": "B"
}
,
{
"name": "本溪",
"key": "B"
}
,
{
"name": "毕节",
"key": "B"
}
,
{
"name": "滨州",
"key": "B"
}
,
{
"name": "百色",
"key": "B"
}
,
{
"name": "亳州",
"key": "B"
}
],
"C": [
{
"name": "重庆",
"key": "C"
},
{
"name": "成都",
"key": "C"
},
{
"name": "长沙",
"key": "C"
},
{
"name": "长春",
"key": "C"
},
{
"name": "沧州",
"key": "C"
},
{
"name": "常德",
"key": "C"
},
{
"name": "昌都",
"key": "C"
}
,
{
"name": "长治",
"key": "C"
}
,
{
"name": "常州",
"key": "C"
}
,
{
"name": "巢湖",
"key": "C"
}
,
{
"name": "潮州",
"key": "C"
}
,
{
"name": "承德",
"key": "C"
}
,
{
"name": "郴州",
"key": "C"
}
,
{
"name": "赤峰",
"key": "C"
}
,
{
"name": "池州",
"key": "C"
}
,
{
"name": "崇左",
"key": "C"
}
,
{
"name": "楚雄",
"key": "C"
}
,
{
"name": "滁州",
"key": "C"
}
,
{
"name": "朝阳",
"key": "C"
}
],
"D": [
{
"name": "大连",
"key": "D"
},
{
"name": "东莞",
"key": "D"
},
{
"name": "大理",
"key": "D"
},
{
"name": "丹东",
"key": "D"
},
{
"name": "大庆",
"key": "D"
},
{
"name": "大同",
"key": "D"
},
{
"name": "大兴安岭",
"key": "D"
}
,
{
"name": "德宏",
"key": "D"
}
,
{
"name": "德阳",
"key": "D"
}
,
{
"name": "德州",
"key": "D"
}
,
{
"name": "定西",
"key": "D"
}
,
{
"name": "迪庆",
"key": "D"
}
,
{
"name": "东营",
"key": "D"
}
],
"E": [
{
"name": "鄂尔多斯",
"key": "E"
}
,
{
"name": "恩施",
"key": "E"
}
,
{
"name": "鄂州",
"key": "E"
}
],
"F": [
{
"name": "福州",
"key": "F"
}
,
{
"name": "防城港",
"key": "F"
}
,
{
"name": "佛山",
"key": "F"
}
,
{
"name": "抚顺",
"key": "F"
}
,
{
"name": "抚州",
"key": "F"
}
,
{
"name": "阜新",
"key": "F"
}
,
{
"name": "阜阳",
"key": "F"
}
]
,
"G": [
{
"name": "广州",
"key": "G"
},
{
"name": "赣州",
"key": "G"
},
{
"name": "桂林",
"key": "G"
},
{
"name": "贵阳",
"key": "G"
},
{
"name": "甘南",
"key": "G"
},
{
"name": "甘孜",
"key": "G"
},
{
"name": "广安",
"key": "G"
}
,
{
"name": "广元",
"key": "G"
}
,
{
"name": "果洛",
"key": "G"
}
,
{
"name": "贵港",
"key": "G"
}
],
"H": [
{
"name": "杭州",
"key": "H"
},
{
"name": "哈尔滨",
"key": "H"
},
{
"name": "合肥",
"key": "H"
},
{
"name": "海口",
"key": "H"
},
{
"name": "海东",
"key": "H"
},
{
"name": "海北",
"key": "H"
},
{
"name": "海南",
"key": "H"
}
,
{
"name": "海西",
"key": "H"
}
,
{
"name": "邯郸",
"key": "H"
}
,
{
"name": "汉中",
"key": "H"
}
,
{
"name": "鹤壁",
"key": "H"
}
,
{
"name": "河池",
"key": "H"
}
,
{
"name": "鹤岗",
"key": "H"
}
,
{
"name": "黑河",
"key": "H"
}
,
{
"name": "衡水",
"key": "H"
}
,
{
"name": "衡阳",
"key": "H"
}
,
{
"name": "河源",
"key": "H"
}
,
{
"name": "贺州",
"key": "H"
}
,
{
"name": "红河",
"key": "H"
}
,
{
"name": "淮安",
"key": "H"
}
,
{
"name": "淮北",
"key": "H"
}
,
{
"name": "怀化",
"key": "H"
}
,
{
"name": "淮南",
"key": "H"
}
,
{
"name": "黄冈",
"key": "H"
}
,
{
"name": "黄南",
"key": "H"
}
,
{
"name": "黄山",
"key": "H"
},
{
"name": "黄石",
"key": "H"
},
{
"name": "惠州",
"key": "H"
},
{
"name": "葫芦岛",
"key": "H"
},
{
"name": "呼伦贝尔",
"key": "H"
},
{
"name": "湖州",
"key": "H"
}
,
{
"name": "菏泽",
"key": "H"
}
],
"J": [
{
"name": "济南",
"key": "J"
},
{
"name": "佳木斯",
"key": "J"
},
{
"name": "吉安",
"key": "J"
},
{
"name": "江门",
"key": "J"
},
{
"name": "焦作",
"key": "J"
},
{
"name": "嘉兴",
"key": "J"
},
{
"name": "嘉峪关",
"key": "J"
}
,
{
"name": "揭阳",
"key": "J"
}
,
{
"name": "吉林",
"key": "J"
}
,
{
"name": "金昌",
"key": "J"
}
,
{
"name": "晋城",
"key": "J"
}
,
{
"name": "景德镇",
"key": "J"
}
,
{
"name": "荆门",
"key": "J"
}
,
{
"name": "荆州",
"key": "J"
}
,
{
"name": "金华",
"key": "J"
}
,
{
"name": "济宁",
"key": "J"
}
,
{
"name": "晋中",
"key": "J"
}
,
{
"name": "锦州",
"key": "J"
}
,
{
"name": "九江",
"key": "J"
}
,
{
"name": "酒泉",
"key": "J"
}
]
,
"K": [
{
"name": "昆明",
"key": "K"
}
,
{
"name": "开封",
"key": "K"
}
]
,
"L": [
{
"name": "兰州",
"key": "L"
},
{
"name": "拉萨",
"key": "L"
},
{
"name": "来宾",
"key": "L"
},
{
"name": "莱芜",
"key": "L"
},
{
"name": "廊坊",
"key": "L"
},
{
"name": "乐山",
"key": "L"
},
{
"name": "凉山",
"key": "L"
}
,
{
"name": "连云港",
"key": "L"
}
,
{
"name": "聊城",
"key": "L"
}
,
{
"name": "辽阳",
"key": "L"
}
,
{
"name": "辽源",
"key": "L"
}
,
{
"name": "丽江",
"key": "L"
}
,
{
"name": "临沧",
"key": "L"
}
,
{
"name": "临汾",
"key": "L"
}
,
{
"name": "临夏",
"key": "L"
}
,
{
"name": "临沂",
"key": "L"
}
,
{
"name": "林芝",
"key": "L"
}
,
{
"name": "丽水",
"key": "L"
}
,
{
"name": "六安",
"key": "L"
}
,
{
"name": "六盘水",
"key": "L"
}
,
{
"name": "柳州",
"key": "L"
}
,
{
"name": "陇南",
"key": "L"
}
,
{
"name": "龙岩",
"key": "L"
}
,
{
"name": "娄底",
"key": "L"
}
,
{
"name": "漯河",
"key": "L"
}
,
{
"name": "洛阳",
"key": "L"
},
{
"name": "泸州",
"key": "L"
},
{
"name": "吕梁",
"key": "L"
}
],
"M": [
{
"name": "马鞍山",
"key": "M"
}
,
{
"name": "茂名",
"key": "M"
}
,
{
"name": "眉山",
"key": "M"
}
,
{
"name": "梅州",
"key": "M"
}
,
{
"name": "绵阳",
"key": "M"
}
,
{
"name": "牡丹江",
"key": "M"
}
],
"N": [
{
"name": "南京",
"key": "N"
},
{
"name": "南昌",
"key": "N"
},
{
"name": "南宁",
"key": "N"
},
{
"name": "南充",
"key": "N"
},
{
"name": "南平",
"key": "N"
},
{
"name": "南通",
"key": "N"
},
{
"name": "南阳",
"key": "N"
}
,
{
"name": "那曲",
"key": "N"
}
,
{
"name": "内江",
"key": "N"
}
,
{
"name": "宁德",
"key": "N"
}
,
{
"name": "怒江",
"key": "N"
}
],
"P": [
{
"name": "盘锦",
"key": "P"
}
,
{
"name": "攀枝花",
"key": "P"
}
,
{
"name": "平顶山",
"key": "P"
}
,
{
"name": "平凉",
"key": "P"
}
,
{
"name": "萍乡",
"key": "P"
}
,
{
"name": "莆田",
"key": "P"
}
,
{
"name": "濮阳",
"key": "P"
}
],
"Q": [
{
"name": "青岛",
"key": "Q"
},
{
"name": "黔东南",
"key": "Q"
},
{
"name": "黔南",
"key": "Q"
},
{
"name": "黔西南",
"key": "Q"
},
{
"name": "庆阳",
"key": "Q"
},
{
"name": "清远",
"key": "Q"
},
{
"name": "秦皇岛",
"key": "Q"
}
,
{
"name": "钦州",
"key": "Q"
}
,
{
"name": "齐齐哈尔",
"key": "Q"
}
,
{
"name": "泉州",
"key": "Q"
}
,
{
"name": "曲靖",
"key": "Q"
}
,
{
"name": "衢州",
"key": "Q"
}
],
"R": [
{
"name": "日喀则",
"key": "R"
},
{
"name": "日照",
"key": "R"
}
]
,
"S": [
{
"name": "上海",
"key": "S"
},
{
"name": "深圳",
"key": "S"
},
{
"name": "苏州",
"key": "S"
},
{
"name": "沈阳",
"key": "S"
},
{
"name": "石家庄",
"key": "S"
},
{
"name": "三门峡",
"key": "S"
},
{
"name": "三明",
"key": "S"
}
,
{
"name": "三亚",
"key": "S"
}
,
{
"name": "商洛",
"key": "S"
}
,
{
"name": "商丘",
"key": "S"
}
,
{
"name": "上饶",
"key": "S"
}
,
{
"name": "山南",
"key": "S"
}
,
{
"name": "汕头",
"key": "S"
}
,
{
"name": "汕尾",
"key": "S"
}
,
{
"name": "韶关",
"key": "S"
}
,
{
"name": "绍兴",
"key": "S"
}
,
{
"name": "邵阳",
"key": "S"
}
,
{
"name": "十堰",
"key": "S"
}
,
{
"name": "朔州",
"key": "S"
}
,
{
"name": "四平",
"key": "S"
}
,
{
"name": "绥化",
"key": "S"
}
,
{
"name": "遂宁",
"key": "S"
}
,
{
"name": "随州",
"key": "S"
}
,
{
"name": "娄底",
"key": "S"
}
,
{
"name": "宿迁",
"key": "S"
}
,
{
"name": "宿州",
"key": "S"
}
],
"T": [
{
"name": "天津",
"key": "T"
},
{
"name": "太原",
"key": "T"
},
{
"name": "泰安",
"key": "T"
},
{
"name": "泰州",
"key": "T"
},
{
"name": "唐山",
"key": "T"
},
{
"name": "天水",
"key": "T"
},
{
"name": "铁岭",
"key": "T"
}
,
{
"name": "铜川",
"key": "T"
}
,
{
"name": "通化",
"key": "T"
}
,
{
"name": "通辽",
"key": "T"
}
,
{
"name": "铜陵",
"key": "T"
}
,
{
"name": "铜仁",
"key": "T"
}
,
{
"name": "台湾",
"key": "T"
}
]
,
"W": [
{
"name": "武汉",
"key": "W"
},
{
"name": "乌鲁木齐",
"key": "W"
},
{
"name": "无锡",
"key": "W"
},
{
"name": "威海",
"key": "W"
},
{
"name": "潍坊",
"key": "W"
},
{
"name": "文山",
"key": "W"
},
{
"name": "温州",
"key": "W"
}
,
{
"name": "乌海",
"key": "W"
}
,
{
"name": "芜湖",
"key": "W"
}
,
{
"name": "乌兰察布",
"key": "W"
}
,
{
"name": "武威",
"key": "W"
}
,
{
"name": "梧州",
"key": "W"
}
],
"X": [
{
"name": "厦门",
"key": "X"
},
{
"name": "西安",
"key": "X"
},
{
"name": "西宁",
"key": "X"
},
{
"name": "襄樊",
"key": "X"
},
{
"name": "湘潭",
"key": "X"
},
{
"name": "湘西",
"key": "X"
},
{
"name": "咸宁",
"key": "X"
}
,
{
"name": "咸阳",
"key": "X"
}
,
{
"name": "孝感",
"key": "X"
}
,
{
"name": "邢台",
"key": "X"
}
,
{
"name": "新乡",
"key": "X"
}
,
{
"name": "信阳",
"key": "X"
}
,
{
"name": "新余",
"key": "X"
}
,
{
"name": "忻州",
"key": "X"
}
,
{
"name": "西双版纳",
"key": "X"
}
,
{
"name": "宣城",
"key": "X"
}
,
{
"name": "许昌",
"key": "X"
}
,
{
"name": "徐州",
"key": "X"
}
,
{
"name": "香港",
"key": "X"
}
,
{
"name": "锡林郭勒",
"key": "X"
}
,
{
"name": "兴安",
"key": "X"
}
]
,
"Y": [
{
"name": "银川",
"key": "Y"
},
{
"name": "雅安",
"key": "Y"
},
{
"name": "延安",
"key": "Y"
},
{
"name": "延边",
"key": "Y"
},
{
"name": "盐城",
"key": "Y"
},
{
"name": "阳江",
"key": "Y"
},
{
"name": "阳泉",
"key": "Y"
}
,
{
"name": "扬州",
"key": "Y"
}
,
{
"name": "烟台",
"key": "Y"
}
,
{
"name": "宜宾",
"key": "Y"
}
,
{
"name": "宜昌",
"key": "Y"
}
,
{
"name": "宜春",
"key": "Y"
}
,
{
"name": "营口",
"key": "Y"
}
,
{
"name": "益阳",
"key": "Y"
}
,
{
"name": "永州",
"key": "Y"
}
,
{
"name": "岳阳",
"key": "Y"
}
,
{
"name": "榆林",
"key": "Y"
}
,
{
"name": "运城",
"key": "Y"
}
,
{
"name": "云浮",
"key": "Y"
}
,
{
"name": "玉树",
"key": "Y"
}
,
{
"name": "玉溪",
"key": "Y"
}
,
{
"name": "玉林",
"key": "Y"
}
],
"Z": [
{
"name": "杂多县",
"key": "Z"
},
{
"name": "赞皇县",
"key": "Z"
},
{
"name": "枣强县",
"key": "Z"
},
{
"name": "枣阳市",
"key": "Z"
},
{
"name": "枣庄",
"key": "Z"
},
{
"name": "泽库县",
"key": "Z"
},
{
"name": "增城",
"key": "Z"
}
,
{
"name": "曾都区",
"key": "Z"
}
,
{
"name": "泽普县",
"key": "Z"
}
,
{
"name": "泽州县",
"key": "Z"
}
,
{
"name": "札达县",
"key": "Z"
}
,
{
"name": "扎赉特旗",
"key": "Z"
}
,
{
"name": "扎兰屯",
"key": "Z"
}
,
{
"name": "扎鲁特旗",
"key": "Z"
}
,
{
"name": "扎囊县",
"key": "Z"
}
,
{
"name": "张北县",
"key": "Z"
}
,
{
"name": "张店区",
"key": "Z"
}
,
{
"name": "章贡区",
"key": "Z"
}
,
{
"name": "张家港",
"key": "Z"
}
,
{
"name": "张家界",
"key": "Z"
}
,
{
"name": "张家口",
"key": "Z"
}
,
{
"name": "漳平",
"key": "Z"
}
,
{
"name": "漳浦县",
"key": "Z"
}
,
{
"name": "章丘",
"key": "Z"
}
,
{
"name": "樟树",
"key": "Z"
}
,
{
"name": "张湾区",
"key": "Z"
},
{
"name": "彰武县",
"key": "Z"
},
{
"name": "漳县",
"key": "Z"
},
{
"name": "张掖",
"key": "Z"
},
{
"name": "漳州",
"key": "Z"
},
{
"name": "长子县",
"key": "Z"
}
,
{
"name": "湛河区",
"key": "Z"
}
,
{
"name": "湛江",
"key": "Z"
}
,
{
"name": "站前区",
"key": "Z"
}
,
{
"name": "沾益县",
"key": "Z"
}
,
{
"name": "诏安县",
"key": "Z"
},
{
"name": "召陵区",
"key": "Z"
},
{
"name": "昭平县",
"key": "Z"
},
{
"name": "肇庆",
"key": "Z"
},
{
"name": "昭通",
"key": "Z"
},
{
"name": "赵县",
"key": "Z"
}
]
}
] | StarcoderdataPython |
47755 | from pathlib import Path
from shutil import copyfile
import logging
def move_files(dir_from: Path, dir_to: Path):
logging.info(f"Moving files from '{dir_from}' to '{dir_to}'")
p = dir_from.glob("**/*")
input_paths = [x for x in p if x.is_file()]
for input_path in input_paths:
filename = input_path.name
output_path = dir_to / filename
copyfile(input_path, output_path)
logging.info(f"Moved file: {filename}")
| StarcoderdataPython |
191032 | """add sol associated wallet
Revision ID: f64a484f1496
Revises: <PASSWORD>
Create Date: 2021-07-21 15:58:05.108372
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "f64a484f1496"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
op.add_column(
"user_balances",
sa.Column(
"associated_sol_wallets_balance",
sa.String(),
server_default="0",
nullable=False,
),
)
wallet_chain = postgresql.ENUM("eth", "sol", name="wallet_chain")
wallet_chain.create(op.get_bind())
op.add_column(
"associated_wallets",
sa.Column(
"chain",
sa.Enum("eth", "sol", name="wallet_chain"),
server_default="eth",
nullable=False,
),
)
op.alter_column("associated_wallets", "chain", server_default=None)
def downgrade():
op.drop_column("user_balances", "associated_sol_wallets_balance")
op.drop_column("associated_wallets", "chain")
wallet_chain = postgresql.ENUM("eth", "sol", name="wallet_chain")
wallet_chain.drop(op.get_bind())
| StarcoderdataPython |
3397564 | import numba
import numpy as np
from math import sqrt, sin, cos, acos, atan2
@numba.njit
def residual(t, yin, yp, R, L1, L2, H, Ct, Cdv, Cd, St, Sv, rho, Ma, Mv, Iv, theta):
"""Calculates the residual of the system of equations G(t, y, yp) == 0."""
x,y,p,x1,y1,p1 = yin
x2,y2,p2 = yp[3:6]
t1,dt1,ddt1,t2,dt2,ddt2 = theta(t)
# Test direction of Vn1
r1 = 0.5*L1
Vn1 = (-R*sin(p)*p1 - r1*(p1 + dt1)*sin(p + t1) + x1)*sin(p + t1) - (R*cos(p)*p1 + r1*(p1 + dt1)*cos(p + t1) + y1)*cos(p + t1)
s1 = 1.0 if Vn1 > 0 else -1.0
# Test direction of Vn2
r2 = 0.5*L2
Vn2 = (-L1*(p1 + dt1)*sin(p + t1) - R*sin(p)*p1 - r2*(p1 + dt1 + dt2)*sin(p + t1 + t2) + x1)*sin(p + t1 + t2) - (L1*(p1 + dt1)*cos(p + t1) + R*cos(p)*p1 + r2*(p1 + dt1 + dt2)*cos(p + t1 + t2) + y1)*cos(p + t1 + t2)
s2 = 1.0 if Vn2 > 0 else -1.0
s3 = 1.0 if p1 > 0 else -1.0
Z1 = 0.5*rho*Cd*H
Z2 = 0.5*rho*Cdv*Sv
Z3 = 0.5*rho*Ct*St
# Now calculate the residuals
f = np.empty(6)
f[0] = yp[0] - x1;
f[1] = yp[1] - y1;
f[2] = yp[2] - p1;
# First calculate the common sub-expressions
term0 = x1**2
term1 = y1**2
term2 = Z2*sqrt(term0 + term1)
term3 = L1**2
term4 = p + t1
term5 = sin(term4)
term6 = term5**3
term7 = (1/2)*Ma
term8 = term6*term7
term9 = Ma*dt1
term10 = cos(term4)
term11 = term10**3
term12 = p1*term11
term13 = dt1**2
term14 = term11*term7
term15 = p1**2
term16 = term10**2
term17 = term16*term5
term18 = term17*term7
term19 = p1*term9
term20 = term5**2
term21 = term10*term20
term22 = term13*term7
term23 = term15*term7
term24 = term3*(ddt1*term18 + ddt1*term8 + p2*term18 + p2*term8 + term12*term9 + term13*term14 + term14*term15 + term19*term21 + term21*term22 + term21*term23)
term25 = L1**3
term26 = Z1*s1
term27 = term26*term5**5
term28 = (2/3)*dt1
term29 = p1*term28
term30 = (1/3)*term13
term31 = (1/3)*term15
term32 = term10**4*term26
term33 = term32*term5
term34 = dt1*term16
term35 = term26*term6
term36 = (4/3)*p1
term37 = (2/3)*term13
term38 = term16*term35
term39 = (2/3)*term15
term40 = term25*(-term27*term29 - term27*term30 - term27*term31 - term29*term33 - term30*term33 - term31*term33 - term34*term35*term36 - term37*term38 - term38*term39)
term41 = Z1*term0
term42 = s1*term41
term43 = sin(p)
term44 = R*term43
term45 = p1*x1
term46 = 2*term45
term47 = term44*term46
term48 = term21*term26
term49 = 2*x1
term50 = term49*y1
term51 = Z1*term1
term52 = s1*term51
term53 = cos(p)
term54 = R*term53
term55 = term46*term54
term56 = p1*y1
term57 = 2*term56
term58 = term20*term44
term59 = term26*term58
term60 = term10*term59
term61 = term17*term26
term62 = 2*term54
term63 = term56*term62
term64 = R**2*term15
term65 = term43**2*term64
term66 = term53**2*term64
term67 = 2*term43*term53*term64
term68 = L1*(-term17*term52 + term35*term47 - term35*term65 - term42*term6 + term48*term50 + term48*term55 - term48*term67 - term57*term60 - term61*term63 - term61*term66)
term69 = Ma*x2
term70 = Ma*y1
term71 = p1*term70
term72 = dt1*term70
term73 = Ma*y2
term74 = term10*term5
term75 = Ma*p2
term76 = term9*x1
term77 = 2*term74
term78 = Ma*p1
term79 = term78*x1
term80 = term34*term78
term81 = Ma*term15
term82 = term16*term54
term83 = term19*term54
term84 = term54*term75
term85 = term44*term81
term86 = term19*term44
term87 = L1*(term16*term71 - term20*term69 - term20*term71 - term20*term72 - term20*term83 + term34*term70 + term54*term80 + term58*term75 + term73*term74 + term74*term84 + term74*term85 - term76*term77 - term77*term79 + term77*term86 + term81*term82)
term88 = L2**2
term89 = t2 + term4
term90 = sin(term89)
term91 = term90**3
term92 = term7*term91
term93 = cos(term89)
term94 = term93**3
term95 = term9*term94
term96 = dt2*term94
term97 = term7*term94
term98 = dt2**2
term99 = term93**2
term100 = term90*term99
term101 = term100*term7
term102 = term90**2
term103 = term102*term93
term104 = dt2*term103
term105 = term103*term7
term106 = term88*(ddt1*term101 + ddt1*term92 + ddt2*term101 + ddt2*term92 + dt2*term95 + p1*term95 + p2*term101 + p2*term92 + term103*term19 + term103*term22 + term103*term23 + term104*term78 + term104*term9 + term105*term98 + term13*term97 + term15*term97 + term78*term96 + term97*term98)
term107 = dt1*x1
term108 = term26*term5**4
term109 = p1*term44
term110 = dt1*term109
term111 = term15*term44
term112 = dt1*y1
term113 = term26*term5
term114 = term11*term113
term115 = term10*term35
term116 = term113*term12
term117 = term20*term26
term118 = term117*term34
term119 = term117*term16
term120 = dt1*term116
term121 = p1*term54
term122 = dt1*term121
term123 = term15*term54
term124 = p1*term34
term125 = term3*(term107*term108 - term108*term110 - term108*term111 + term108*term45 - term112*term114 - term112*term115 - term114*term123 - term115*term122 - term115*term123 - term115*term56 - term116*y1 + term118*x1 + term119*term45 - term120*term54 - term124*term59 - term15*term16*term59)
term126 = L2**3
term127 = Z1*s2
term128 = term127*term90**5
term129 = dt2*term128
term130 = (2/3)*p1
term131 = (1/3)*term98
term132 = term127*term93**4
term133 = term132*term90
term134 = dt2*term133
term135 = dt2*term91
term136 = term127*term99
term137 = term135*term136
term138 = (4/3)*dt1
term139 = term127*term91
term140 = term139*term99
term141 = dt1*term36
term142 = (2/3)*term98
term143 = term126*(-term128*term131 - term128*term29 - term128*term30 - term128*term31 - term129*term130 - term129*term28 - term130*term134 - term131*term133 - term133*term29 - term133*term30 - term133*term31 - term134*term28 - term137*term138 - term137*term36 - term140*term141 - term140*term142 - term140*term37 - term140*term39)
term144 = dt2*term70
term145 = term90*term93
term146 = term102*term44
term147 = term54*term99
term148 = dt2*term78
term149 = L1*term5
term150 = term102*term149
term151 = Ma*ddt1
term152 = 2*term145
term153 = Ma*x1
term154 = dt2*term153
term155 = term148*term54
term156 = L1*term10
term157 = term156*term99
term158 = dt2*term157
term159 = Ma*term13
term160 = term102*term156
term161 = dt2*term9
term162 = 2*term19
term163 = term145*term156
term164 = term148*term152
term165 = term145*term149
term166 = term149*term152
term167 = L2*(-term102*term144 - term102*term155 - term102*term69 - term102*term71 - term102*term72 - term102*term83 + term144*term99 + term145*term73 + term145*term84 + term145*term85 + term146*term75 + term147*term148 + term147*term19 + term147*term81 - term148*term160 + term148*term166 + term150*term151 + term150*term75 + term151*term163 - term152*term154 - term152*term76 - term152*term79 + term152*term86 + term157*term159 + term157*term162 + term157*term81 + term158*term78 + term158*term9 + term159*term165 - term160*term161 + term161*term166 + term163*term75 + term164*term44 + term165*term81 + term166*term19 + term71*term99 + term72*term99)
term168 = s2*term41
term169 = term127*y1
term170 = term169*term49
term171 = term139*term149
term172 = 2*term171
term173 = s2*term51
term174 = term103*term127
term175 = term127*term93
term176 = term146*term175
term177 = term100*term127
term178 = term139*term3
term179 = dt1*p1
term180 = 2*term179
term181 = term180*term20
term182 = term13*term20
term183 = term15*term20
term184 = term107*term127
term185 = 2*term184
term186 = term156*term185
term187 = term150*term175
term188 = 2*term187
term189 = 2*term177
term190 = term112*term189
term191 = term156*term177
term192 = term156*term180
term193 = term156*term189
term194 = term124*term3
term195 = term15*term156
term196 = term123*term189
term197 = term16*term3
term198 = term177*term197
term199 = term174*term3
term200 = 4*term179*term74
term201 = term199*term77
term202 = L2*(-term100*term173 + term103*term170 + term103*term186 + term107*term172 - term110*term172 - term111*term172 - term112*term188 - term122*term188 - term122*term193 - term123*term188 - term13*term198 - term13*term201 + term139*term47 - term139*term65 - term15*term198 - term15*term201 + term156*term174*term46 - term156*term190 - term156*term196 - term168*term91 + term171*term46 + term174*term55 - term174*term67 - term176*term192 - 2*term176*term195 - term176*term57 - term177*term63 - term177*term66 - term178*term181 - term178*term182 - term178*term183 - term187*term57 - term189*term194 - term191*term57 - term199*term200)
term203 = term127*term90**4
term204 = dt2*x1
term205 = term127*term90
term206 = term205*term94
term207 = term139*term93
term208 = term205*term96
term209 = term135*term93
term210 = term149*term203
term211 = dt2*term210
term212 = term102*term99
term213 = term127*term212
term214 = term127*term209
term215 = dt1*term156
term216 = p1*term156
term217 = term136*term146
term218 = dt2*p1
term219 = term13*term156
term220 = term136*term150
term221 = dt2*term220
term222 = term88*(-dt1*term211 - dt1*term221 - dt2*term109*term203 - p1*term211 - p1*term221 + term107*term203 - term110*term203 - term111*term203 - term112*term206 - term112*term207 - term121*term208 - term121*term214 - term122*term206 - term122*term207 - term123*term206 - term123*term207 - term13*term210 - term13*term220 - term15*term210 - term15*term217 - term15*term220 - term169*term209 - term179*term217 - term180*term210 - term180*term220 + term184*term212 - term192*term206 - term192*term207 - term195*term206 - term195*term207 + term203*term204 + term203*term45 + term204*term213 - term206*term219 - term206*term56 - term207*term219 - term207*term56 - term208*term215 - term208*term216 - term208*y1 + term213*term45 - term214*term215 - term214*term216 - term217*term218)
term223 = term21*term7
term224 = term3*(-ddt1*term14 - ddt1*term223 - p2*term14 - p2*term223 + term13*term18 + term13*term8 + term15*term18 + term15*term8 + term19*term6 + term5*term80)
term225 = term10**5*term26
term226 = term10*term108
term227 = term11*term26
term228 = (2/3)*term227
term229 = term25*(term117*term12*term138 + term182*term228 + term183*term228 + term225*term29 + term225*term30 + term225*term31 + term226*term29 + term226*term30 + term226*term31)
term230 = term44*term57
term231 = L1*(term11*term52 + term12*term26*term62*y1 + term21*term42 + term227*term66 + term230*term61 - term46*term60 + term48*term65 - term50*term61 - term55*term61 + term61*term67)
term232 = term44*term75
term233 = term54*term81
term234 = L1*(term153*term34 - term16*term73 + term16*term79 + term19*term58 - term20*term76 - term20*term79 - term232*term74 + term233*term74 - term44*term80 + term58*term81 + term69*term74 + term71*term77 + term72*term77 - term75*term82 + term77*term83)
term235 = dt2*term100
term236 = term88*(-ddt1*term105 - ddt1*term97 - ddt2*term105 - ddt2*term97 - p2*term105 - p2*term97 + term100*term19 + term100*term22 + term100*term23 + term101*term98 + term13*term92 + term135*term78 + term135*term9 + term15*term92 + term19*term91 + term235*term78 + term235*term9 + term92*term98)
term237 = term3*(-term107*term114 - term107*term115 + term110*term115 + term111*term114 + term111*term115 + term112*term32 - term115*term45 - term116*x1 + term118*term121 + term118*y1 + term119*term56 + term120*term44 + term122*term32 + term123*term32 + term183*term26*term82 + term32*term56)
term238 = term127*term93**5
term239 = dt2*term238
term240 = term203*term93
term241 = dt2*term240
term242 = term102*term127
term243 = term242*term96
term244 = term127*term94
term245 = term102*term244
term246 = term126*(term130*term239 + term130*term241 + term131*term238 + term131*term240 + term138*term243 + term141*term245 + term142*term245 + term238*term29 + term238*term30 + term238*term31 + term239*term28 + term240*term29 + term240*term30 + term240*term31 + term241*term28 + term243*term36 + term245*term37 + term245*term39)
term247 = term44*term99
term248 = term149*term99
term249 = term152*term156
term250 = L2*(-term102*term154 - term102*term76 - term102*term79 + term144*term152 - term145*term232 + term145*term233 + term145*term69 + term146*term148 + term146*term19 + term146*term81 - term147*term75 + term148*term150 - term148*term247 - term148*term248 + term150*term159 + term150*term161 + term150*term162 + term150*term81 - term151*term157 - term151*term165 + term152*term155 + term152*term71 + term152*term72 + term152*term83 + term154*term99 + term156*term164 - term157*term75 + term159*term163 - term161*term248 + term161*term249 + term163*term81 - term165*term75 - term19*term247 + term19*term249 - term73*term99 + term76*term99 + term79*term99)
term251 = 2*term244
term252 = term156*term251
term253 = term197*term244
term254 = term177*term3
term255 = term254*term77
term256 = L2*(-term100*term170 - term100*term186 + term103*term168 + term110*term188 + term110*term193 + term111*term188 + term111*term193 + term112*term252 + term122*term149*term189 + term122*term252 + term123*term252 + term13*term253 + term13*term255 + term149*term177*term57 + term149*term190 + term149*term196 + term15*term253 + term15*term255 - term150*term185*term93 + term173*term94 + term174*term65 - term176*term46 + term177*term230 - term177*term55 + term177*term67 + term181*term199 + term182*term199 + term183*term199 - term187*term46 - term191*term46 + term194*term251 + term200*term254 + term244*term63 + term244*term66 + term252*term56)
term257 = dt2*y1
term258 = term132*term156
term259 = dt2*term258
term260 = dt1*term149
term261 = p1*term149
term262 = term147*term242
term263 = term13*term149
term264 = term149*term15
term265 = term149*term180
term266 = term158*term242
term267 = term157*term242
term268 = term88*(dt1*term259 + dt1*term266 + dt2*term121*term132 + p1*term259 + p1*term266 - term107*term207 + term109*term208 + term109*term214 + term110*term206 + term110*term207 + term111*term206 + term111*term207 + term112*term132 + term112*term213 + term122*term132 + term123*term132 + term13*term258 + term13*term267 + term132*term257 + term132*term56 + term15*term258 + term15*term262 + term15*term267 + term179*term262 + term180*term258 + term180*term267 - term184*term90*term94 + term206*term263 + term206*term264 + term206*term265 - term206*term45 + term207*term263 + term207*term264 + term207*term265 - term207*term45 + term208*term260 + term208*term261 - term208*x1 + term213*term257 + term213*term56 + term214*term260 + term214*term261 - term214*x1 + term218*term262)
term269 = (3/4)*L2
term270 = term149 + term269*term90 + term44
term271 = sqrt(term270**2 + (term156 + term269*term93 + term54)**2)
# Now the remaining elements of `f`
f[3] = Mv*x2 - term106 - term125 - term143 - term167 + term2*x1 - term202 - term222 - term24 - term40 - term68 - term87
f[4] = Mv*y2 + term2*y1 - term224 - term229 - term231 - term234 - term236 - term237 - term246 - term250 - term256 - term268
f[5] = Iv*p2 + Z3*s3*term15 - s1*((3/4)*L1 + R*cos(t1))*sqrt((term125 + term24 + term40 + term68 + term87)**2 + (term224 + term229 + term231 + term234 + term237)**2) - s2*term271*sqrt((term106 + term143 + term167 + term202 + term222)**2 + (term236 + term246 + term250 + term256 + term268)**2)*sin(term89 + acos(term270/term271))
return f
| StarcoderdataPython |
3291866 | <filename>examples/mnist.py
from __future__ import absolute_import, division, print_function
import os
import time
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
from odin.training import Trainer
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
tf.random.set_seed(8)
np.random.seed(8)
# ===========================================================================
# Load data
# ===========================================================================
train, valid, test = tfds.load('fashion_mnist:3.0.0',
split=['train[:80%]', 'train[80%:]', 'test'],
read_config=tfds.ReadConfig(
shuffle_seed=1,
shuffle_reshuffle_each_iteration=True))
input_shape = tf.data.experimental.get_structure(train)['image'].shape
def process(data):
image = tf.cast(data['image'], tf.float32)
label = tf.cast(data['label'], tf.float32)
image = (image / 255. - 0.5) * 2.
return image, label
# ===========================================================================
# Test
# ===========================================================================
network = keras.Sequential([
keras.layers.Flatten(input_shape=input_shape),
keras.layers.Dropout(0.3),
keras.layers.Dense(512, activation='relu'),
keras.layers.Dense(256, activation='relu'),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax'),
])
opt = tf.optimizers.Adam(learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07,
amsgrad=False)
def optimize(inputs, training):
X, y_true = inputs
with tf.GradientTape(watch_accessed_variables=bool(training)) as tape:
y_pred = network(X, training=training)
loss = tf.reduce_mean(
tf.losses.sparse_categorical_crossentropy(y_true, y_pred))
acc = tf.cast(y_true == tf.cast(tf.argmax(y_pred, axis=-1), tf.float32),
tf.float32)
acc = tf.reduce_sum(acc) / tf.cast(tf.shape(y_true)[0], tf.float32)
if training:
Trainer.apply_gradients(tape, opt, loss, network)
return loss, acc
def callback():
signal = Trainer.early_stop(trainer.valid_loss, threshold=0.25, verbose=True)
if signal == Trainer.SIGNAL_BEST:
print(" - Save the best weights!")
Trainer.save_weights(network)
elif signal == Trainer.SIGNAL_TERMINATE:
print(" - Restore the best weights!")
Trainer.restore_weights(network)
return signal
trainer = Trainer()
start_time = time.time()
trainer.fit(Trainer.prepare(train,
postprocess=process,
parallel_postprocess=False,
shuffle=True,
epochs=32),
optimize,
valid_ds=Trainer.prepare(valid, postprocess=process),
valid_freq=2500,
autograph=True,
logging_interval=2,
on_valid_end=callback)
print("Total:", time.time() - start_time)
| StarcoderdataPython |
1726388 | <reponame>zauberzeug/rosys
from .communication import Communication
from .communication_factory import CommunicationFactory
from .serial_communication import SerialCommunication
from .web_communication import WebCommunication
| StarcoderdataPython |
1733198 |
if __name__ == "__main__":
final_answer = 0
lookup_values = {')':3,']':57,'}':1197,'>':25137}
lookup_opens = {')':'(',']':'[','}':'{','>':'<'}
with open("10input.txt") as f:
lines = f.readlines()
for line in lines:
stripped_line = list(line.rstrip("\n"))
# The entries here represent, respectively, (paren), [bracket], {curly}, and <tick>
opens_stack = []
for char in stripped_line:
if(char=='(' or char=='[' or char=='{' or char=='<'):
opens_stack.append(char)
elif(char==')' or char==']' or char=='}' or char=='>'):
open = opens_stack.pop()
if(open != lookup_opens[char]):
final_answer += lookup_values[char]
else:
print("I messed up my soup")
print(final_answer)
| StarcoderdataPython |
1728056 | <filename>flowserv/controller/worker/manager.py<gh_stars>1-10
# This file is part of the Reproducible and Reusable Data Analysis Workflow
# Server (flowServ).
#
# Copyright (C) 2019-2021 NYU.
#
# flowServ is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Factory for workers that implement the :class:`flowserv.controller.worker.base.Worker`
class. Workers are used to initiate and control the execution of workflow steps
using different execution backends and implementations.
Instances of worker classes are created from configuration specifications that
follow this schema:
.. code-block:: yaml
definitions:
keyValuePair:
description: Key-value pair object.
properties:
key:
description: Value key.
type: string
value:
anyOf:
- type: integer
- type: string
description: Scalar value associated with the key.
required:
- key
- value
type: object
workerSpec:
description: Specification for a worker engine instance.
properties:
env:
description: Key-value pairs for environment variables.
items:
$ref: '#/definitions/keyValuePair'
type: array
name:
description: Unique worker identifier.
type: string
type:
description: Worker type identifier
enum:
- code
- docker
- subprocess
type: string
vars:
description: Key-value pairs for template string variables.
items:
$ref: '#/definitions/keyValuePair'
type: array
volume:
description: Storage volume the worker has access to.
type: string
required:
- name
- type
type: object
"""
from typing import Dict, List, Optional
from flowserv.controller.worker.base import Worker
from flowserv.controller.worker.code import CodeWorker, CODE_WORKER
from flowserv.controller.worker.config import java_jvm, python_interpreter
from flowserv.controller.worker.docker import DockerWorker, DOCKER_WORKER
from flowserv.controller.worker.docker import NotebookDockerWorker, NOTEBOOK_DOCKER_WORKER
from flowserv.controller.worker.notebook import NotebookEngine, NOTEBOOK_WORKER
from flowserv.controller.worker.subprocess import SubprocessWorker, SUBPROCESS_WORKER
from flowserv.model.workflow.step import WorkflowStep
import flowserv.error as err
import flowserv.util as util
"""Create an instance of the sub-process worker that is used as the default
worker for container steps that do not have a responsible worker defined for
them.
"""
default_container_worker = SubprocessWorker(
variables={'python': python_interpreter(), 'java': java_jvm()}
)
"""Serialization label for worker identifier."""
WORKER_ID = 'name'
class WorkerPool(object):
"""Manager for a pool of worker instances. Workers are responsible for the
initiation and control of the execution of steps in a serial workflow.
Workers are instantiated from dictionary serializations that follow the
`workerSpec` schema defined in the `schema.json` file.
"""
def __init__(
self, workers: Optional[List[Dict]] = list(),
managers: Optional[Dict] = None
):
"""Initialize the specifications for the workers that are managed by
this worker pool and the optional list of task managers for individual
workflow steps.
Parameters
----------
workers: list, default=list
List of worker specifications.
managers: dict, default=None
Mapping from workflow step identifier to worker identifier that
defines the worker that is responsible for the execution of the
respective workflow step.
"""
# Index of worker specifications.
self._workerspecs = {doc['name']: doc for doc in workers}
# Cache for created engine instance.
self._workers = dict()
self.managers = managers if managers is not None else dict()
def get(self, step: WorkflowStep) -> Worker:
"""Get the instance of the worker that is associated with the given
workflow step.
If no worker specification exists for the given step a default worker
is returned. The type of the default worker depends on the type of the
workflow step. For code steps, currently only one type of worker
exists. For container steps, a sub-process worker is used as the default
worker.
Parameters
----------
step: flowserv.model.workflow.step.WorkflowStep
Step in a serial workflow.
Returns
-------
flowserv.controller.worker.base.Worker
"""
# Return the worker that is associated with the given step via the
# manager mapping (if defined).
identifier = self.managers.get(step.name)
if identifier is None:
return self.get_default_worker(step)
# Return the worker from the cache if it exists.
if identifier in self._workers:
return self._workers[identifier]
# Get the worker specification for the container image. Raise an error
# if the identifier is unknown.
if identifier not in self._workerspecs:
raise err.UnknownObjectError(obj_id=identifier, type_name='worker')
# Create the worker and add it to the cache before returning it.
worker = create_worker(self._workerspecs[identifier])
self._workers[identifier] = worker
return worker
def get_default_worker(self, step: WorkflowStep) -> Worker:
"""Return the default worker depending on the type of the given
workflow step.
Parameters
----------
step: flowserv.model.workflow.step.WorkflowStep
Step in a serial workflow.
Returns
-------
flowserv.controller.worker.base.Worker
"""
if step.is_code_step():
return CodeWorker()
elif step.is_container_step():
return default_container_worker
elif step.is_notebook_step():
return NotebookEngine()
raise ValueError(f"unknown step type '{step.step_type}'")
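# Sketch of how a pool is typically wired up (hypothetical identifiers; the
# Subprocess() helper below returns a spec dict in the shape shown above):
#   pool = WorkerPool(workers=[Subprocess(identifier='w0')],
#                     managers={'my-step': 'w0'})
#   worker = pool.get(step)  # step: a WorkflowStep whose name is 'my-step'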
# -- Helper Functions for Worker configurations -------------------------------
def create_worker(doc: Dict) -> Worker:
"""Factory pattern for workers.
Create an instance of a worker implementation from a given worker
serialization.
Parameters
----------
doc: dict
Dictionary serialization for a worker.
Returns
-------
flowserv.controller.worker.base.Worker
"""
identifier = doc['name']
worker_type = doc['type']
env = util.to_dict(doc.get('env', []))
vars = util.to_dict(doc.get('variables', []))
volume = doc.get('volume')
if worker_type == SUBPROCESS_WORKER:
return SubprocessWorker(
variables=vars,
env=env,
identifier=identifier,
volume=volume
)
elif worker_type == DOCKER_WORKER:
return DockerWorker(
variables=vars,
env=env,
identifier=identifier,
volume=volume
)
elif worker_type == CODE_WORKER:
return CodeWorker(identifier=identifier, volume=volume)
elif worker_type == NOTEBOOK_WORKER:
return NotebookEngine(identifier=identifier, volume=volume)
elif worker_type == NOTEBOOK_DOCKER_WORKER:
return NotebookDockerWorker(identifier=identifier, env=env, volume=volume)
raise ValueError(f"unknown worker type '{worker_type}'")
def WorkerSpec(
worker_type: str, identifier: Optional[str] = None, variables: Optional[Dict] = None,
env: Optional[Dict] = None, volume: Optional[str] = None
) -> Dict:
"""Get a serialization for a worker specification.
Parameters
----------
worker_type: string
Unique worker type identifier.
identifier: string, default=None
Unique worker identifier. If no identifier is given, a new unique
identifier will be generated.
variables: dict, default=None
Mapping with default values for placeholders in command template
strings.
env: dict, default=None
Default settings for environment variables when executing workflow
steps. These settings can get overridden by step-specific settings.
volume: string, default=None
Identifier for the storage volume that the worker has access to.
Returns
-------
dict
"""
# Set optional environment and variables dictionaries if not given.
env = env if env is not None else dict()
variables = variables if variables is not None else dict()
doc = {
WORKER_ID: identifier if identifier is not None else util.get_unique_identifier(),
'type': worker_type,
'env': [util.to_kvp(key=k, value=v) for k, v in env.items()],
'variables': [util.to_kvp(key=k, value=v) for k, v in variables.items()]
}
if volume:
doc['volume'] = volume
return doc
def Code(identifier: Optional[str] = None, volume: Optional[str] = None) -> Dict:
"""Get base configuration serialization for a code worker.
Parameters
----------
identifier: string, default=None
Unique worker identifier. If no identifier is given, a new unique
identifier will be generated.
volume: string, default=None
Identifier for the storage volume that the worker has access to.
Returns
-------
dict
"""
return WorkerSpec(
worker_type=CODE_WORKER,
identifier=identifier,
volume=volume
)
def Docker(
identifier: Optional[str] = None, variables: Optional[Dict] = None,
env: Optional[Dict] = None, volume: Optional[str] = None
) -> Dict:
"""Get base configuration for a subprocess worker with the given optional
arguments.
Parameters
----------
identifier: string, default=None
Unique worker identifier. If no identifier is given, a new unique
identifier will be generated.
variables: dict, default=None
Mapping with default values for placeholders in command template
strings.
env: dict, default=None
Default settings for environment variables when executing workflow
steps. These settings can get overridden by step-specific settings.
volume: string, default=None
Identifier for the storage volume that the worker has access to.
Returns
-------
dict
"""
return WorkerSpec(
worker_type=DOCKER_WORKER,
identifier=identifier,
variables=variables,
env=env,
volume=volume
)
def Notebook(identifier: Optional[str] = None, volume: Optional[str] = None) -> Dict:
"""Get base configuration serialization for a notebook worker.
Parameters
----------
identifier: string, default=None
Unique worker identifier. If no identifier is given, a new unique
identifier will be generated.
volume: string, default=None
Identifier for the storage volume that the worker has access to.
Returns
-------
dict
"""
return WorkerSpec(
worker_type=NOTEBOOK_WORKER,
identifier=identifier,
volume=volume
)
def Subprocess(
identifier: Optional[str] = None, variables: Optional[Dict] = None,
env: Optional[Dict] = None, volume: Optional[str] = None
) -> Dict:
"""Get base configuration for a subprocess worker with the given optional
arguments.
Parameters
----------
identifier: string, default=None
Unique worker identifier. If no identifier is given, a new unique
identifier will be generated.
variables: dict, default=None
Mapping with default values for placeholders in command template
strings.
env: dict, default=None
Default settings for environment variables when executing workflow
steps. These settings can get overridden by step-specific settings.
volume: string, default=None
Identifier for the storage volume that the worker has access to.
Returns
-------
dict
"""
return WorkerSpec(
worker_type=SUBPROCESS_WORKER,
variables=variables,
env=env,
identifier=identifier,
volume=volume
)
| StarcoderdataPython |
1724326 | <filename>tests/detections/test_detections.py
'''
Test that computes detections for all entries in the database that do not have
a `labels_path` column populated. The column is populated with the path to the
labels on disk.
Copyright 2020 Voxel51, Inc.
voxel51.com
'''
import pandemic51.core.detections as pand
pand.detect_objects_in_unprocessed_images()
| StarcoderdataPython |
180663 | <reponame>DanSava/showmewhatyougot<gh_stars>0
import requests
import re
import json
import sched
import time
from email.mime.text import MIMEText
import smtplib
import logging
import os
class Scraper(object):
def __init__(self):
self.logger = logging.getLogger('croller')
self.processed_urls = []
self.words_to_find_regex = None
self.main_url = 'https://beneficiar.fonduri-ue.ro:8080'
self.anunturi_url = '{}/anunturi'.format(self.main_url)
self.desc_regex = r'(\/desc-lot\?d=.*?)"'
self.matchObj = None
self.scheduler = sched.scheduler(time.time, time.sleep)
self.scheduler.enter(1, 1, self.run, [])
def find_in_text(self, text):
return len(re.findall(self.words_to_find_regex, text, re.M | re.I)) > 0
def process_url(self, url):
if url not in self.processed_urls:
self.processed_urls.insert(0, url)
if len(self.processed_urls) > 30:
self.processed_urls.pop()
def send_email(self, url):
self.logger.info("Found something interesting: {}".format(url))
# Send the new url as email notification
if url:
sender = os.environ.get('FONDURI_SENDER')
password = os.environ.get('<PASSWORD>')
target = os.environ.get('FONDURI_TARGET')
msg = MIMEText('Posibila postare interesanta: {}\n'.format(url))
msg['Subject'] = 'Postari interesante'
msg['From'] = sender
msg['To'] = target
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
# Next, log in to the server
server.login(sender, password)
# Send the mail
self.logger.info("Sending email to %s" % target)
server.sendmail(sender, target, msg.as_string())
server.quit()
def load_key_words(self):
with open('key_words.json', 'r') as f:
loaded_words = json.load(f)
self.words_to_find_regex = r'({})'.format(loaded_words['words'])
def run(self):
self.logger.info("Starting new run")
try:
self.load_key_words()
response = requests.get(self.anunturi_url)
text = response.text.encode('utf-8').decode('utf-8')
if response.status_code == 200:
for url_part in re.findall(r'a href="/anunturi(/details.*?)"', text, re.M | re.I):
anunt_url ='{}{}'.format(self.anunturi_url, url_part).encode("utf-8")
if anunt_url not in self.processed_urls:
anunt_resp = requests.get(anunt_url)
if anunt_resp.status_code == 200:
self.logger.info("Processing url {}".format(anunt_url.decode('utf-8')))
anunt_text = anunt_resp.text.encode('utf-8').decode('utf-8')
# Find interesting words in anunt page
if self.find_in_text(anunt_text):
self.send_email(anunt_url)
else:
for match in re.findall(self.desc_regex, anunt_text, re.M | re.I):
response = requests.get('{}{}'.format(self.main_url, match))
desc_text = response.text.encode('utf-8').decode('utf-8')
if self.find_in_text(desc_text):
self.send_email(anunt_url)
self.process_url(anunt_url)
except Exception as e:
self.logger.error(e)
self.scheduler.enter(600, 1, self.run, [])
def start(self):
self.scheduler.run()
def setup_logger():
logger = logging.getLogger('croller')
logger.setLevel(logging.INFO)
# create console handler and set level to info
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter
formatter = logging.Formatter('[%(levelname)s]-%(asctime)s: %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
logger.addHandler(ch)
if __name__ == '__main__':
setup_logger()
x = Scraper()
x.start()
| StarcoderdataPython |
1711931 | from portality import models
# these are the ids of the journal and the issn of the erroneous continuation
id = "d241030fda0b419f9aaaf542d57a61af"
issn = "0049-3449"
# details to create the new journal record
name = "<NAME>"
email = "<EMAIL>"
owner = "00493449"
# get the journal and the other journal's bibjson from the history
para = models.Journal.pull(id)
vida = para.get_history_for(issn)
# create a new journal record from the bibjson, and set some default values
new = models.Journal()
new.set_bibjson(vida)
new.set_in_doaj(True)
new.set_application_status("accepted")
new.add_note("Journal separated from erroneous continuation by CL")
new.add_contact(name, email)
new.set_owner(owner)
new.save()
print "Created new record with id", new.id
# remove the erroneous journal from the history, add a note, and re-save the original journal
para.remove_history(issn)
para.add_note("CL removed journal " + issn + " from continuations history; was there erroneously")
para.save()
print "Removed erroneous record from", id | StarcoderdataPython |
1629406 | <filename>Arithmetic Formatter/arithmetic_arranger.py
def arithmetic_arranger(problems, print_ans = False):
count = 1
if len(problems)>5:
return "Error: Too many problems."
line_1 = ""
line_2 = ""
dash_line = ""
ans_line = ""
for problem in problems:
chars = problem.split()
try :
num1 = int(chars[0])
operator = chars[1]
num2 = int(chars[2])
except :
return "Error: Numbers must only contain digits."
if len(chars[0])>4 or len(chars[2])>4 :
return "Error: Numbers cannot be more than four digits."
if chars[1] not in ["+", "-"]:
return "Error: Operator must be '+' or '-'."
if print_ans:
if operator == "+":
answer = num1 + num2
else:
answer = num1 - num2
maxx = num1
if num2>num1:
maxx = num2
str_num1 = str(num1).rjust(len(str(maxx))+2)
str_num2 = operator + str(num2).rjust(len(str(maxx))+1)
line_1 = line_1 + str_num1
line_2 = line_2 + str_num2
dash_line = dash_line + ("-"*(2+len(str(maxx))))
if print_ans:
ans_line = ans_line + str(answer).rjust(len(str(maxx))+2)
if count<len(problems) :
line_1 += " "
line_2 += " "
dash_line += " "
if print_ans:
ans_line += " "
count += 1
if print_ans:
arranged_problems = line_1 + "\n" + line_2 + "\n" + dash_line + "\n" + ans_line
else:
arranged_problems = line_1 + "\n" + line_2 + "\n" + dash_line
return arranged_problems | StarcoderdataPython |
1638421 | """File IO for Flickr 30K images and text captions.
Author: <NAME>
Contact: <EMAIL>
Date: September 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import numpy as np
def load_flickr30k_splits(splits_dir="data/splits/flickr30k",
flickr8k_splits=None):
"""Load train-dev-test splits from Flicker 30k text caption corpus."""
set_dict = {}
for subset in ["train", "dev", "test"]:
if subset not in set_dict:
set_dict[subset] = []
subset_path = os.path.join(
splits_dir, "{}.txt".format(subset))
assert os.path.exists(subset_path)
logging.log(logging.INFO, "Loading Flickr 30k {} split: {}".format(
subset, subset_path))
with open(subset_path) as f:
for line in f:
set_dict[subset].append(os.path.splitext(line.strip())[0])
if flickr8k_splits is not None: # remove flickr 8k images from 30k splits
set_dict = remove_flickr8k_splits(set_dict, flickr8k_splits)
return set_dict
def remove_flickr8k_splits(flickr30k_splits, flickr8k_splits):
"""Remove Flickr 8k images from Flickr 30k train-dev-test splits."""
flickr8k_all = []
for _, uids in flickr8k_splits.items():
flickr8k_all.extend(list(map(lambda uid: uid.split("_")[0], uids)))
flickr30_removed = {}
for subset, uids in flickr30k_splits.items():
uids_removed = []
for uid in uids:
if uid not in flickr8k_all:
uids_removed.append(uid)
flickr30_removed[subset] = uids_removed
return flickr30_removed
def _load_flickr30k_unrelated_captions(splits_dir="data/splits/flickr30k"):
"""Load unrelated image captions from the Flickr 30k text caption corpus."""
path = os.path.join(splits_dir, "UNRELATED_CAPTIONS")
assert os.path.exists(path)
image_uids, caption_numbers = [], []
with open(path, "rb") as f:
next(f) # skip header line
for line in f:
image_uid, caption_number = line.decode("utf8").strip().split(" ")
image_uids.append(image_uid)
caption_numbers.append(str(int(caption_number) - 1))
image_uids = np.asarray(image_uids)
caption_numbers = np.asarray(caption_numbers)
return image_uids, caption_numbers
def load_flickr30k_captions(captions_dir, splits_dir="data/splits/flickr30k",
flickr8k_splits=None):
"""Load Flickr 30k text caption corpus."""
train, val, test = None, None, None
split_dict = load_flickr30k_splits(splits_dir, flickr8k_splits)
captions_path = os.path.join(
captions_dir, "results_20130124.token")
assert os.path.exists(captions_path)
logging.log(logging.INFO, "Loading Flickr 30k text caption corpus: {}".format(
captions_path))
image_uids, captions, caption_numbers = [], [], []
with open(captions_path, "rb") as f:
for line in f:
caption_image, caption = line.decode("utf8").split("\t")
image_uid, caption_number = caption_image.split("#")
image_uid = image_uid.split(".jpg")[0]
image_uids.append(image_uid)
captions.append(str(caption).strip().lower())
caption_numbers.append(caption_number)
# remove unrelated captions
flickr30k_unrelated = _load_flickr30k_unrelated_captions(splits_dir)
def filter_remove_unrelated(index):
unrelated_idx = np.where(flickr30k_unrelated[0] == image_uids[index])[0]
return caption_numbers[index] not in flickr30k_unrelated[1][unrelated_idx]
filter_idx = list(filter(filter_remove_unrelated, range(len(image_uids))))
image_uids = np.asarray(image_uids)[filter_idx]
captions = np.asarray(captions)[filter_idx]
caption_numbers = np.asarray(caption_numbers)[filter_idx]
# split into train-dev-test
train_idx = np.isin(image_uids, split_dict["train"])
val_idx = np.isin(image_uids, split_dict["dev"])
test_idx = np.isin(image_uids, split_dict["test"])
train = (image_uids[train_idx], captions[train_idx], caption_numbers[train_idx])
val = (image_uids[val_idx], captions[val_idx], caption_numbers[val_idx])
test = (image_uids[test_idx], captions[test_idx], caption_numbers[test_idx])
return train, val, test
def fetch_flickr30k_image_paths(images_dir, splits_dir="data/splits/flickr30k",
flickr8k_splits=None):
"""Fetch Flickr 30k image paths corresponding to the caption corpus splits."""
train, val, test = None, None, None
split_dict = load_flickr30k_splits(splits_dir, flickr8k_splits)
image_paths = np.asarray([
os.path.join(images_dir, name) for name in os.listdir(images_dir)])
image_uids = np.asarray([
os.path.splitext(os.path.split(path)[-1])[0] for path in image_paths])
train_idx = np.isin(image_uids, split_dict["train"])
val_idx = np.isin(image_uids, split_dict["dev"])
test_idx = np.isin(image_uids, split_dict["test"])
train = (image_uids[train_idx], image_paths[train_idx])
val = (image_uids[val_idx], image_paths[val_idx])
test = (image_uids[test_idx], image_paths[test_idx])
return train, val, test
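# Illustrative usage sketch (paths are placeholders; the directory layout is
# whatever the splits/captions/images loaders above expect on disk):
#
#   splits = load_flickr30k_splits("data/splits/flickr30k")
#   train, val, test = load_flickr30k_captions("data/flickr30k_captions")
#   train_uids, train_captions, train_caption_numbers = train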
| StarcoderdataPython |
1704261 | <reponame>indigo-dc/dogs_breed_det<filename>dogs_breed_det/config.py
# -*- coding: utf-8 -*-
"""
Module to define CONSTANTS used across the project
"""
import os
from webargs import fields
from marshmallow import Schema, INCLUDE
# identify basedir for the package
BASE_DIR = os.path.dirname(os.path.normpath(os.path.dirname(__file__)))
# default location for input and output data, e.g. directories 'data' and 'models',
# is either set relative to the application path or via environment setting
IN_OUT_BASE_DIR = BASE_DIR
if 'APP_INPUT_OUTPUT_BASE_DIR' in os.environ:
env_in_out_base_dir = os.environ['APP_INPUT_OUTPUT_BASE_DIR']
if os.path.isdir(env_in_out_base_dir):
IN_OUT_BASE_DIR = env_in_out_base_dir
else:
msg = "[WARNING] \"APP_INPUT_OUTPUT_BASE_DIR=" + \
"{}\" is not a valid directory! ".format(env_in_out_base_dir) + \
"Using \"BASE_DIR={}\" instead.".format(BASE_DIR)
print(msg)
DATA_DIR = os.path.join(IN_OUT_BASE_DIR, 'data')
MODELS_DIR = os.path.join(IN_OUT_BASE_DIR, 'models')
Dog_RemoteSpace = 'rshare:/deep-oc-apps/dogs_breed_det/'
Dog_RemoteShare = 'https://nc.deep-hybrid-datacloud.eu/s/D7DLWcDsRoQmRMN/download?path=%2F&files='
Dog_DataDir = 'dogImages'
Dog_WeightsPattern = 'weights.best.NETWORK.3layers.hdf5'
Dog_LabelsFile = os.path.join(DATA_DIR, 'dog_names.txt')
REMOTE_DATA_DIR = os.path.join(Dog_RemoteSpace, 'data')
REMOTE_MODELS_DIR = os.path.join(Dog_RemoteSpace, 'models')
# FLAAT needs a list of trusted OIDC Providers. Here is an extended example:
#[
#'https://b2access.eudat.eu/oauth2/',
#'https://b2access-integration.fz-juelich.de/oauth2',
#'https://unity.helmholtz-data-federation.de/oauth2/',
#'https://login.helmholtz-data-federation.de/oauth2/',
#'https://login-dev.helmholtz.de/oauth2/',
#'https://login.helmholtz.de/oauth2/',
#'https://unity.eudat-aai.fz-juelich.de/oauth2/',
#'https://services.humanbrainproject.eu/oidc/',
#'https://accounts.google.com/',
#'https://aai.egi.eu/oidc/',
#'https://aai-dev.egi.eu/oidc/',
#'https://login.elixir-czech.org/oidc/',
#'https://iam-test.indigo-datacloud.eu/',
#'https://iam.deep-hybrid-datacloud.eu/',
#'https://iam.extreme-datacloud.eu/',
#'https://oidc.scc.kit.edu/auth/realms/kit/',
#'https://proxy.demo.eduteams.org'
#]
#
# we select following three providers:
Flaat_trusted_OP_list = [
'https://aai.egi.eu/oidc/',
'https://iam.deep-hybrid-datacloud.eu/',
'https://iam.extreme-datacloud.eu/',
]
machine_info = { 'cpu': '',
'gpu': '',
'memory_total': '',
'memory_available': ''
}
cnn_list = ['Resnet50', 'InceptionV3', 'VGG16', 'VGG19']
# class / place to describe arguments for predict()
class PredictArgsSchema(Schema):
class Meta:
unknown = INCLUDE # supports extra parameters
network = fields.Str(
required=False,
missing=cnn_list[0],
enum=cnn_list,
description="Neural model to use for prediction"
)
files = fields.Field(
required=False,
missing=None,
type="file",
data_key="data",
location="form",
description="Select the image you want to classify."
)
urls = fields.Url(
required=False,
missing=None,
description="Select an URL of the image you want to classify."
)
# class / place to describe arguments for train()
class TrainArgsSchema(Schema):
class Meta:
unknown = INCLUDE # supports extra parameters
num_epochs = fields.Integer(
required=False,
missing=1,
description="Number of training epochs")
network = fields.Str(
required=False,
missing=cnn_list[0],
enum=cnn_list,
description="Neural model to use")
sys_info = fields.Boolean(
required=False,
missing=False,
enum=[True, False],
description="Print information about the system (e.g. cpu, gpu, memory)")
| StarcoderdataPython |
1603549 | """Compare Pulsar and HabCat coordinates"""
import csv
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle
from astropy import coordinates as coord
def flipra(coordinate):
"""Flips RA coordinates by 180 degrees"""
coordinate = coordinate + 180
if coordinate > 360:
coordinate = coordinate - 360
return coordinate
def flipde(coordinate):
"""Flips RA coordinates by 90 degrees"""
return coordinate * (-1.)
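# Quick worked example of the flips above (used below to look for HabCat
# stars roughly antipodal to each pulsar):
#   flipra(350.0) -> 170.0   (350 + 180 = 530, then 530 - 360 = 170)
#   flipde(-30.0) -> 30.0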
# Load HabCat
habcat_id = []
habcat_ra = []
habcat_de = []
with open('habcat.csv', 'rb') as csvfile:
dataset = csv.reader(csvfile, delimiter=';')
for row in dataset:
habcat_id.append(row[0])
ra = coord.Angle(row[1], unit=u.hour) # Define as hours
habcat_ra.append(ra.degree) # Convert to degree
de = coord.Angle(row[2], unit=u.deg)
habcat_de.append(de.degree)
print len(habcat_id), 'HabCat datalines loaded'
# Load Pulsar catalogue
pulsar_id = []
pulsar_ra = []
pulsar_de = []
pulsar_period = []
with open('pulsar_16msec.csv', 'rb') as csvfile:
dataset = csv.reader(csvfile, delimiter=';')
for row in dataset:
pulsar_id.append(row[0])
ra = coord.Angle(row[1], unit=u.hour) # Define as hours
pulsar_ra.append(ra.degree) # Convert to degree
de = coord.Angle(row[2], unit=u.deg)
pulsar_de.append(de.degree)
pulsar_period.append(row[3])
print len(pulsar_id), 'Pulsar datalines loaded'
# Nested loop through all Pulsars to find closest 180deg HabCat for each
for currentpulsar in range(len(pulsar_id)): # Pulsar loop
shortest_distance = 180 * 60 # set to max, in arcminutes
for currenthabcat in range(len(habcat_id)): # HabCat loop
habcat_coordinate = SkyCoord(
habcat_ra[currenthabcat],
habcat_de[currenthabcat],
unit="deg")
pulsar_coordinate_flipped = SkyCoord( # flip pulsar coordinates
flipra(pulsar_ra[currentpulsar]),
flipde(pulsar_de[currentpulsar]),
unit="deg")
distance = pulsar_coordinate_flipped.separation(habcat_coordinate)
if distance.arcminute < shortest_distance:
shortest_distance = distance.arcminute # New best found
bestfit_pulsar_id = pulsar_id[currentpulsar]
bestfit_habcat_id = habcat_id[currenthabcat]
print bestfit_pulsar_id, bestfit_habcat_id, shortest_distance / 60. # deg
with open('result.csv', 'a') as fp: # Append each result to CSV
a = csv.writer(fp, delimiter=';')
a.writerow([
bestfit_pulsar_id,
bestfit_habcat_id,
shortest_distance / 60.]) # degrees
print 'Done.'
| StarcoderdataPython |
3284562 | <filename>scripts/backup-tool.py
#! /usr/bin/python
"""Create a backup.
"""
import archive.bt
archive.bt.backup_tool()
| StarcoderdataPython |
1701740 | <reponame>datavaluepeople/tentaclio
"""HTTP Stream client."""
import io
from typing import Optional
from urllib import parse
import requests
from tentaclio import protocols
from . import base_client, decorators, exceptions
__all__ = ["HTTPClient"]
DEFAULT_TIMEOUT = 10.0
DEFAULT_HEADERS = {"Accept": "application/json"}
class HTTPClient(
base_client.BaseClient["HTTPClient"]
):
"""HTTP stream client.
This client is useful when dealing inputs that may change from local files to http,
or from s3 to http in simple usecases. We're not intending to wrap requests or rewrite it.
"""
allowed_schemes = ["http", "https"]
conn: requests.Session
timeout: float
headers: dict
protocol: str
username: Optional[str]
password: Optional[str]
hostname: str
port: Optional[int]
endpoint: str
def __init__(
self, url: str, default_timeout: float = None, default_headers: dict = None
) -> None:
"""Create a new http/https client based on the passed url and extra params."""
# Default connection timeout at 10''
self.timeout = default_timeout or DEFAULT_TIMEOUT
# Default JSON response back
self.headers = default_headers or DEFAULT_HEADERS
super().__init__(url)
self.protocol = self.url.scheme
if self.url.hostname is None:
raise exceptions.HTTPError("Missing URL hostname")
self.hostname = self.url.hostname
self.port = self.url.port
self.endpoint = self.url.path
# Enforce no empty credentials
self.username = None if self.url.username == "" else self.url.username
self.password = None if self.url.password == "" else self.url.password
# Connection methods:
def _connect(self) -> requests.Session:
session = requests.Session()
# credentials provided
if self.username and self.password:
session.auth = (self.username, self.password)
# Non-empty header
if self.headers:
session.headers.update(self.headers)
return session
# Stream methods:
@decorators.check_conn
def get(
self,
writer: protocols.ByteWriter,
endpoint: str = None,
params: dict = None,
options: dict = None,
) -> None:
"""Read the contents from the url and write them into the provided writer.
Arguments:
:end_point: Path to append to the url passed in the constructor.
:params: Url params to add
:options: More options for the request library.
"""
url = self._fetch_url(endpoint or "")
request = self._build_request("GET", url, default_params=params)
response = self._send_request(request, default_options=options)
writer.write(response.content)
@decorators.check_conn
def put(
self,
reader: protocols.ByteReader,
endpoint: str = None,
params: dict = None,
options: dict = None,
) -> None:
"""Write the contents of the provided reader into the url using POST.
Arguments:
:end_point: Path to append to the url passed in the constructor.
:params: Url params to add
:options: More options for the request library.
"""
url = self._fetch_url(endpoint or "")
buff = io.StringIO(bytes(reader.read()).decode(encoding="utf-8"))
request = self._build_request("POST", url, default_data=buff, default_params=params)
self._send_request(request, default_options=options)
# Helpers:
def _fetch_url(self, endpoint: str) -> str:
if endpoint == "" and self.endpoint == "":
raise exceptions.HTTPError("Missing URL end point")
# Fetch full base URL
base_url = parse.urlunparse((self.protocol, self.hostname, self.endpoint, "", "", ""))
return parse.urljoin(base_url, endpoint)
def _build_request(
self,
method: str,
url: str,
default_data: protocols.Reader = None,
default_params: dict = None,
):
data = default_data or []
params = default_params or {}
if method == "GET":
# GET uses params
request = requests.Request(method, url, params=params, headers=self.headers)
elif method == "POST":
# POST uses data & params
request = requests.Request(method, url, data=data, params=params, headers=self.headers)
else:
raise NotImplementedError
return self.conn.prepare_request(request)
def _send_request(self, request: requests.PreparedRequest, default_options: dict = None):
options = default_options or {}
response = self.conn.send(
request,
stream=options.get("stream", False),
verify=options.get("verify", False),
proxies=options.get("proxies", {}),
cert=options.get("cert"),
timeout=options.get("timeout", self.timeout),
allow_redirects=options.get("allow_redirects", True),
)
if options.get("check_response", True):
self._check_response(response)
return response
@staticmethod
def _check_response(response: requests.Response) -> None:
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise exceptions.HTTPError(f"{response.status_code}: {response.reason}")
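# Illustrative usage sketch (an assumption, not part of the original module:
# it presumes the BaseClient plumbing provides the usual connect/close
# context-manager lifecycle; URL and params are placeholders):
#
#   buff = io.BytesIO()
#   client = HTTPClient("https://example.com/api/items")
#   with client:
#       client.get(buff, params={"page": "1"})
#   data = buff.getvalue()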
| StarcoderdataPython |
77578 | # small helper script to check results
import fnmatch
import os
import shutil
import time
import json
import sys
liste = []
CHECKFOLDER = sys.argv[1]
"""
# we do not need the pickle files here
for root, dirnames, filenames in os.walk(CHECKFOLDER):
for filename in fnmatch.filter(filenames, 'scenario.pickle'):
#os.remove(os.path.join(root, 'result.pdf'))
print("remove", os.path.join(root, 'scenario.pickle'))
os.remove(os.path.join(root, 'scenario.pickle'))
os.system("rm -rf pdfs")
os.system("mkdir pdfs")
"""
missing = []
for root, dirnames, filenames in os.walk(CHECKFOLDER):
if not 'errors' in root:
for filename in fnmatch.filter(filenames, 'config.json'):
#print("found", filename)
if not os.path.exists(os.path.join(root, 'statistics.json')):
missing.append(root)
error_folder = os.path.join(CHECKFOLDER, 'errors')
if not os.path.exists(error_folder):
os.makedirs(error_folder)
for path in missing:
new_path = path.replace(CHECKFOLDER, '').replace('/custom', '').replace('/run0001', '')
if new_path.startswith('/'):
new_path = new_path[1:]
new_path = os.path.join(error_folder, new_path)
if not os.path.exists(new_path):
os.makedirs(new_path)
print("add errorreport: %s" % new_path)
outfile = os.path.join(path, 'out.txt')
outfile2 = os.path.join(new_path, 'out.txt')
if os.path.exists(outfile):
shutil.copyfile(outfile, outfile2)
config = os.path.join(path, 'config.json')
config2 = os.path.join(new_path, 'config.json')
if os.path.exists(config):
shutil.copyfile(config, config2)
importstring = ""
with open(config, "r") as file:
data = json.loads(file.read())
for k, v in data.items():
if k.startswith('param_'):
importstring += '%s %d\n' % (k, v)
if len(importstring) > 0:
importstring_path = os.path.join(new_path, 'import.txt')
        with open(importstring_path, 'w') as file:
file.write(importstring)
scenario = os.path.join(path, 'scenario.pdf')
scenario2 = os.path.join(new_path, 'scenario.pdf')
if os.path.exists(scenario):
shutil.copyfile(scenario, scenario2)
exit(0)
| StarcoderdataPython |
1740190 | <reponame>UCLCheminformatics/ScaffoldGraph
"""
scaffoldgraph tests.vis.test_vis_utils
"""
import scaffoldgraph.vis.utils as vis_utils
import matplotlib.pyplot as plt
import random
import pytest
import re
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit import Chem
from scaffoldgraph.utils import suppress_rdlogger
from . import long_test_network
SVG_PATTERN = r'(?:<\?xml\b[^>]*>[^<]*)?(?:<!--.*?-->[^<]*)*(?:<svg|<!DOCTYPE svg)\b'
SVG_REGEX = re.compile(SVG_PATTERN, re.DOTALL)
SVG_DIM_PATTERN = r"width='(\d+px)'\s+height='(\d+px)"
SVG_DIM_REGEX = re.compile(SVG_DIM_PATTERN)
HEX_PATTERN = r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$'
HEX_REGEX = re.compile(HEX_PATTERN)
def naive_svg_check(svg_string):
"""Validate SVG format (naive)."""
return SVG_REGEX.match(svg_string) is not None
def svg_dimensions(svg_string):
"""Return dimensions of an (rdkit) SVG string."""
matches = SVG_DIM_REGEX.findall(svg_string)
if not matches:
return (None, None)
dims = map(lambda x: int(x.replace('px', '')), matches[0])
return tuple(dims)
def insert_random_node_attribute(graph, key, high=1, low=0):
"""Add a random attribute to nodes in a graph."""
for _, data in graph.nodes(data=True):
value = random.uniform(low, high)
data[key] = value
def is_valid_hex(hex):
"""Validate hexadecimal color code."""
if hex is None:
return False
if HEX_REGEX.search(hex):
return True
return False
def test_smiles_to_svg():
smi = 'Cc1ccc(NC(=O)c2ccc(CN3CCN(C)CC3)cc2)cc1Nc1nccc(-c2cccnc2)n1'
img = vis_utils.smiles_to_svg(smi) # smiles to SVG
assert img is not None
assert naive_svg_check(img) is True
# Check size updates.
dims = (450, 400)
img = vis_utils.smiles_to_svg(smi, size=dims)
assert svg_dimensions(img) == dims
# Check drawing options (clear background).
drawOpts = rdMolDraw2D.MolDrawOptions()
drawOpts.clearBackground = True
img = vis_utils.smiles_to_svg(smi, draw_options=drawOpts)
assert '</rect>' in img # <rect> exists with background
drawOpts.clearBackground = False
img = vis_utils.smiles_to_svg(smi, draw_options=drawOpts)
assert '</rect>' not in img
@suppress_rdlogger()
def test_smiles_to_image():
    # These aren't particularly great tests for this function...
smi = 'Cc1ccc(NC(=O)c2ccc(CN3CCN(C)CC3)cc2)cc1Nc1nccc(-c2cccnc2)n1'
img = vis_utils.smiles_to_image(smi) # smiles to SVG
assert img is not None
assert img != 'data:image/svg+xml;charset=utf-8,'
null_smi = 'xxx'
img = vis_utils.smiles_to_image(null_smi)
assert img == 'data:image/svg+xml;charset=utf-8,'
def test_embed_node_mol_images(network):
# Embed images into node attributes.
vis_utils.embed_node_mol_images(network)
for _, data in network.nodes(data=True):
img = data.get('img', None)
assert img is not None
# Remove images from node attributes.
vis_utils.remove_node_mol_images(network)
for _, data in network.nodes(data=True):
img = data.get('img', None)
assert img is None
def test_color_nodes_by_attribute(network):
key = 'attr'
insert_random_node_attribute(network, key)
# Color scaffold nodes.
vis_utils.color_scaffold_nodes_by_attribute(network, key, 'BuPu')
for _, data in network.get_scaffold_nodes(data=True):
c = data.get('color', None)
assert c is not None
assert is_valid_hex(c)
# Color molecule nodes.
cmap = plt.get_cmap('hot')
vis_utils.color_molecule_nodes_by_attribute(network, key, cmap, 'col')
for _, data in network.get_molecule_nodes(data=True):
c = data.get('col', None)
assert c is not None
assert is_valid_hex(c)
def test_root_node(network):
vis_utils.add_root_node(network)
assert network.has_node('root') is True
assert network.in_degree('root') == 0
vis_utils.remove_root_node(network)
assert network.has_node('root') is False
| StarcoderdataPython |
3278382 | <reponame>wilkeraziz/smtutils
"""
Extracts minimal phrases from symmetrized word alignments
"""
from collections import defaultdict, deque
def as_words(phrase, id2word):
return tuple(id2word[i] for i in phrase)
def try_expand(f, f2e, f_min, f_max, e_min, e_max):
"""
Try to expand the boundaries of a phrase pair based on the alignment points in f2e[f]
:param f: a position in the source
:param f2e: maps source positions into a sorted list of aligned target positions
:param f_min, f_max: source phrase boundary
:param e_min, e_max: target phrase boundary
:returns: f_min, f_max, e_min, e_max, discovered target positions
"""
# retrieve the target positions reachable from f
es = f2e.get(f, None)
extra = []
# if there is any
if es is not None:
if f_min is None:
# we just discovered that we know something about the source phrase
f_min = f_max = f
if e_min is None: # thus e_max is also None
# we just learnt the first thing about the target phrase
e_min = es[0]
e_max = es[-1]
# basically, we discovered the positions [e_min .. e_max]
extra.extend(xrange(e_min, e_max + 1))
else:
# we have the chance to update our target phrase
if e_min > es[0]:
# we discovered a few extra words on the left
extra.extend(xrange(es[0], e_min))
# and update e_min
e_min = es[0]
if e_max < es[-1]: # update e_max
# we discovered a few extra words to the right
extra.extend(xrange(e_max + 1, es[-1] + 1))
# and update e_max
e_max = es[-1]
return f_min, f_max, e_min, e_max, extra
def minimal_biphrases(f_words, e_words, links):
"""
Returns the minimal phrase pairs
:param f_words: list of source words
:param e_words: list of target words
:param links: list of alignment points
:return: list of tuples (source phrase, target phrase) where a phrase is a list of positions in f_words or e_words
"""
# 1) organise alignment points
# first we group them
f2e = defaultdict(set)
e2f = defaultdict(set)
for i, j in links:
f2e[i].add(j)
e2f[j].add(i)
# then we sort them
f2e = {f:sorted(es) for f, es in f2e.iteritems()}
e2f = {e:sorted(fs) for e, fs in e2f.iteritems()}
# biphrases
biphrases = set()
# 2) find minimal phrase pairs
f_done = set()
e_done = set()
# iterate investigating words in the source
# TODO: sort alignment points as to visit adjacent points first
for fi, ej in links:
        # check if row or column have already been done; if so, the minimal phrase consistent with this alignment point has already been found
if fi in f_done or ej in e_done:
continue
else:
# flag row and column as processed
f_done.add(fi)
e_done.add(ej)
# source phrase boundaries
f_min, f_max = fi, fi
# target phrase boundaries
e_min, e_max = ej, ej
# queue of words whose alignment points need be investigated
f_queue = deque([f_min])
e_queue = deque([e_min])
# for as long as there are words to be visited
while f_queue or e_queue:
if f_queue:
# get a source word
f = f_queue.popleft()
# try to expand the boundaries
f_min, f_max, e_min, e_max, extra = try_expand(f, f2e, f_min, f_max, e_min, e_max)
# book discovered target words
e_queue.extend(extra)
if e_queue:
# get a target word
e = e_queue.popleft()
# try to expand the boundaries (the logic is the same, only transposed)
e_min, e_max, f_min, f_max, extra = try_expand(e, e2f, e_min, e_max, f_min, f_max)
# book discovered source words
f_queue.extend(extra)
# store the minimal phrase
f_phrase = tuple(range(f_min, f_max + 1))
e_phrase = tuple(range(e_min, e_max + 1))
biphrases.add((f_phrase, e_phrase))
return biphrases
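# Tiny worked example of the extraction above (illustrative):
#   f_words = ['a', 'b'], e_words = ['x'], links = [(0, 0), (1, 0)]
#   both source positions reach target position 0, so the only minimal
#   phrase pair is ((0, 1), (0,))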
def unaligned_words(f_words, e_words, biphrases):
"""Find unaligned words
:param f_words: source words
:param e_words: target words
:param biphrases: list of phrase pairs (check `minimal_biphrases`)
:returns: set of unaligned source words, set of unaligned target words
"""
fs = set()
es = set()
for fp, ep in biphrases:
fs.update(fp)
es.update(ep)
return frozenset(range(len(f_words))) - fs, frozenset(range(len(e_words))) - es
def parse_strings(fstr, estr, astr):
"""
:param fstr: source string
:param estr: target string
:param astr: alingment string
:return: list of source words, a list of target words and list of alignment points
where an alignment point is a pair of integers (i, j)
"""
f = fstr.split()
e = estr.split()
a = [map(int, link.split('-')) for link in astr.split()]
return f, e, a
def parse_line(line, separator = ' ||| '):
"""returns the source words, the target words and the alignment points"""
return parse_strings(*line.split(separator))
def read_corpus(istream, separator=' ||| '):
"""
Reads a file containing lines like this:
    source sentence ||| target sentence ||| alignment points
and returns a list where each element is a triple
(source, target, alignment)
and source is a list of source words
target is a list of target words
alignment is a list of pairs (each pair represents an alignment point of the kind (f,e))
"""
return [parse_line(line.strip()) for line in istream]
| StarcoderdataPython |
4804265 | import humanize
import pathlib
from typing import AsyncIterator
async def async_progress(path: pathlib.Path, reader: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
    """Re-yield chunks from ``reader`` while printing a progress bar sized by ``path``."""
def progress(loaded, total, bar_len=30):
filled_len = int(round(bar_len * loaded / total))
empty_len = bar_len - filled_len
loaded = humanize.naturalsize(loaded).replace(" ", "")
total = humanize.naturalsize(total).replace(" ", "")
bar = "=" * filled_len + " " * empty_len
print(f"[{bar}] {loaded}/{total}\r", end="", flush=True)
total = path.stat().st_size
loaded = 0
progress(loaded, total)
async for chunk in reader:
yield chunk
loaded += len(chunk)
progress(loaded, total)
progress(loaded, total)
print("", flush=True)
| StarcoderdataPython |
105817 | # Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FT Redis tests
Unit tests for minemeld.ft.redis
"""
import gevent.monkey
gevent.monkey.patch_all(thread=False, select=False)
import unittest
import mock
import redis
import time
import minemeld.ft.redis
FTNAME = 'testft-%d' % int(time.time())
class MineMeldFTRedisTests(unittest.TestCase):
def setUp(self):
SR = redis.StrictRedis()
SR.delete(FTNAME)
def tearDown(self):
SR = redis.StrictRedis()
SR.delete(FTNAME)
def test_init(self):
config = {}
chassis = mock.Mock()
b = minemeld.ft.redis.RedisSet(FTNAME, chassis, config)
self.assertEqual(b.name, FTNAME)
self.assertEqual(b.chassis, chassis)
self.assertEqual(b.config, config)
self.assertItemsEqual(b.inputs, [])
self.assertEqual(b.output, None)
self.assertEqual(b.redis_skey, FTNAME)
self.assertNotEqual(b.SR, None)
self.assertEqual(b.redis_host, '127.0.0.1')
self.assertEqual(b.redis_port, 6379)
self.assertEqual(b.redis_password, None)
self.assertEqual(b.redis_db, 0)
def test_connect_io(self):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
b = minemeld.ft.redis.RedisSet(FTNAME, chassis, config)
inputs = ['a', 'b', 'c']
output = True
b.connect(inputs, output)
b.mgmtbus_initialize()
self.assertItemsEqual(b.inputs, inputs)
self.assertEqual(b.output, None)
icalls = []
for i in inputs:
icalls.append(
mock.call(
FTNAME, b, i,
allowed_methods=[
'update', 'withdraw', 'checkpoint'
]
)
)
chassis.request_sub_channel.assert_has_calls(
icalls,
any_order=True
)
chassis.request_rpc_channel.assert_called_once_with(
FTNAME,
b,
allowed_methods=[
'update',
'withdraw',
'checkpoint',
'get',
'get_all',
'get_range',
'length'
]
)
chassis.request_pub_channel.assert_not_called()
def test_uw(self):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.redis.RedisSet(FTNAME, chassis, config)
inputs = ['a', 'b', 'c']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
time.sleep(1)
SR = redis.StrictRedis()
b.filtered_update('a', indicator='testi', value={'test': 'v'})
sm = SR.zrange(FTNAME, 0, -1)
self.assertEqual(len(sm), 1)
self.assertIn('testi', sm)
b.filtered_withdraw('a', indicator='testi')
sm = SR.zrange(FTNAME, 0, -1)
self.assertEqual(len(sm), 0)
b.stop()
self.assertNotEqual(b.SR, None)
def test_stats(self):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.redis.RedisSet(FTNAME, chassis, config)
inputs = ['a', 'b', 'c']
output = False
b.connect(inputs, output)
b.mgmtbus_reset()
b.start()
time.sleep(1)
b.filtered_update('a', indicator='testi', value={'test': 'v'})
self.assertEqual(b.length(), 1)
status = b.mgmtbus_status()
self.assertEqual(status['statistics']['added'], 1)
b.filtered_update('a', indicator='testi', value={'test': 'v2'})
self.assertEqual(b.length(), 1)
status = b.mgmtbus_status()
self.assertEqual(status['statistics']['added'], 1)
self.assertEqual(status['statistics']['removed'], 0)
b.filtered_withdraw('a', indicator='testi')
self.assertEqual(b.length(), 0)
status = b.mgmtbus_status()
self.assertEqual(status['statistics']['removed'], 1)
b.stop()
def test_store_value(self):
config = {'store_value': True}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.redis.RedisSet(FTNAME, chassis, config)
inputs = ['a', 'b', 'c']
output = False
b.connect(inputs, output)
b.mgmtbus_reset()
b.start()
time.sleep(1)
SR = redis.StrictRedis()
b.filtered_update('a', indicator='testi', value={'test': 'v'})
sm = SR.zrange(FTNAME, 0, -1)
self.assertEqual(len(sm), 1)
self.assertIn('testi', sm)
sm = SR.hlen(FTNAME+'.value')
self.assertEqual(sm, 1)
b.filtered_withdraw('a', indicator='testi')
sm = SR.zrange(FTNAME, 0, -1)
self.assertEqual(len(sm), 0)
sm = SR.hlen(FTNAME+'.value')
self.assertEqual(sm, 0)
b.stop()
self.assertNotEqual(b.SR, None)
def test_store_value_overflow(self):
config = {'store_value': True}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.redis.RedisSet(FTNAME, chassis, config)
b.max_entries = 1
inputs = ['a', 'b', 'c']
output = False
b.connect(inputs, output)
b.mgmtbus_reset()
b.start()
time.sleep(1)
SR = redis.StrictRedis()
b.filtered_update('a', indicator='testi', value={'test': 'v'})
sm = SR.zrange(FTNAME, 0, -1)
self.assertEqual(len(sm), 1)
self.assertIn('testi', sm)
sm = SR.hlen(FTNAME+'.value')
self.assertEqual(sm, 1)
b.filtered_update('a', indicator='testio', value={'test': 'v'})
self.assertEqual(b.statistics['drop.overflow'], 1)
sm = SR.zrange(FTNAME, 0, -1)
self.assertEqual(len(sm), 1)
self.assertIn('testi', sm)
sm = SR.hlen(FTNAME+'.value')
self.assertEqual(sm, 1)
b.filtered_withdraw('a', indicator='testi')
sm = SR.zrange(FTNAME, 0, -1)
self.assertEqual(len(sm), 0)
sm = SR.hlen(FTNAME+'.value')
self.assertEqual(sm, 0)
b.filtered_update('a', indicator='testio', value={'test': 'v'})
self.assertEqual(b.statistics['drop.overflow'], 1)
sm = SR.zrange(FTNAME, 0, -1)
self.assertEqual(len(sm), 1)
self.assertIn('testio', sm)
sm = SR.hlen(FTNAME+'.value')
self.assertEqual(sm, 1)
b.stop()
self.assertNotEqual(b.SR, None)
| StarcoderdataPython |
3254930 |
from api import celery as celery_app
from api.models import Device as DeviceModel
from api.async.base import DeviceTask
from api.async import celery_logger
from api import device_refresh_redis_conection
import orangengine
REDIS_DEV_REF_INTV_COUNT_PREFIX = 'dev:refresh:interval:count:'
CELERY_BEAT_INTERVAL = 5
@celery_app.task(base=DeviceTask)
def init_devices():
"""Do the first (for full) refresh on all devices and schedule their
periodic refreshes
"""
celery_logger.debug('Running device init')
device_models = init_devices.device_factory.get_all_device_models()
for device_model in device_models:
        # no delay here because we should already be run as a task
# and we do not want to schedule if the refresh fails
refresh_device.delay(device_model.hostname)
if device_model.refresh_interval > 0:
# only some devices wish to be periodically refreshed
schedule_device_refresh(device_model.hostname, device_model.refresh_interval)
@celery_app.task(base=DeviceTask)
def refresh_device(hostname, reschedule=False):
"""Call the orangengine refresh method on the device"""
device = refresh_device.device_factory.get_device(hostname)
if reschedule:
device_model = refresh_device.device_factory.get_device_model(hostname)
schedule_device_refresh(hostname, device_model.refresh_interval)
celery_logger.info('Refreshing device: %s', hostname)
device.refresh()
def schedule_device_refresh(hostname, device_interval):
"""Set the device refresh interval for the given hostname and device_interval
"""
device_key = REDIS_DEV_REF_INTV_COUNT_PREFIX + hostname
unschedule_device_refresh(hostname)
celery_logger.info('Scheduling device refresh interval for %s', hostname)
interval = int(CELERY_BEAT_INTERVAL * round(float(device_interval)/CELERY_BEAT_INTERVAL))
device_refresh_redis_conection.set(device_key, int(interval / CELERY_BEAT_INTERVAL) - 1)
def unschedule_device_refresh(hostname):
"""Remove the device from the refresh schedule
"""
celery_logger.info("Unscheduling refresh for %s" % hostname)
device_key = REDIS_DEV_REF_INTV_COUNT_PREFIX + hostname
device_refresh_redis_conection.delete(device_key)
@celery_app.task(base=DeviceTask)
def beat_interval_runner():
"""Process the beat periodic task to check device refresh interval status
Spin off the refresh task if called for.
"""
celery_logger.info('Running beat interval checks')
device_keys = device_refresh_redis_conection.keys(REDIS_DEV_REF_INTV_COUNT_PREFIX + "*")
for hostname_key in device_keys:
value = int(device_refresh_redis_conection.get(hostname_key))
if value <= 0:
# due for a refresh
hostname = hostname_key.split(REDIS_DEV_REF_INTV_COUNT_PREFIX)[1]
refresh_device.delay(hostname)
device_interval = beat_interval_runner.device_factory.get_device_model(hostname)
device_interval = device_interval.refresh_interval
schedule_device_refresh(hostname, device_interval)
else:
# not due for a refresh, so just decrement the counter
device_refresh_redis_conection.set(hostname_key, value - 1)
@celery_app.task(base=DeviceTask)
def deprovision_device(hostname):
"""End the lifecycle for the device with the given hostname
Signal the factory, and unschedule the device
"""
celery_logger.info("Deprovisioning device %s" % hostname)
unschedule_device_refresh(hostname)
deprovision_device.device_factory.delete_device(hostname)
@celery_app.task(base=DeviceTask)
def get_candidate_policy(hostname, profile_name, match_criteria):
"""Generate a candidate policy
"""
celery_logger.debug("get_candidate_policy: match_criteria: %s", match_criteria)
device = get_candidate_policy.device_factory.get_device(hostname)
if isinstance(device, orangengine.drivers.PaloAltoPanoramaDriver):
cp = device.candidate_policy_match(match_criteria, device_group=profile_name)
else:
cp = device.candidate_policy_match(match_criteria)
return cp.serialize()
@celery_app.task(base=DeviceTask)
def apply_candidate_policy(hostname, candidate_policy, commit=False):
"""Apply the candidate policy to the device
"""
celery_logger.debug("apply_candidate_policy: applying candidate policy: %s", candidate_policy)
device = apply_candidate_policy.device_factory.get_device(hostname)
passed = False
#try:
# device.apply_candidate_policy(candidate_policy, commit)
# passed = True
#except Exception as e:
# celery_logger.error(e)
    device.apply_candidate_policy(candidate_policy, commit)
    passed = True
    return passed
| StarcoderdataPython |
196336 | # Copyright 2016 NTT DATA.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from masakari.api.validation import parameter_types
_base = {
'type': 'object',
'properties': {
'segment': {
'type': 'object',
'properties': {
'name': parameter_types.name,
'description': parameter_types.description,
'recovery_method': {
'type': 'string',
'enum': ["auto", "reserved_host",
"auto_priority", "rh_priority"]
},
'service_type': parameter_types.name
},
'additionalProperties': False
}
},
'required': ['segment'],
'additionalProperties': False
}
create = copy.deepcopy(_base)
create['properties']['segment']['required'] = ['name', 'recovery_method',
'service_type']
update = copy.deepcopy(_base)
update['properties']['segment']['anyOf'] = [{'required': ['name']},
{'required': ['description']},
{'required': ['recovery_method']},
{'required': ['service_type']},
]
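# Illustrative example of a request body that satisfies the create schema
# above (values are placeholders):
#
#   {"segment": {"name": "segment-1",
#                "description": "compute rack 1",
#                "recovery_method": "auto",
#                "service_type": "COMPUTE"}}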
| StarcoderdataPython |
117137 | from django import forms
from sysrev.api import PubMed
from sysrev.models import *
from widgets import *
class ProfileForm(forms.ModelForm):
class Meta:
model = User
fields = ("email",)
def clean_email(self):
email = self.cleaned_data.get('email')
if self.instance and self.instance.email == email:
return email
if User.objects.filter(email=email).count():
raise forms.ValidationError('That email address is already taken.')
return email
class ReviewCreateStep1(forms.Form):
title = forms.CharField(max_length=128, label="Review Title")
description = forms.CharField(widget=forms.Textarea, required=False)
invited = forms.CharField(widget=forms.Textarea, required=False, label="Invite Participants", help_text="Enter the email address or username of each participant, one per line")
def clean_invited(self):
invited = self.cleaned_data.get('invited')
invlist = filter(lambda i: i, map(lambda l: str.strip(str(l)), invited.splitlines()))
for invitee in invlist:
try:
if invitee.find("@") == -1:
User.objects.get(username=invitee)
else:
User.objects.get(email=invitee)
except User.DoesNotExist:
raise forms.ValidationError('User '+invitee+' not found.')
return invited
class ReviewCreateStep2(forms.Form):
query = forms.CharField(widget=QueryWidget)
def clean_query(self):
query = self.cleaned_data.get('query')
data = PubMed.get_data_from_query(query)
count = int(data["Count"])
limit = PubMed.get_query_limit()
if count >= limit:
raise forms.ValidationError("""Your query returned %d papers.
It must return fewer than %d papers.
Modify your query and try again.""" % (count, limit))
elif count == 0:
raise forms.ValidationError("Your query did not return any papers.")
return query
class ReviewUpdate(forms.ModelForm):
class Meta:
model = Review
fields = ("title", "description", "query", "participants")
widgets = {
'query': QueryWidget
}
help_texts = {
'query': """Changing the query will remove from or add to the current abstract pool.
Papers in the document, final, and rejected pools will not be affected."""
}
def clean(self):
query = self.cleaned_data.get('query')
data = PubMed.get_data_from_query(query)
count = int(data["Count"])
limit = PubMed.get_query_limit()
if count >= limit:
raise forms.ValidationError("""Your query returned %d papers.
It must return fewer than %d papers.
Modify your query and try again.""" % (count, limit))
elif count == 0:
raise forms.ValidationError("Your query did not return any papers.")
return self.cleaned_data
| StarcoderdataPython |
1606408 | from ..titanic import digital
from ..titanic import gmpmath
from ..titanic.ops import OP
class MPNum(digital.Digital):
# must be implemented in subclasses
@classmethod
def _select_context(cls, *args, ctx=None):
raise ValueError('virtual method: unimplemented')
@classmethod
def _round_to_context(cls, unrounded, ctx=None, strict=False):
raise ValueError('virtual method: unimplemented')
# most operations
def add(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.add, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def sub(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.sub, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def mul(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.mul, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def div(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.div, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def sqrt(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.sqrt, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def fma(self, other1, other2, ctx=None):
ctx = self._select_context(self, other1, other2, ctx=ctx)
result = gmpmath.compute(OP.fma, self, other1, other2, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def neg(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.neg, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def copysign(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.copysign, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def fabs(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.fabs, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def fdim(self, other, ctx=None):
# emulated
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.sub, self, other, prec=ctx.p)
zero = digital.Digital(negative=False, c=0, exp=0)
if result < zero:
return type(self)(negative=False, c=0, exp=0, inexact=False, rc=0)
else:
# never return negative zero
rounded = self._round_to_context(result, ctx=ctx, strict=True)
return type(self)(rounded, negative=False)
def fmax(self, other, ctx=None):
# emulated
ctx = self._select_context(self, other, ctx=ctx)
if self.isnan:
return self._round_to_context(other, ctx=ctx, strict=False)
elif other.isnan:
return self._round_to_context(self, ctx=ctx, strict=False)
else:
return self._round_to_context(max(self, other), ctx=ctx, strict=False)
def fmin(self, other, ctx=None):
# emulated
ctx = self._select_context(self, other, ctx=ctx)
if self.isnan:
return self._round_to_context(other, ctx=ctx, strict=False)
elif other.isnan:
return self._round_to_context(self, ctx=ctx, strict=False)
else:
return self._round_to_context(min(self, other), ctx=ctx, strict=False)
def fmod(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.fmod, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def remainder(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.remainder, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def ceil(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.ceil, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def floor(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.floor, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def nearbyint(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.nearbyint, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def round(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.round, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def trunc(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.trunc, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def acos(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.acos, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def acosh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.acosh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def asin(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.asin, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def asinh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.asinh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def atan(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.atan, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def atan2(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.atan2, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def atanh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.atanh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def cos(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.cos, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def cosh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.cosh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def sin(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.sin, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def sinh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.sinh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def tan(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.tan, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def tanh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.tanh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def exp_(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.exp, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def exp2(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.exp2, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def expm1(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.expm1, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def log(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.log, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def log10(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.log10, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def log1p(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.log1p, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def log2(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.log2, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def cbrt(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.cbrt, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def hypot(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.hypot, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def pow(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
if other.is_zero():
# avoid possibly passing nan to gmpmath.compute
return type(self)(negative=False, c=1, exp=0, inexact=False, rc=0)
result = gmpmath.compute(OP.pow, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def erf(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.erf, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def erfc(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.erfc, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def lgamma(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.lgamma, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def tgamma(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.tgamma, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def isfinite(self):
return not (self.isinf or self.isnan)
# isinf and isnan are properties
# isnormal is implementation specific - override if necessary
def isnormal(self):
return not (
self.is_zero()
or self.isinf
or self.isnan
)
def signbit(self):
return self.negative
| StarcoderdataPython |
186048 | <reponame>vhn0912/python-snippets<filename>notebook/pypdf2_split.py
import PyPDF2
# Split sample1.pdf into two files using PdfFileMerger with page ranges.
# First part: pages before index 2 (the first two pages).
merger = PyPDF2.PdfFileMerger()
merger.append('data/src/pdf/sample1.pdf', pages=PyPDF2.pagerange.PageRange(':2'))
merger.write('data/temp/sample_split1.pdf')
merger.close()
# Second part: pages from index 2 to the end.
merger = PyPDF2.PdfFileMerger()
merger.append('data/src/pdf/sample1.pdf', pages=PyPDF2.pagerange.PageRange('2:'))
merger.write('data/temp/sample_split2.pdf')
merger.close()
| StarcoderdataPython |
3320312 | <reponame>zStartKiller/BotDiscordPython<filename>main.py
# Imports
import discord
import os
import asyncio
import datetime
from discord.ext import commands
from config import settings
intents = discord.Intents.all()
intents.members = True
bot = commands.Bot(intents=intents, command_prefix=commands.when_mentioned_or(settings['prefix']), help_command=None)
bot.remove_command('help')
bot.load_extension('manager')
bot.load_extension('joguinhos')
bot.load_extension('member_join')
now = datetime.datetime.now()
now = now.strftime("%d/%m/%Y | %H:%M")
@bot.event
async def on_ready():
try:
version = "BETA"
print("=============COGS================\n")
print(" " + now)
print("\n=================================")
print(f"Meu nome é: {bot.user}")
print(f"Versão: {version}")
print("Ligado e sem erros!")
print("=================================")
while True:
prefix = (settings['prefix'])
            await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="I'm still being programmed!"))
await asyncio.sleep(10)
await bot.change_presence(activity=discord.Game(name=f'📢 | /help'))
await asyncio.sleep(10)
    except Exception as erro:
        print(f"Error: {erro}")
# Load cogs
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
bot.run(settings['token']) | StarcoderdataPython |
1760036 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
import time
__all__ = ['ResNeXt', 'resnet50', 'resnet101']
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def downsample_basic_block(x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1), out.size(2), out.size(3),
out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = Variable(torch.cat([out.data, zero_pads], dim=1))
return out
class ResNeXtBottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, cardinality, stride=1,
downsample=None):
super(ResNeXtBottleneck, self).__init__()
mid_planes = cardinality * int(planes / 32)
self.conv1 = nn.Conv3d(inplanes, mid_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(mid_planes)
self.conv2 = nn.Conv3d(
mid_planes,
mid_planes,
kernel_size=3,
stride=stride,
padding=1,
groups=cardinality,
bias=False)
self.bn2 = nn.BatchNorm3d(mid_planes)
self.conv3 = nn.Conv3d(
mid_planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNeXt(nn.Module):
def __init__(self,
block,
layers,
sample_size,
sample_duration,
shortcut_type='B',
cardinality=16,
num_classes=400):
self.inplanes = 64
super(ResNeXt, self).__init__()
self.conv1 = nn.Conv3d(
in_channels=2,
out_channels=32,
kernel_size=3,
stride=(1, 1, 1),
padding=1,
bias=False)
self.bn1 = nn.BatchNorm3d(32)
self.bn2 = nn.BatchNorm3d(128)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1)
self.conv2 = nn.Conv3d(
in_channels=32,
out_channels=64,
kernel_size=3,
stride=(1, 2, 2),
padding=1,
bias=False)
self.conv3 = nn.Conv3d(
in_channels=64,
out_channels=64,
kernel_size=3,
stride=(2, 2, 2),
padding=1,
bias=False)
self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type,
cardinality)
self.layer2 = self._make_layer(
block, 64, layers[1], shortcut_type, cardinality, stride=1)
self.layer3 = self._make_layer(
block, 32, layers[1], shortcut_type, cardinality, stride=2)
self.layer4 = self._make_layer(
block, 128, layers[1], shortcut_type, cardinality, stride=2)
last_duration = int(math.ceil(sample_duration / 16))
last_size = int(math.ceil(sample_size / 32))
self.avgpool = nn.AvgPool3d(
(last_duration, last_size, last_size), stride=1)
self.conv3d_8 = nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3, stride=(1, 1, 1), padding=1)
self.conv3d_9 = nn.Conv3d(in_channels=128, out_channels=16, kernel_size=5, stride=(1, 1, 1), padding=0)
self.fc = nn.Linear(cardinality * 32 * block.expansion, num_classes)
self.fc_layer1 = nn.Linear(36864, 128)
self.fc_layer2 = nn.Linear(128, 6)
self.fc1 = nn.Linear(76800, 128)
self.fc2 = nn.Linear(128, 32)
self.fc3 = nn.Linear(32, 6)
for m in self.modules():
if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self,
block,
planes,
blocks,
shortcut_type,
cardinality,
stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(
block(self.inplanes, planes, cardinality, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, cardinality))
return nn.Sequential(*layers)
def forward(self, x):
diary = False
# diary = True
if diary:
print('network input {}'.format(x.shape))
x = self.conv1(x)
print('conv1 {}'.format(x.shape))
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
print('maxpool {}'.format(x.shape))
x = self.conv2(x)
print('conv2 {}'.format(x.shape))
x_0 = self.conv3(x)
print('conv3 {}'.format(x_0.shape))
# time.sleep(30)
x = self.layer1(x)
print('layer1 {}'.format(x.shape))
x = self.layer2(x)
print('layer2 {}'.format(x.shape))
x_1 = self.layer3(x)
print('layer3 {}'.format(x_1.shape))
x_01 = torch.cat((x_0, x_1), 1)
x = self.bn2(x_01)
print('10_bn {}'.format(x.shape))
print('x_0 {}, x_1 {}'.format(x_0.shape, x_1.shape))
x = self.conv3d_8(x)
x = self.relu(x)
print('11_3d {}'.format(x.shape))
x = self.conv3d_9(x)
# x = self.relu(x)
print('12_3d {}'.format(x.shape))
x = x.view(x.size()[0], -1)
x = self.relu(x)
print('13_fl {}'.format(x.shape))
x = self.fc1(x)
x = self.relu(x)
print('15_fc {}'.format(x.shape))
x = self.fc2(x)
x = self.relu(x)
print('17_fc {}'.format(x.shape))
x = self.fc3(x)
print('19_fc {}'.format(x.shape))
time.sleep(30)
else:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.conv2(x)
x_0 = self.conv3(x)
x = self.layer1(x)
x = self.layer2(x)
x_1 = self.layer3(x)
x_01 = torch.cat((x_0, x_1), 1)
x = self.bn2(x_01)
x = self.conv3d_8(x)
x = self.relu(x)
x = self.conv3d_9(x)
x = x.view(x.size()[0], -1)
x = self.relu(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
# x = self.maxpool(x)
#
# x = self.layer1(x)
# x = self.layer2(x)
# x = self.layer3(x)
# x = self.layer4(x)
# x = self.avgpool(x)
#
# x = x.view(x.size(0), -1)
# x = self.fc_layer1(x)
# x = self.relu(x)
# x = self.fc_layer2(x)
# print('output size {}'.format(x.shape))
# time.sleep(30)
# print('we are in resnext101')
return x
class PrevostNet(nn.Module):
def __init__(self):
self.inplanes = 64
super(PrevostNet, self).__init__()
self.conv1 = nn.Conv2d(2, 64, kernel_size=5, stride=2, padding=2,
bias=False)
self.conv2 = nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2,
bias=False)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.conv4 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(576, 6)
def forward(self, x):
show_size = False
# show_size = True
if show_size:
print('input shape {}'.format(x.shape))
x = self.conv1(x)
print('conv1 shape {}'.format(x.shape))
x = self.relu(x)
print('relu shape {}'.format(x.shape))
x = self.conv2(x)
print('conv2 shape {}'.format(x.shape))
x = self.relu(x)
print('relu shape {}'.format(x.shape))
x = self.maxpool(x)
print('maxpool shape {}'.format(x.shape))
x = self.conv3(x)
print('conv3 shape {}'.format(x.shape))
x = self.relu(x)
print('relu shape {}'.format(x.shape))
x = self.conv4(x)
print('conv4 shape {}'.format(x.shape))
x = self.relu(x)
print('relu shape {}'.format(x.shape))
x = self.maxpool(x)
print('maxpool shape {}'.format(x.shape))
x = x.view(x.size(0), -1)
print('view before fc {}'.format(x.shape))
x = self.fc(x)
print('fc {}'.format(x.shape))
time.sleep(30)
else:
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.conv3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.relu(x)
x = self.maxpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x, x
class My3DNet(nn.Module):
def __init__(self):
self.inplanes = 64
super(My3DNet, self).__init__()
self.conv1 = nn.Conv3d(in_channels=1, out_channels=64, kernel_size=(3, 7, 7),
stride=(1, 2, 2), padding=(0, 3, 3), bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.layer1 = nn.Sequential(
nn.Conv3d(in_channels=64, out_channels=64, kernel_size=3,
stride=(1, 1, 1), padding=(0, 1, 1), bias=False),
nn.BatchNorm3d(64),
nn.ReLU()
)
self.conv2 = nn.Conv3d(in_channels=64, out_channels=128, kernel_size=(1, 3, 3),
stride=(1, 2, 2), padding=(0, 1, 1), bias=False)
self.bn2 = nn.BatchNorm3d(128)
self.layer2 = nn.Sequential(
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3,
stride=(1, 1, 1), padding=(0, 1, 1), bias=False),
nn.BatchNorm3d(128),
nn.ReLU()
)
self.conv3 = nn.Conv3d(in_channels=128, out_channels=64, kernel_size=(1, 3, 3),
stride=(1, 2, 2), padding=(0, 1, 1), bias=False)
self.layer3 = nn.Sequential(
nn.Conv3d(in_channels=64, out_channels=64, kernel_size=3,
stride=(1, 1, 1), padding=(0, 1, 1), bias=False),
nn.BatchNorm3d(64),
nn.ReLU()
)
self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
self.conv4 = nn.Conv3d(in_channels=64, out_channels=32, kernel_size=(1, 3, 3),
stride=(1, 2, 2), padding=(0, 1, 1), bias=False)
self.maxpool2 = nn.MaxPool3d(kernel_size=(1, 7, 7), stride=(1, 1, 1), padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(64, 6)
def forward(self, x):
show_size = False
# show_size = True
if show_size:
print('input shape {}'.format(x.shape))
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
print('conv1 shape {}'.format(x.shape))
at1 = self.layer1(x)
print('at1 shape {}'.format(at1.shape))
x = x * at1
print('x shape {}'.format(x.shape))
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
print('conv2 shape {}'.format(x.shape))
at2 = self.layer2(x)
print('at2 shape {}'.format(at2.shape))
x = x * at2
print('x shape {}'.format(x.shape))
x = self.conv3(x)
x = self.bn1(x)
x = self.relu(x)
print('conv3 shape {}'.format(x.shape))
at2 = self.layer3(x)
print('at2 shape {}'.format(at2.shape))
x = x * at2
print('x shape {}'.format(x.shape))
x = self.maxpool1(x)
print('maxpool1 shape {}'.format(x.shape))
x = self.conv4(x)
print('conv4 shape {}'.format(x.shape))
x = self.maxpool2(x)
print('maxpool2 shape {}'.format(x.shape))
x = x.view(x.size(0), -1)
print('view before fc {}'.format(x.shape))
x = self.fc(x)
print('fc {}'.format(x.shape))
time.sleep(30)
else:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
at1 = self.layer1(x)
x = x * at1
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
at2 = self.layer2(x)
x = x * at2
x = self.conv3(x)
x = self.bn1(x)
x = self.relu(x)
at2 = self.layer3(x)
x = x * at2
x = self.maxpool1(x)
x = self.conv4(x)
x = self.maxpool2(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
def resnet50(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)
return model
def resnet101(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], **kwargs)
return model
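# Hypothetical usage sketch (not part of the original file): the factory
# functions above require `sample_size` and `sample_duration` keyword
# arguments; in this variant they only size the unused avgpool/fc path, so
# placeholder values are fine. The forward pass expects 2-channel 3-D input
# (see conv1 with in_channels=2).
def _example_build_resnext():
    net = resnet101(sample_size=112, sample_duration=16, num_classes=6)
    n_params = sum(p.numel() for p in net.parameters())
    return net, n_params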
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.bn1 = nn.BatchNorm3d(32)
self.bn2 = nn.BatchNorm3d(128)
self.relu = nn.ReLU(inplace=True)
self.conv3d_1 = nn.Conv3d(in_channels=2, out_channels=32, kernel_size=3, stride=(1, 1, 1), padding=1)
self.conv3d_2 = nn.Conv3d(in_channels=32, out_channels=32, kernel_size=3, stride=(1, 2, 2), padding=1)
self.conv3d_3 = nn.Conv3d(in_channels=32, out_channels=64, kernel_size=3, stride=(1, 1, 1), padding=1)
self.conv3d_4 = nn.Conv3d(in_channels=64, out_channels=64, kernel_size=3, stride=(2, 2, 2), padding=1)
self.conv3d_5 = nn.Conv3d(in_channels=64, out_channels=128, kernel_size=3, stride=(1, 1, 1), padding=1)
self.conv3d_6 = nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3, stride=(1, 1, 1), padding=1)
self.conv3d_7 = nn.Conv3d(in_channels=128, out_channels=64, kernel_size=3, stride=(1, 1, 1), padding=1)
self.conv3d_8 = nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3, stride=(1, 1, 1), padding=1)
self.conv3d_9 = nn.Conv3d(in_channels=128, out_channels=16, kernel_size=5, stride=(1, 1, 1), padding=0)
# self.conv3d_10 = nn.Conv3d(in_channels=16, out_channels=8, kernel_size=3, stride=(1, 1, 1), padding=1)
self.drop3d_1 = nn.Dropout3d(0.25)
self.drop2d_1 = nn.Dropout2d(0.25)
self.drop2d_2 = nn.Dropout2d(0.1)
# self.fc1 = nn.Linear(38400, 128)
self.fc1 = nn.Linear(76800, 128)
self.fc2 = nn.Linear(128, 32)
self.fc3 = nn.Linear(32, 6)
def forward(self, x):
# diary = True
diary = False
if diary == True:
print('give {}'.format(x.shape))
x = self.conv3d_1(x)
x = self.relu(x)
print('1_3d {}'.format(x.shape))
x = self.conv3d_2(x)
x = self.relu(x)
print('2_3d {}'.format(x.shape))
x = self.bn1(x)
print('3_bn {}'.format(x.shape))
x = self.conv3d_3(x)
x = self.relu(x)
print('4_3d {}'.format(x.shape))
x = self.conv3d_4(x)
x_0 = self.relu(x)
print('5_3d {}'.format(x_0.shape))
x = self.conv3d_5(x_0)
x = self.relu(x)
print('6_3d {}'.format(x.shape))
x = self.conv3d_6(x)
x = self.relu(x)
print('7_3d {}'.format(x.shape))
x = self.conv3d_7(x)
x_1 = self.relu(x)
print('8_3d {}'.format(x_1.shape))
x_01 = torch.cat((x_0, x_1), 1)
print('9_cn {}'.format(x_01.shape))
x = self.bn2(x_01)
print('10_bn {}'.format(x.shape))
x = self.conv3d_8(x)
x = self.relu(x)
print('11_3d {}'.format(x.shape))
x = self.conv3d_9(x)
# x = self.relu(x)
print('12_3d {}'.format(x.shape))
x = x.view(x.size()[0], -1)
x = self.relu(x)
x = self.drop3d_1(x)
print('13_fl {}'.format(x.shape))
x = self.fc1(x)
x = self.relu(x)
# x = self.drop2d_1(x)
print('15_fc {}'.format(x.shape))
x = self.fc2(x)
x = self.relu(x)
# x = self.drop2d_2(x)
print('17_fc {}'.format(x.shape))
x = self.fc3(x)
print('19_fc {}'.format(x.shape))
time.sleep(30)
else:
x = self.conv3d_1(x)
x = self.relu(x)
x = self.conv3d_2(x)
x = self.relu(x)
x = self.bn1(x)
x = self.conv3d_3(x)
x = self.relu(x)
x = self.conv3d_4(x)
x_0 = self.relu(x)
x = self.conv3d_5(x_0)
x = self.relu(x)
x = self.conv3d_6(x)
x = self.relu(x)
x = self.conv3d_7(x)
x_1 = self.relu(x)
x_01 = torch.cat((x_0, x_1), 1)
x = self.bn2(x_01)
x = self.conv3d_8(x)
x = self.relu(x)
x = self.conv3d_9(x)
x = x.view(x.size()[0], -1)
x = self.relu(x)
x = self.drop3d_1(x)
x = self.fc1(x)
x = self.relu(x)
x = self.drop2d_1(x)
x = self.fc2(x)
x = self.relu(x)
x = self.drop2d_2(x)
x = self.fc3(x)
# time.sleep(30)
return x | StarcoderdataPython |
3291757 | <reponame>adeutscher/core-tools
#!/usr/bin/env python
from __future__ import print_function
import csv, os, sys
format_name = "YAML"
#
# Common Colours and Message Functions
###
def _print_message(header_colour, header_text, message, stderr=False):
f=sys.stdout
if stderr:
f=sys.stderr
print("%s[%s]: %s" % (colour_text(header_text, header_colour), colour_text(os.path.basename(sys.argv[0]), COLOUR_GREEN), message), file=f)
def colour_text(text, colour = None):
if not colour:
colour = COLOUR_BOLD
# A useful shorthand for applying a colour to a string.
return "%s%s%s" % (colour, text, COLOUR_OFF)
def enable_colours(force = False):
global COLOUR_PURPLE
global COLOUR_RED
global COLOUR_GREEN
global COLOUR_YELLOW
global COLOUR_BLUE
global COLOUR_BOLD
global COLOUR_OFF
if force or sys.stdout.isatty():
# Colours for standard output.
COLOUR_PURPLE = '\033[1;35m'
COLOUR_RED = '\033[1;91m'
COLOUR_GREEN = '\033[1;92m'
COLOUR_YELLOW = '\033[1;93m'
COLOUR_BLUE = '\033[1;94m'
COLOUR_BOLD = '\033[1m'
COLOUR_OFF = '\033[0m'
else:
# Set to blank values if not to standard output.
COLOUR_PURPLE = ''
COLOUR_RED = ''
COLOUR_GREEN = ''
COLOUR_YELLOW = ''
COLOUR_BLUE = ''
COLOUR_BOLD = ''
COLOUR_OFF = ''
enable_colours()
error_count = 0
def print_error(message):
global error_count
error_count += 1
_print_message(COLOUR_RED, "Error", message, True)
def print_notice(message):
_print_message(COLOUR_BLUE, "Notice", message, True)
def print_usage(message):
_print_message(COLOUR_PURPLE, "Usage", message, True)
def print_warning(message):
_print_message(COLOUR_YELLOW, "Warning", message, True)
#
# Script Functions
###
def convert_list_to_csv(l):
if isinstance(l, dict):
print_warning("Content is a %s object and not a list. Converting as a single-entry CSV." % format_name)
l = [l]
elif not isinstance(l, list):
print_error("Must provide a list of objects.")
return
a = []
for list_count, item in enumerate(l):
if not isinstance(item, dict):
print_error("Row %s is not a %s object." % (colour_text("#%d" % list_count), format_name))
continue
a.append(flattenjson(item, '__'))
if not a:
# No rows to print, immediately return.
print_error("No valid CSV rows detected.")
return
columns = [ x for row in a for x in row.keys() ]
columns = sorted( list( set( columns ) ) )
csv_w = csv.writer( sys.stdout )
csv_w.writerow( columns )
for output_count, i_r in enumerate(a):
try:
csv_w.writerow( map( lambda x: i_r.get( x, "" ), columns ) )
except Exception as e:
print_error("Problem (item %s): %s" % (colour_text("#%d" % output_count), e))
def flattenjson( b, delim ):
val = {}
for i in b.keys():
if isinstance( b[i], dict ):
get = flattenjson( b[i], delim )
for j in get.keys():
val[ i + delim + j ] = get[j]
else:
val[i] = b[i]
return val
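# Illustrative sketch (not part of the original script): flattenjson collapses
# nested mappings into a single level, joining key paths with the delimiter,
# e.g. {"a": {"b": 1}, "c": 2} with "__" becomes {"a__b": 1, "c": 2}.
def _flattenjson_example():
    return flattenjson({"a": {"b": 1}, "c": 2}, "__")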
def get_content(file_handle, title):
try:
        return yaml.safe_load(file_handle.read())
except yaml.error.MarkedYAMLError as e:
# Conveniently, Python's pyyaml module gives a more informative error printout than the JSON module.
if title == "standard input":
print_error("Content of standard input is not in readable %s format: %s (line %d, column %d)" % (format_name, e.problem, e.problem_mark.line + 1, e.problem_mark.column + 1))
else:
print_error("Content of input (%s) is not in readable %s format: %s (line %d, column %d)" % (colour_text(title, COLOUR_GREEN), format_name, e.problem, e.problem_mark.line + 1, e.problem_mark.column + 1))
return None
def main():
file_handle = False
if len(sys.argv) >= 2:
source_file = sys.argv[1]
if source_file == "-":
file_handle = sys.stdin
print_notice("Reading %s content from standard input." % format_name)
elif not os.path.isfile(source_file):
print_error("%s file does not exist: %s" % (format_name, colour_text(source_file, COLOUR_GREEN)))
elif not os.access(source_file, os.R_OK):
print_error("%s file could not be read: %s" % (format_name, colour_text(source_file, COLOUR_GREEN)))
else:
print_error("No %s file path provided." % format_name)
# Quit if there was an argument error.
global error_count
if error_count:
hexit(1)
# Get our content
content = None
try:
if file_handle:
# File handle was already set (stdin)
content = get_content(file_handle, "standard input")
else:
with open(source_file) as file_handle:
content = get_content(file_handle, source_file)
except Exception:
# Unexpected problem
print_error("Problem with getting %s data." % format_name)
# Quit if there was a loading error.
if error_count or not content:
exit(1)
convert_list_to_csv(content)
# Quit with non-zero if there was a problem with list conversion
if error_count:
exit(1)
def hexit(code = 0):
print_usage("%s %s-file" % (colour_text("./%s" % os.path.basename(sys.argv[0]), COLOUR_GREEN), format_name.lower()))
exit(code)
try:
import yaml
except ImportError:
print_error("YAML module for Python is not installed.")
if __name__ == "__main__":
main()
| StarcoderdataPython |
1736118 | <reponame>minhongqi/federated
# Copyright 2018, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Federated EMNIST dataset utilities."""
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from gans.experiments.emnist import emnist_data_utils
BATCH_SIZE = 7
def _summarize_model(model):
model.summary()
print('\n\n\n')
def _get_example_client_dataset():
client_data = tff.simulation.datasets.emnist.get_synthetic()
return client_data.create_tf_dataset_for_client(client_data.client_ids[0])
def _get_example_client_dataset_containing_lowercase():
example_ds = _get_example_client_dataset()
example_image = next(iter(example_ds))['pixels'].numpy()
num_labels = 62
image_list = [example_image for _ in range(num_labels)]
label_list = list(range(num_labels))
synthetic_data = collections.OrderedDict([
('label', label_list),
('pixels', image_list),
])
return tf.data.Dataset.from_tensor_slices(synthetic_data)
def _compute_dataset_length(dataset):
return dataset.reduce(0, lambda x, _: x + 1)
class EmnistTest(tf.test.TestCase):
def test_preprocessed_img_inversion(self):
raw_images_ds = _get_example_client_dataset()
# Inversion turned off, average pixel is dark.
standard_images_ds = emnist_data_utils.preprocess_img_dataset(
raw_images_ds, invert_imagery=False, batch_size=BATCH_SIZE)
for batch in iter(standard_images_ds):
for image in batch:
self.assertLessEqual(np.average(image), -0.7)
# Inversion turned on, average pixel is light.
inverted_images_ds = emnist_data_utils.preprocess_img_dataset(
raw_images_ds, invert_imagery=True, batch_size=BATCH_SIZE)
for batch in iter(inverted_images_ds):
for image in batch:
self.assertGreaterEqual(np.average(image), 0.7)
def test_preprocessed_img_labels_are_case_agnostic(self):
total_num_labels = 62
raw_dataset = _get_example_client_dataset_containing_lowercase()
raw_dataset_iterator = iter(raw_dataset)
num_raw_images = _compute_dataset_length(raw_dataset)
self.assertEqual(num_raw_images, total_num_labels)
processed_dataset = emnist_data_utils.preprocess_img_dataset(
raw_dataset, include_label=True, batch_size=None, shuffle=False)
processed_dataset_iterator = iter(processed_dataset)
num_processed_images = _compute_dataset_length(processed_dataset)
self.assertEqual(num_processed_images, total_num_labels)
for _ in range(total_num_labels):
raw_label = next(raw_dataset_iterator)['label']
if raw_label > 35:
raw_label = raw_label - 26 # Convert from lowercase to capital
processed_label = next(processed_dataset_iterator)[1]
self.assertEqual(raw_label, processed_label)
if __name__ == '__main__':
tf.test.main()
| StarcoderdataPython |
40444 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
LSBLK_OUTPUT = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a
/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a
/dev/mapper/docker-253:1-1050967-pool
/dev/loop2
/dev/mapper/docker-253:1-1050967-pool
"""
LSBLK_OUTPUT_2 = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
"""
LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'}
UDEVADM_UUID = 'N/A'
UDEVADM_OUTPUT = """
UDEV_LOG=3
DEVPATH=/devices/pci0000:00/0000:00:07.0/virtio2/block/vda/vda1
MAJOR=252
MINOR=1
DEVNAME=/dev/vda1
DEVTYPE=partition
SUBSYSTEM=block
MPATH_SBIN_PATH=/sbin
ID_PATH=pci-0000:00:07.0-virtio-pci-virtio2
ID_PART_TABLE_TYPE=dos
ID_FS_UUID=57b1a3e7-9019-4747-9809-7ec52bba9179
ID_FS_UUID_ENC=57b1a3e7-9019-4747-9809-7ec52bba9179
ID_FS_VERSION=1.0
ID_FS_TYPE=ext4
ID_FS_USAGE=filesystem
LVM_SBIN_PATH=/sbin
DEVLINKS=/dev/block/252:1 /dev/disk/by-path/pci-0000:00:07.0-virtio-pci-virtio2-part1 /dev/disk/by-uuid/57b1a3e7-9019-4747-9809-7ec52bba9179
"""
MTAB = """
sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0
selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0
tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0
gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
"""
MTAB_ENTRIES = [
[
'sysfs',
'/sys',
'sysfs',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'
],
['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'],
[
'devtmpfs',
'/dev',
'devtmpfs',
'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755',
'0',
'0'
],
[
'securityfs',
'/sys/kernel/security',
'securityfs',
'rw,nosuid,nodev,noexec,relatime',
'0',
'0'
],
['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'],
[
'devpts',
'/dev/pts',
'devpts',
'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000',
'0',
'0'
],
['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'],
[
'tmpfs',
'/sys/fs/cgroup',
'tmpfs',
'ro,seclabel,nosuid,nodev,noexec,mode=755',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/systemd',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd',
'0',
'0'
],
[
'pstore',
'/sys/fs/pstore',
'pstore',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/devices',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,devices',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/freezer',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,freezer',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/memory',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,memory',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/pids',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,pids',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/blkio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,blkio',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/cpuset',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpuset',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/cpu,cpuacct',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/hugetlb',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,hugetlb',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/perf_event',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,perf_event',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/net_cls,net_prio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio',
'0',
'0'
],
['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'],
[
'/dev/mapper/fedora_dhcp129--186-root',
'/',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'
],
['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'],
[
'systemd-1',
'/proc/sys/fs/binfmt_misc',
'autofs',
'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct',
'0',
'0'
],
['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'],
[
'hugetlbfs',
'/dev/hugepages',
'hugetlbfs',
'rw,seclabel,relatime',
'0',
'0'
],
['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'],
['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'],
[
'/dev/loop0',
'/var/lib/machines',
'btrfs',
'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/',
'0',
'0'
],
['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# A 'none' fstype
['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# lets assume this is a bindmount
['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
[
'/dev/mapper/fedora_dhcp129--186-home',
'/home',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'
],
[
'tmpfs',
'/run/user/1000',
'tmpfs',
'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000',
'0',
'0'
],
[
'gvfsd-fuse',
'/run/user/1000/gvfs',
'fuse.gvfsd-fuse',
'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000',
'0',
'0'
],
['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']]
STATVFS_INFO = {'/': {'block_available': 10192323,
'block_size': 4096,
'block_total': 12868728,
'block_used': 2676405,
'inode_available': 3061699,
'inode_total': 3276800,
'inode_used': 215101,
'size_available': 41747755008,
'size_total': 52710309888},
'/not/a/real/bind_mount': {},
'/home': {'block_available': 1001578731,
'block_size': 4096,
'block_total': 105871006,
'block_used': 5713133,
'inode_available': 26860880,
'inode_total': 26902528,
'inode_used': 41648,
'size_available': 410246647808,
'size_total': 433647640576},
'/var/lib/machines': {'block_available': 10192316,
'block_size': 4096,
'block_total': 12868728,
'block_used': 2676412,
'inode_available': 3061699,
'inode_total': 3276800,
'inode_used': 215101,
'size_available': 41747726336,
'size_total': 52710309888},
'/boot': {'block_available': 187585,
'block_size': 4096,
'block_total': 249830,
'block_used': 62245,
'inode_available': 65096,
'inode_total': 65536,
'inode_used': 440,
'size_available': 768348160,
'size_total': 1023303680}
}
# ['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
BIND_MOUNTS = ['/not/a/real/bind_mount']
CPU_INFO_TEST_SCENARIOS = [
{
'architecture': 'armv61',
'nproc_out': 1,
'sched_getaffinity': set([0]),
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo')).readlines(),
'expected_result': {
'processor': ['0', 'ARMv6-compatible processor rev 7 (v6l)'],
'processor_cores': 1,
'processor_count': 1,
'processor_nproc': 1,
'processor_threads_per_core': 1,
'processor_vcpus': 1},
},
{
'architecture': 'armv71',
'nproc_out': 4,
'sched_getaffinity': set([0, 1, 2, 3]),
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo')).readlines(),
'expected_result': {
'processor': [
'0', 'ARMv7 Processor rev 4 (v7l)',
'1', 'ARMv7 Processor rev 4 (v7l)',
'2', 'ARMv7 Processor rev 4 (v7l)',
'3', 'ARMv7 Processor rev 4 (v7l)',
],
'processor_cores': 1,
'processor_count': 4,
'processor_nproc': 4,
'processor_threads_per_core': 1,
'processor_vcpus': 4},
},
{
'architecture': 'aarch64',
'nproc_out': 4,
'sched_getaffinity': set([0, 1, 2, 3]),
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/aarch64-4cpu-cpuinfo')).readlines(),
'expected_result': {
'processor': [
'0', 'AArch64 Processor rev 4 (aarch64)',
'1', 'AArch64 Processor rev 4 (aarch64)',
'2', 'AArch64 Processor rev 4 (aarch64)',
'3', 'AArch64 Processor rev 4 (aarch64)',
],
'processor_cores': 1,
'processor_count': 4,
'processor_nproc': 4,
'processor_threads_per_core': 1,
'processor_vcpus': 4},
},
{
'architecture': 'x86_64',
'nproc_out': 4,
'sched_getaffinity': set([0, 1, 2, 3]),
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/x86_64-4cpu-cpuinfo')).readlines(),
'expected_result': {
'processor': [
'0', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216',
'1', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216',
'2', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216',
'3', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216',
],
'processor_cores': 2,
'processor_count': 2,
'processor_nproc': 4,
'processor_threads_per_core': 1,
'processor_vcpus': 4},
},
{
'architecture': 'x86_64',
'nproc_out': 4,
'sched_getaffinity': set([0, 1, 2, 3]),
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/x86_64-8cpu-cpuinfo')).readlines(),
'expected_result': {
'processor': [
'0', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
'1', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
'2', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
'3', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
'4', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
'5', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
'6', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
'7', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
],
'processor_cores': 4,
'processor_count': 1,
'processor_nproc': 4,
'processor_threads_per_core': 2,
'processor_vcpus': 8},
},
{
'architecture': 'arm64',
'nproc_out': 4,
'sched_getaffinity': set([0, 1, 2, 3]),
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/arm64-4cpu-cpuinfo')).readlines(),
'expected_result': {
'processor': ['0', '1', '2', '3'],
'processor_cores': 1,
'processor_count': 4,
'processor_nproc': 4,
'processor_threads_per_core': 1,
'processor_vcpus': 4},
},
{
'architecture': 'armv71',
'nproc_out': 8,
'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7]),
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo')).readlines(),
'expected_result': {
'processor': [
'0', 'ARMv7 Processor rev 3 (v7l)',
'1', 'ARMv7 Processor rev 3 (v7l)',
'2', 'ARMv7 Processor rev 3 (v7l)',
'3', 'ARMv7 Processor rev 3 (v7l)',
'4', 'ARMv7 Processor rev 3 (v7l)',
'5', 'ARMv7 Processor rev 3 (v7l)',
'6', 'ARMv7 Processor rev 3 (v7l)',
'7', 'ARMv7 Processor rev 3 (v7l)',
],
'processor_cores': 1,
'processor_count': 8,
'processor_nproc': 8,
'processor_threads_per_core': 1,
'processor_vcpus': 8},
},
{
'architecture': 'x86_64',
'nproc_out': 2,
'sched_getaffinity': set([0, 1]),
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/x86_64-2cpu-cpuinfo')).readlines(),
'expected_result': {
'processor': [
'0', 'GenuineIntel', 'Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz',
'1', 'GenuineIntel', 'Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz',
],
'processor_cores': 1,
'processor_count': 2,
'processor_nproc': 2,
'processor_threads_per_core': 1,
'processor_vcpus': 2},
},
{
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo')).readlines(),
'architecture': 'ppc64',
'nproc_out': 8,
'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7]),
'expected_result': {
'processor': [
'0', 'POWER7 (architected), altivec supported',
'1', 'POWER7 (architected), altivec supported',
'2', 'POWER7 (architected), altivec supported',
'3', 'POWER7 (architected), altivec supported',
'4', 'POWER7 (architected), altivec supported',
'5', 'POWER7 (architected), altivec supported',
'6', 'POWER7 (architected), altivec supported',
'7', 'POWER7 (architected), altivec supported'
],
'processor_cores': 1,
'processor_count': 8,
'processor_nproc': 8,
'processor_threads_per_core': 1,
'processor_vcpus': 8
},
},
{
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo')).readlines(),
'architecture': 'ppc64le',
'nproc_out': 24,
'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
'expected_result': {
'processor': [
'0', 'POWER8 (architected), altivec supported',
'1', 'POWER8 (architected), altivec supported',
'2', 'POWER8 (architected), altivec supported',
'3', 'POWER8 (architected), altivec supported',
'4', 'POWER8 (architected), altivec supported',
'5', 'POWER8 (architected), altivec supported',
'6', 'POWER8 (architected), altivec supported',
'7', 'POWER8 (architected), altivec supported',
'8', 'POWER8 (architected), altivec supported',
'9', 'POWER8 (architected), altivec supported',
'10', 'POWER8 (architected), altivec supported',
'11', 'POWER8 (architected), altivec supported',
'12', 'POWER8 (architected), altivec supported',
'13', 'POWER8 (architected), altivec supported',
'14', 'POWER8 (architected), altivec supported',
'15', 'POWER8 (architected), altivec supported',
'16', 'POWER8 (architected), altivec supported',
'17', 'POWER8 (architected), altivec supported',
'18', 'POWER8 (architected), altivec supported',
'19', 'POWER8 (architected), altivec supported',
'20', 'POWER8 (architected), altivec supported',
'21', 'POWER8 (architected), altivec supported',
'22', 'POWER8 (architected), altivec supported',
'23', 'POWER8 (architected), altivec supported',
],
'processor_cores': 1,
'processor_count': 24,
'processor_nproc': 24,
'processor_threads_per_core': 1,
'processor_vcpus': 24
},
},
{
'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu')).readlines(),
'architecture': 'sparc64',
'nproc_out': 24,
'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
'expected_result': {
'processor': [
'UltraSparc T5 (Niagara5)',
],
'processor_cores': 1,
'processor_count': 24,
'processor_nproc': 24,
'processor_threads_per_core': 1,
'processor_vcpus': 24
},
},
]
| StarcoderdataPython |
3223084 | <gh_stars>1-10
import re
import enchant
from wb_nlp import dir_manager
en_dict = enchant.Dict("en_US")
VALID_TOKEN_PAT = re.compile('^[a-z]+$')
with open(dir_manager.get_data_dir("whitelists", "whitelists", "wordfreq-enwiki-latest-pages-articles.xml.bz2.txt")) as fl:
with open(dir_manager.get_data_dir("whitelists", "whitelists", "wordfreq-enwiki-latest-pages-articles.xml.bz2.pwl.txt"), "w") as wfl:
with open(dir_manager.get_data_dir("whitelists", "whitelists", "wordfreq-enwiki-latest-pages-articles.xml.bz2.non_en.txt"), "w") as nfl:
            for line in fl:
                # each line is "<word> <frequency>"
                word, _count = line.strip().split()
                if VALID_TOKEN_PAT.match(word):
                    # purely alphabetic lowercase tokens go into the personal word list
                    wfl.write(word + "\n")
                    if not en_dict.check(word):
                        # words the en_US dictionary does not recognise go into the non-English list
                        nfl.write(word + "\n")
| StarcoderdataPython |
4817939 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 03 11:30:20 2017
@author: <NAME>
Index forward/backward neighbors and admissibility for monotone sets.
"""
import numpy as np
def admissible_neighbors(index, index_set):
"""Given an index and a monotone index set, find admissible neighboring
indices"""
for_neighbors = forward_neighbors(index)
# find admissible neighbors
for_truefalse = [is_admissible(fn, index_set) for fn in for_neighbors]
adm_neighbors = np.array(for_neighbors)[for_truefalse].tolist()
return adm_neighbors
def is_admissible(index, index_set):
"""Given an index and a monotone index set, check index admissibility"""
back_neighbors = backward_neighbors(index)
for ind_b in back_neighbors:
if ind_b not in index_set:
return False
return True
def forward_neighbors(index):
"""Given a multiindex, return its forward neighbors as a list of
multiindices, e.g. (2,1) --> (3,1), (2,2)"""
N = len(index)
for_neighbors = []
for i in xrange(N):
index_tmp = index[:]
index_tmp[i] = index_tmp[i] + 1
for_neighbors.append(index_tmp)
return for_neighbors
def backward_neighbors(index):
"""Given a multiindex, return its backward neighbors as a list of
multiindices, e.g. (2,2) --> (1,2), (2,1)"""
N = len(index)
back_neighbors = []
for i in xrange(N):
index_tmp = index[:]
if index_tmp[i] > 0:
index_tmp[i] = index_tmp[i] - 1
back_neighbors.append(index_tmp)
return back_neighbors
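# Illustrative usage (not part of the original module): with the monotone set
# {(0,0), (1,0)}, the forward neighbors of (1,0) are (2,0) and (1,1); only
# (2,0) is admissible, since (1,1) is missing its backward neighbor (0,1).
def _example_admissible_neighbors():
    index_set = [[0, 0], [1, 0]]
    return admissible_neighbors([1, 0], index_set)  # -> [[2, 0]]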
| StarcoderdataPython |
3382441 | <reponame>InterstellarLabs/dca<filename>dca/templates/view.py
"""
DCA view template.
It's used to generate views for a model.
"""
# App imports.
from dca.utils import default | StarcoderdataPython |
102516 | """
Top-level package interface: re-exports Timeline and DocumentError.
"""
__version__ = '1.0.0'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 <NAME>'
from .timeline import Timeline
from .schemas import DocumentError
| StarcoderdataPython |
1650991 | from enum import Enum, unique
from .color import Color
@unique
class BrightColor(Enum):
"""Options for bright colors."""
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
MAGENTA = "\033[95m"
BLUE = "\033[94m"
CYAN = "\033[96m"
WHITE = "\033[97m"
BLACK = "\033[90m"
DEFAULT = Color.DEFAULT
OFF = Color.OFF
def __str__(self) -> str:
return str(self.value)
| StarcoderdataPython |
36630 | """
Compartmentalize:
[ ascii art input ]
---------------
| maybe some |
 | sort of auxiliary |
| drawing program |
|
|
\ /
v
 [ lex/parser ] --> [ translator ]
--------- ----------
| grammar | | notes literals|
| to numerical |
| patterns with |
| timestamps |
| and midi meta |
| data |
|
,------------------------'
|
`--> [ sequencer ]
-----------
| do the |
| sequencing |
| duh |
|
|
\ /
v
[ midi sender ]
----------
| communicate to |
| midi external |
"""
| StarcoderdataPython |
1751340 | import enum
import construct
from construct import Struct, Int32ub, Switch, Float32b, Byte, PrefixedArray, Int64ub
from retro_data_structures.adapters.enum_adapter import EnumAdapter
from retro_data_structures.common_types import String, AssetId64, AssetId32
class MetaAnimationType(enum.IntEnum):
Play = 0
Blend = 1
PhaseBlend = 2
Random = 3
Sequence = 4
CharAnimTime = Struct(
time=Float32b,
differential_state=Int32ub, # TODO: use enum
)
def create(asset_id):
meta_bodies = {}
meta = Struct(
type=EnumAdapter(MetaAnimationType),
body=Switch(construct.this.type, meta_bodies),
)
meta_bodies[MetaAnimationType.Play] = Struct(
asset_id=asset_id,
primitive_id=Int32ub,
name=String,
unknown=CharAnimTime,
)
meta_bodies[MetaAnimationType.Blend] = meta_bodies[MetaAnimationType.PhaseBlend] = Struct(
anim_a=meta,
anim_b=meta,
unknown_1=Float32b,
unknown_2=Byte,
)
meta_bodies[MetaAnimationType.Random] = PrefixedArray(
Int32ub,
Struct(
animation=meta,
probability=Int32ub,
),
)
meta_bodies[MetaAnimationType.Sequence] = PrefixedArray(Int32ub, meta)
return meta
MetaAnimation_AssetId32 = create(AssetId32)
MetaAnimation_AssetId64 = create(AssetId64)
by_asset_type = {
Int32ub: MetaAnimation_AssetId32,
Int64ub: MetaAnimation_AssetId64,
}
def dependencies_for(obj, target_game):
if obj.type == MetaAnimationType.Play:
yield "ANIM", obj.body.asset_id
elif obj.type in (MetaAnimationType.Blend, MetaAnimationType.PhaseBlend):
yield from dependencies_for(obj.body.anim_a, target_game)
yield from dependencies_for(obj.body.anim_b, target_game)
elif obj.type == MetaAnimationType.Random:
for anim in obj.body:
yield from dependencies_for(anim.animation, target_game)
elif obj.type == MetaAnimationType.Sequence:
for item in obj.body:
yield from dependencies_for(item, target_game)
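# Illustrative sketch (not part of the original module): parse a serialized
# meta-animation with the 32-bit asset-id variant and collect its ANIM
# dependencies. `raw_bytes` and `game` are placeholders supplied by the caller.
def _example_collect_dependencies(raw_bytes, game):
    meta = MetaAnimation_AssetId32.parse(raw_bytes)
    return list(dependencies_for(meta, game))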
| StarcoderdataPython |
1667500 | """Data module."""
import math
import os
from typing import Collection, Dict, Iterable, List
import numpy as np
import pandas as pd
import torch
from gsea_api.molecular_signatures_db import (
GeneSet,
GeneSets,
MolecularSignaturesDatabase,
)
from spexlvm import config
# logging stuff
logger = config.logger
class Pathways(GeneSets):
"""A collection of pathways/gene sets, wraps GeneSets."""
def __init__(self, gene_sets: Collection[GeneSet], **kwargs):
"""Initialise Pathways.
Parameters
----------
gene_sets : Collection[GeneSet]
"""
super().__init__(gene_sets=gene_sets, **kwargs)
def info(self, verbose: int = 0):
"""Get an overview of this pathway collection.
Parameters
----------
verbose : int, optional
Level of verbosity, by default 0
Returns
-------
str
Raises
------
ValueError
Raised on negative verbosity level
"""
if verbose < 0:
raise ValueError("Invalid verbosity level of %s, please use 0, 1 or 2." % verbose)
info = str(self) + "\n"
if verbose == 1:
info += "Following gene sets are stored:\n"
info += "\n".join([gs.name for gs in self.gene_sets])
elif verbose == 2:
info += "Following gene sets (with genes) are stored:\n"
# double list comprehension is not readable
for gene_sets in self.gene_sets:
info += gene_sets.name + ": " + ", ".join([gene for gene in gene_sets.genes]) + "\n"
return info
def find(self, partial_gene_set_names: Iterable[str]):
"""Perform a simple search given a list of (partial) gene set names.
Parameters
----------
partial_gene_set_names : Iterable[str]
Collection of gene set names
Returns
-------
dict
Search results as a dictionary of {partial_gene_set_names[0]: [GeneSet], ...}
"""
search_results = {partial_gsn: [] for partial_gsn in partial_gene_set_names}
for partial_gsn in partial_gene_set_names:
search_results[partial_gsn] = [
full_gs for full_gs in self.gene_sets if partial_gsn in full_gs.name
]
return search_results
def remove(self, gene_set_names: Iterable[str]):
"""Remove specific pathways.
Parameters
----------
gene_sets : Iterable[str]
List of names (str) of unwanted pathways
Returns
-------
Pathways
"""
return Pathways(
{
GeneSet(name=gene_set.name, genes=gene_set.genes)
for gene_set in self.gene_sets
if gene_set.name not in gene_set_names
}
)
def subset(
self,
genes: Iterable[str],
fraction_available: float = 0.5,
min_gene_count: int = 0,
max_gene_count: int = 0,
keep: Iterable[str] = None,
):
"""Extract a subset of pathways available in a collection of genes.
Parameters
----------
genes : Iterable[str]
List of genes
fraction_available : float, optional
What fraction of the pathway genes should be available
in the genes collection to insert the pathway into the subset,
by default 0.5 (half of genes of a pathway must be present)
min_gene_count : int, optional
Minimal number of pathway genes available in the data
for the pathway to be considered in the subset
max_gene_count : int, optional
Maximal number of pathway genes available in the data
for the pathway to be considered in the subset
keep : Iterable[str]
List of pathways to keep regardless of filters
Returns
-------
Pathways
"""
if keep is None:
keep = []
if not isinstance(genes, set):
genes = set(genes)
pathways_subset = set()
for gene_set in self.gene_sets:
gene_intersection = gene_set.genes & genes # intersection
available_genes = len(gene_intersection)
gene_fraction = available_genes / len(gene_set.genes)
if gene_set.name in keep:
logger.info(
"Keeping a %s out of %s genes (%.2f) from the special gene set '%s'.",
available_genes,
len(gene_set.genes),
gene_fraction,
gene_set.name,
)
if gene_set.name in keep or (
gene_fraction >= fraction_available and available_genes >= min_gene_count
):
if max_gene_count == 0 or available_genes <= max_gene_count:
pathways_subset.add(
GeneSet(
name=gene_set.name,
genes=gene_intersection,
warn_if_empty=False,
)
)
return Pathways(pathways_subset)
def to_mask(self, genes: Iterable[str], sort: bool = False):
"""Generate a binary matrix of pathways x genes.
Parameters
----------
genes : Iterable[str]
List of genes
sort : bool, optional
Whether to sort alphabetically, by default False
Returns
-------
torch.Tensor
"""
gene_sets_list = list(self.gene_sets)
if sort:
gene_sets_list = sorted(gene_sets_list, key=lambda gs: gs.name)
# probably faster than calling list.index() for every gene in the pathways
gene_to_idx = {k: v for k, v in zip(genes, range(len(genes)))}
mask = torch.zeros(len(gene_sets_list), len(genes))
for i, gene_sets in enumerate(gene_sets_list):
for gene in gene_sets.genes:
mask[i, gene_to_idx[gene]] = 1.0
return mask, gene_sets_list
def load_pathways(keep=None):
"""Load pathways from the existing msigdb.
Parameters
----------
keep : list, optional
List of gene set collections, by default None
Returns
-------
Pathways
"""
if keep is None:
keep = ["hallmark", "reactome"]
# load msigdb files located at ./msigdb (.gmt extension)
msigdb = MolecularSignaturesDatabase(os.path.join("..", "msigdb"), version=7.4)
print(msigdb)
# relevant gene sets dictionary
gene_sets = {
"hallmark": "h.all",
"kegg": "c2.cp.kegg",
"reactome": "c2.cp.reactome",
}
# gene_sets = {"hallmark": "h.all"}
# load relevant pathways
pathway_dict = {k: msigdb.load(v, "symbols") for k, v in gene_sets.items() if k in keep}
# concatenate pathways
pathways = Pathways(sum([pathway_dict[k].gene_sets for k in pathway_dict.keys()], ()))
return pathways
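# Illustrative sketch (not part of the original module): typical flow of the
# helpers above -- load hallmark pathways, keep only those well covered by the
# genes in an expression matrix, and build the binary pathway-by-gene mask.
def _example_pathway_mask(gene_names):
    pathways = load_pathways(keep=["hallmark"])
    covered = pathways.subset(gene_names, fraction_available=0.5)
    mask, gene_sets = covered.to_mask(gene_names, sort=True)
    return mask, [gs.name for gs in gene_sets]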
def load_dataset(dataset, subsample_size=0, n_top_genes=0, center=True):
# lambda allows for lazy loading..
dataset_dict = {
"mesc": lambda: load_mesc,
"retina_small": lambda: load_retina_small,
"retina_rod": lambda: load_retina_rod,
"retina_large": lambda: load_retina_large,
}
Y, labels, batch = dataset_dict.get(dataset)()()
if n_top_genes > 0:
Y_var = Y.var()
top_var_col_indices = Y_var.argsort()[-n_top_genes:]
logger.info("Using %s most variable genes", n_top_genes)
Y = Y.iloc[:, top_var_col_indices]
if center:
Y_mean = Y.mean()
if subsample_size > 0:
logger.info("Using a random subsample of %s", subsample_size)
subsample_indices = np.random.choice(Y.shape[0], subsample_size, replace=False)
Y = Y.iloc[subsample_indices]
labels = labels[subsample_indices]
if batch is not None:
batch = batch[subsample_indices]
# center data column-wise, ignoring last columns (labels)
if center:
Y = Y - Y_mean
# all genes have uppercase in pathways
Y.columns = Y.columns.str.upper()
# Y = Y.rename(str.upper, axis='columns')
return Y, labels, batch
def load_mesc():
Y = pd.read_csv(os.path.join(config.DATASET_DIR, "Buettneretal.csv.gz"), compression="gzip")
return Y, Y.index, None
def load_retina_large():
# https://data.humancellatlas.org/explore/projects/8185730f-4113-40d3-9cc3-929271784c2b/project-matrices
# load data from storage
dataset_dir = os.path.join(
"/",
"data",
"aqoku",
"projects",
"spexlvm",
"processed",
)
Y = pd.read_pickle(
os.path.join(
dataset_dir,
"retina.pkl",
)
)
labels = pd.read_csv(
os.path.join(
dataset_dir,
"WongRetinaCelltype.csv",
)
)
labels = labels["annotated_cell_identity.ontology_label"]
batch = Y["batch"]
return Y.drop("batch", axis=1), labels.values, batch.values
def load_retina_rod():
Y, labels, batch = load_retina_large()
# remove dominant cluster
subsample_indices = labels == "retinal rod cell"
Y = Y.iloc[subsample_indices, :]
if batch is not None:
batch = batch[subsample_indices]
labels = labels[subsample_indices]
return Y, labels, batch
def load_retina_small():
Y, labels, batch = load_retina_large()
# remove dominant cluster
subsample_indices = labels != "retinal rod cell"
Y = Y.iloc[subsample_indices, :]
if batch is not None:
batch = batch[subsample_indices]
labels = labels[subsample_indices]
return Y, labels, batch
def generate_toy_dataset(
n_samples: int = 10000,
n_features: int = 200,
n_factors: int = 40,
n_active_features: float = 0.1,
n_active_factors: float = 0.5,
constant_weight: float = 4.0,
):
"""Generate toy dataset for simulated evaluation.
Parameters
----------
n_samples : int, optional
Number of samples, by default 10000
n_features : int, optional
Number of features (genes), by default 200
n_factors : int, optional
Number of factors, by default 40
n_active_features : float, optional
Number or fraction of active genes per factor, by default 0.1
n_active_factors : float, optional
        Number or fraction of active factors, by default 0.5
constant_weight : float, optional
A constant weight to fill in the non-zero elements, by default 4.0
Returns
-------
tuple
w, mask, active factor indices, x, y
"""
if isinstance(n_active_features, float):
n_active_features = (n_active_features, n_active_features)
# convert active features and factors into fractions if > 1.0
n_active_features = tuple(
naft / n_features if naft > 1.0 else naft for naft in n_active_features
)
min_n_active_features, max_n_active_features = n_active_features
if n_active_factors > 1.0:
n_active_factors /= n_factors
w_shape = [n_factors, n_features]
x_shape = [n_samples, n_factors]
true_mask = torch.zeros(w_shape)
constant_w = constant_weight * torch.ones(w_shape)
for factor_idx, naft in enumerate(
np.random.uniform(min_n_active_features, max_n_active_features, n_factors)
):
true_mask[factor_idx] = torch.multinomial(
torch.tensor([1 - naft, naft]),
w_shape[1],
replacement=True,
)
# generate small random values around 0
random_noise = torch.normal(
mean=torch.zeros(w_shape), std=constant_weight / 50 * torch.ones(w_shape)
)
true_w = true_mask * constant_w + random_noise
true_x = torch.normal(mean=torch.zeros(x_shape), std=torch.ones(x_shape))
active_factor_indices = sorted(
np.random.choice(
range(n_factors),
size=math.ceil(n_factors * n_active_factors),
replace=False,
)
)
for row_idx in range(n_factors):
if row_idx not in active_factor_indices:
true_w[row_idx, :] = torch.normal(
torch.zeros(n_features),
std=constant_weight / 50 * torch.ones(n_features),
)
return (
true_w,
true_mask,
active_factor_indices,
true_x,
torch.matmul(true_x, true_w),
)
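if __name__ == "__main__":
    # Small smoke test for the toy generator (sizes are illustrative only);
    # relies on the torch/numpy/math imports at the top of this module.
    w, mask, active, x, y = generate_toy_dataset(n_samples=100, n_features=50, n_factors=10)
    print(w.shape, mask.shape, x.shape, y.shape)  # shapes: (10, 50), (10, 50), (100, 10), (100, 50)
    print(active)  # indices of the simulated active factors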
# === src/algoritmia/problems/binpacking/firstfitbinpacker.py ===
from algoritmia.problems.binpacking.nextfitbinpacker import NextFitBinPacker
class FirstFitBinPacker(NextFitBinPacker):#[full
def pack(self, w: "IList<Real>", C: "Real") -> "IList<int>":
x = [None] * len(w)
free = []
for i in range(len(w)):
for j in range(len(free)):
if free[j] >= w[i]:
x[i] = j
free[j] -= w[i]
break
            if x[i] is None:
x[i] = len(free)
free.append(C-w[i])
        return x#]full
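# Quick usage sketch (weights and capacity are made-up values; assumes
# NextFitBinPacker needs no constructor arguments):
if __name__ == "__main__":
    packer = FirstFitBinPacker()
    assignment = packer.pack([4, 8, 1, 4, 2, 1], 10)
    print(assignment)  # assignment[i] is the index of the bin that receives item i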
# === pasta96/django-drf-react-example ===
from django.contrib.auth import get_user_model
from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('id', get_user_model().USERNAME_FIELD)
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = models.Post
fields = ('id', 'owner', 'title', 'image', 'uploaded_at')
owner = UserSerializer()
class UserPostSerializer(serializers.ModelSerializer):
class Meta:
model = models.Post
fields = ('id', 'owner', 'title', 'image', 'uploaded_at')
owner = UserSerializer(read_only=True)
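# Example of the nested representation produced by PostSerializer
# (field values below are illustrative only):
#
#     {"id": 1, "title": "...", "image": "...", "uploaded_at": "...",
#      "owner": {"id": 7, "username": "alice"}}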
# === (new file) ===
'''
Python regular expression (re module) examples.
'''
import re
def test_match():
s = 'hello python Hello'
p = 'hello'
o = re.match(p, s)
print(o)
print(dir(o))
    print(o.group())  # the matched string
    print(o.span())   # (start, end) span of the match
    print(o.start())  # start position of the match
    print('*' * 30, 'using the flags parameter')
    o2 = re.match(p, s, re.I)  # re.L cannot be combined with str patterns in Python 3; use re.I here
    print(o2.group())  # the matched string
# Common character classes
def test_match_character():
print('-' * 30, ' . 匹配任意一个字符')
print(re.match('.', 'abv'))
print(re.match('.', '12'))
print(re.match('.', '\n'))
print('-' * 30, ' \d 匹配数字 0-9')
print(re.match('\d', 'abc456'))
print(re.match('\d', '234svd'))
print('-' * 30, ' \D 匹配非数字 0-9')
print(re.match('\D', 'abc456'))
print(re.match('\D', '234svd'))
print('-' * 30, ' \s 匹配空白字符')
print(re.match('\s', '\n12\t'))
print(re.match('\s', '\t'))
print(re.match('\s', 'addd'))
print('-' * 30, ' \S 匹配非空白字符')
print(re.match('\S', '\n12\t'))
print(re.match('\S', '\t'))
print(re.match('\S', 'addd'))
print('-' * 30, ' \w 匹配字母、数字')
print(re.match('\w', 'AB'))
print(re.match('\w', 'ab'))
print(re.match('\w', '12'))
print(re.match('\w', '__'))
print(re.match('\w', '##'))
print('-' * 30, ' \W 匹配非 字母、数字')
print(re.match('\W', 'AB'))
print(re.match('\W', 'ab'))
print(re.match('\W', '12'))
print(re.match('\W', '__'))
print(re.match('\W', '##'))
print('-' * 30, ' \[] 匹配列表中的字符')
print(re.match('[2468]', '22'))
print(re.match('[2468]', '33'))
print(re.match('[2468]', '83'))
print(re.match('[2468]', '38'))
def test_match_phone():
print('-' * 30, ' 匹配手机号')
patten = '\d\d\d\d\d\d\d\d\d\d\d'
print(re.match(patten, '13466669999'))
print(re.match('1[345789]\d\d\d\d\d\d\d\d\d', '13466669999'))
# Quantifiers
def test_match_qualifier():
print('-' * 30, ' * 匹配零次或多次')
    print(re.match('\d*', '123abc'))  # matches the leading digits (zero or more)
print(re.match('\d*', 'abc'))
print('-' * 30, ' + |匹配一次或多次')
    print(re.match('\d+', '123abc'))  # matches the leading digits (one or more)
print(re.match('\d+', 'abc'))
print('-' * 30, ' ? |匹配一次或零次')
print(re.match('\d?', '1abc'))
    print(re.match('\d?', '123abc'))  # matches at most one leading digit
print(re.match('\d?', 'abc'))
print('-' * 30, ' {m} |重复m次')
    print(re.match('\d{2}', '123abc'))  # matches the first two digits
print(re.match('\d{2}', '12abc'))
print(re.match('\d{2}', '1abc'))
print(re.match('\d{2}', 'abc'))
print('-' * 30, '{m,n}|重复m到n次')
    print(re.match('\d{1,3}', '1234abc'))  # matches up to three leading digits
print(re.match('\d{1,3}', '123abc'))
print(re.match('\d{1,3}', '12abc'))
print(re.match('\d{1,3}', '1abc'))
print(re.match('\d{1,3}', 'abc'))
print('-' * 30, '{m,}|至少m次')
    print(re.match('\d{2,}', '1234abc'))  # matches two or more leading digits
print(re.match('\d{2,}', '123abc'))
print(re.match('\d{2,}', '12abc'))
print(re.match('\d{2,}', '1abc'))
print(re.match('\d{2,}', 'abc'))
print('-' * 30, '案例1 首字母为大写字符,其他小写字符')
print(re.match('[A-Z][a-z]*', 'abc'))
print(re.match('[A-Z][a-z]*', 'ABC'))
print(re.match('[A-Z][a-z]*', 'Abc'))
print(re.match('[A-Z][a-z]*', 'AbC'))
print('-' * 30, '案例2 有效变量名 字母数字下划线,数字不开头')
print(re.match('[a-zA-Z_][a-zA-Z0-9_]*', 'abc'))
print(re.match('[a-zA-Z_]\w*', 'abc'))
print(re.match('[a-zA-Z_][a-zA-Z0-9_]*', 'abc123'))
print(re.match('[a-zA-Z_]\w*', '123abc'))
print(re.match('[a-zA-Z_]\w*', '_123abc'))
print('-' * 30, '案例2 1-99的数字')
print(re.match('[1-9]\d?', '23abc'))
print(re.match('[1-9]\d?', '100'))
print(re.match('[1-9]\d?', '11'))
print(re.match('[1-9]\d?', '1'))
print(re.match('[1-9]\d?', '0'))
print(re.match('[1-9]\d?', '09'))
print('-' * 30, '案例2 8-20随机密码 大写,小写,下划线,数字')
print(re.match('\w{8,20}', '1234567'))
print(re.match('\w{8,20}', '1234567$$'))
print(re.match('\w{8,20}', '1234567abc_'))
print(re.match('\w{8,20}', '1234567abc#'))
print(re.match('\w{8,20}', '12345678901234567890zx'))
# Escape characters and raw strings
def escape_character():
print('C:\t\d\e')
print('C:\\t\\d\\e')
print(r'C:\t\d\e')
# Boundary anchors
def boundary():
print('-' * 30, '$ 匹配字符串结尾')
print(re.match('[1-9]\d{4,<EMAIL>', '<EMAIL>'))
print(re.match('[1-9]\d{4,9}@qq.<EMAIL>', '<EMAIL>'))
print(re.match(r'[1-9]\d{4,9}@qq.<EMAIL>$', '<EMAIL>'))
print(re.match(r'[1-9]\d{<EMAIL>$', '<EMAIL>'))
print('-' * 30, ' ^ 匹配字符串开头')
print(re.match(r'^hello.*', 'hello abc'))
print(re.match(r'^hello.*', 'abc hello abc'))
print('-' * 30, ' \b 匹配单词的边界')
    print(re.match(r'.*\bab', '123 aabc'))  # 'ab' at the start of a word
print(re.match(r'.*\bab', '123 abcd'))
print(re.match(r'.*\bab', '123 aaa'))
print(re.match(r'.*\bab', '123 abcd cdab'))
    print(re.match(r'.*ab\b', '123 abc'))  # 'ab' at the end of a word
print(re.match(r'.*ab\b', '123 aaa'))
print(re.match(r'.*ab\b', '123 ab'))
print(re.match(r'.*ab\b', '123 cdab'))
print(re.match(r'.*ab\b', '123 abcd cdab'))
def test_search():
print(re.match(r'hello', 'hello python'))
print(re.search(r'hello', 'hello python'))
print(re.match(r'hello', 'python hello'))
print(re.search(r'hello', 'python hello '))
print(re.match('aa|bb|cc', 'aa'))
print(re.match('aa|bb|cc', 'bbb'))
print(re.match('aa|bb|cc', 'ccc'))
print(re.match('aa|bb|cc', 'a bb ccc'))
print(re.search('aa|bb|cc', 'a bb ccc'))
# Matching several characters / alternation
def test_multi_character():
print('-' * 30, '案例 0-100之间的数字: 0-99 | 100')
print(re.match('[1-9]?\d|100', '1'))
print(re.match('[1-9]?\d|100', '11'))
print(re.match('[1-9]?\d|100', '100'))
print(re.match('[1-9]?\d$|100$', '100'))
print(re.match('[1-9]?\d$|100$', '1000'))
print('-' * 30, '案例 ')
print(re.match('[ab][cd]', 'ab'))
print(re.match('[ab][cd]', 'ac'))
print(re.match('[ab][cd]', 'ad'))
print(re.match('ab|cd', 'abc'))
print(re.match('ab|cd', 'ac'))
# Match groups
def test_group():
print('-' * 30, '座机号码 区号{3,4} 号码{5,8} 010-0000 0791-222222')
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '010-10086'))
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '010-88888888'))
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '1111-10086'))
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '1111-88888888'))
print('-' * 30, ' 匹配分组')
o = re.match(r'(\d{3,4})-([1-9]\d{4,7})', '1111-88888888')
print(o)
print(o.group(0), o.group(1), o.group(2))
print(o.groups(), o.groups()[0], o.groups()[1])
print('-' * 30, 'html 标签')
print(re.match(r'<.+><.+>.+</.+></.+>', '<html><a>abc</a></html>'))
print(re.match(r'<.+><.+>.+</.+></.+>', '<html><a>abc</b></html>'))
print(re.match(r'<(.*)><(.*)>.*</\2></\1>', '<html><a>abc</b></html>'))
print(re.match(r'<(.*)><(.*)>.*</\2></\1>', '<html><d>abc</d></html>'))
print('-' * 30, 'html 标签 - 别名')
print(re.match(r'<(?P<k_html>.+)><(?P<k_head>.+)>.*</(?P=k_head)></(?P=k_html)>', '<html><d>abc</d></html>'))
## Search and replace
def test_sub():
print('-' * 30, ' 替换')
    print(re.sub(r'#.*$', '', '2004-222-23322 # 这是个什么'))  # strip the part starting with '#'
print(re.sub(r'#\D*', '', '2004-222-23322 # 这是个什么'))
print('-' * 30, ' 替换 subn')
print(re.subn(r'#\D*', '', '2004-222-23322 # 这是个什么'))
print(re.subn(r'#.*$', '', '2004-222-23322 # 这是个什么'))
def test_compile():
print('-' * 30, ' compile的使用')
regex = re.compile(r'\w+') # 匹配字母或数字
print(regex.match('1223dfdf'))
print(regex.match('##1223dfdf'))
def test_findall():
print('-' * 30, ' findall 返回数组')
print(re.findall(r'\w', '##1223dfdf')) # 匹配字母或数字 f
print(re.findall(r'\w+', '## 1223 df df 1'))
print('-' * 30, ' finditer 返回迭代器')
print(re.finditer(r'\w+', '## 1223 df df 1'))
for i in re.finditer(r'\w+', '## 1223 df df 1'):
print(i, i.group())
def test_split():
print('-' * 30, ' split 返回数组')
print(re.split(r'\d+', '123abc123abc'))
print(re.split(r'\d+', '123 abc 123 abc'))
print(re.split(r'\d+', 'abc123 abc 123 abc'))
print(re.split(r'\d+', 'abc 123 abc 123 abc',1))
def greedy_mode():
print('-' * 30, ' 贪婪模式')
result = re.match(r'(.+)(\d+-\d+-\d+)', 'this is my tel: 122-1244-1242')
print(result.group(1))
print(result.group(2))
print('-' * 30, ' 非贪婪模式 尽可能少的匹配')
result = re.match(r'(.+?)(\d+-\d+-\d+)', 'this is my tel: 122-1244-1242')
print(result.group(1))
print(result.group(2))
print('-' * 30, ' 贪婪模式')
print(re.match(r'abc(\d+)', 'abc123456'))
print(re.match(r'abc(\d+?)', 'abc123456'))
if __name__ == '__main__':
# test_match()
# test_match_character()
# test_match_phone()
# test_match_qualifier()
# escape_character()
# boundary()
# test_search()
# test_multi_character()
# test_group()
# test_sub()
# test_compile()
# test_findall()
# test_split()
# greedy_mode()
# <.+><.+>.+</.+></.+>
s = '<link href="../assets/css/app.css?t=20112455" type="text/css" rel="stylesheet">'
    matched = re.findall(r'\S+assets/css/\S+.css\S+"', s)
    for m in matched:
print(m, m.index('.css'))
s = s.replace(m, m[:m.index('.css')] + '.css?t=00000"')
print(s)
# === (new file) ===
import FWCore.ParameterSet.Config as cms
fftjetVertexAdder = cms.EDProducer(
"FFTJetVertexAdder",
#
# Label for the beam spot info
beamSpotLabel = cms.InputTag("offlineBeamSpot"),
#
# Label for an existing collection of primary vertices
existingVerticesLabel = cms.InputTag("offlinePrimaryVertices"),
#
# Label for the output collection
outputLabel = cms.string("FFTJetFudgedVertices"),
#
# Do we want to use the beam spot info from the event data
    # in order to generate the vertices?
useBeamSpot = cms.bool(True),
#
    # Do we want to add an existing collection (labeled by "existingVerticesLabel"
# defined above) to the fake vertices?
addExistingVertices = cms.bool(False),
#
# If we are not using the beam spot, what would be the average
# position of the generated vertices?
fixedX = cms.double(0.0),
fixedY = cms.double(0.0),
fixedZ = cms.double(0.0),
#
# If we are not using the beam spot, what would be the vertex spread?
sigmaX = cms.double(0.0014),
sigmaY = cms.double(0.0014),
sigmaZ = cms.double(6.0),
#
# Parameters of the vertex to generate (these are not varied)
nDof = cms.double(10.0),
chi2 = cms.double(10.0),
errX = cms.double(0.001),
errY = cms.double(0.001),
errZ = cms.double(0.01),
#
# How many fake vertices should we make?
nVerticesToMake = cms.uint32(1)
)
# === ResonantGeoData/ResonantGeoData ===
# Generated by Django 3.2.5 on 2021-07-13 10:17
import django.contrib.postgres.fields.ranges
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rgd_imagery', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='bandmeta',
name='band_range',
field=django.contrib.postgres.fields.ranges.DecimalRangeField(
help_text='The spectral range of the band (in micrometers).', null=True
),
),
]
# === (new file) ===
import os
import random
from typing import Any, Dict
import numpy as np
# from mmcls.models.losses import accuracy, f1_score, precision, recall
# from mmcls.models.losses.eval_metrics import class_accuracy
from .base_dataset import BaseDataset
from .builder import DATASETS
# 0 1 2 3 4 5 6 7
FER_CLASSES = ['Anger', 'Disgust', 'Fear', 'Sadness', 'Happiness', 'Surprise', 'Neutral', 'Contempt']
def gen_class_map(dataset_class):
"""
generate the convert map from DATASET_CLASSES to FER_CLASSES
"""
convert_map = []
for i in dataset_class:
convert_map.append(FER_CLASSES.index(i))
assert sum(convert_map) == sum([i for i in range(len(dataset_class))])
return convert_map
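# Example: for a dataset whose classes are listed as
#   ['Neutral', 'Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Contempt']
# gen_class_map(...) returns [6, 0, 1, 2, 4, 3, 5, 7], i.e. the position of each
# dataset class inside FER_CLASSES (this ordering is illustrative, not taken from a real dataset).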
@DATASETS.register_module()
class ABAW3(BaseDataset):
CLASSES = [
'Neutral',
'Anger',
'Disgust',
'Fear',
'Happiness',
'Sadness',
'Surprise',
'Other'
]
COARSE_CLASSES = [
'Neutral',
'Happiness',
'Surprise',
'Other',
'Negative'
]
coarse_map = [0, 4, 4, 4, 1, 4, 2, 3]
NEGATIVE_CLASSES = [
'Anger',
'Disgust',
'Fear',
'Sadness',
]
negative_map = [-1, 0, 1, 2, -1, 3, -1, -1]
    task = 'all'  # 'all', 'coarse', 'negative'
def __init__(self,
data_prefix,
pipeline,
classes=None,
ann_file=None,
test_mode=False):
super().__init__(data_prefix, pipeline, classes, ann_file, test_mode)
self.CLASSES = self.update_classes()
def update_classes(self):
if self.task_type == 'EXPR':
return self.CLASSES
elif self.task_type == 'AU':
return [f'AU{i+1}' for i in range(12)]
elif self.task_type == 'VA':
return ['V', 'A']
def process_one_ann(self, dir, ann_file:str):
with open(os.path.join(dir, ann_file), 'r') as f:
data = f.read().strip().split('\n')[1:]
return data
def list_txt_files(self, dir):
files = os.listdir(dir)
files = [i for i in files if i.endswith('.txt')]
return files
def load_annotations(self, label_file=None):
if 'EXPR_' in self.ann_file:
self.task_type = 'EXPR'
return self.load_ce_annotations(label_file)
elif 'AU_' in self.ann_file:
self.task_type = 'AU'
return self.load_au_annotations()
elif 'VA_' in self.ann_file:
self.task_type = 'VA'
return self.load_va_annotations()
else:
raise ValueError('invalid task')
def load_ce_annotations(self, label_file=None):
"""Load CE annotations"""
if label_file is None:
label_file = self.ann_file
if isinstance(label_file, str): # is a folder
ann_files = os.listdir(label_file)
ann_files = [i for i in ann_files if i.endswith('.txt')]
else:
raise TypeError('ann_file must be a str')
# label_map = gen_class_map(self.DATASET_CLASSES)
data_infos = []
for ann_file in ann_files: # xxx.txt
ce_labels = self.process_one_ann(label_file, ann_file)
for i, label in enumerate(ce_labels):
label = int(label)
if label == -1:
continue
if self.task == 'coarse':
label = self.coarse_map[label]
self.CLASSES = self.COARSE_CLASSES
elif self.task == 'negative':
label = self.negative_map[label]
self.CLASSES = self.NEGATIVE_CLASSES
if label == -1: # Only negative has -1
continue
img_prefix = os.path.join(self.data_prefix, ann_file.replace('.txt', ''))
filename = f'{str(i).zfill(5)}.jpg'
if not os.path.isfile(os.path.join(img_prefix, filename)):
continue
info = {'img_prefix': img_prefix}
info['img_info'] = {'filename': filename}
info['gt_label'] = np.array(label, dtype=np.int64)
data_infos.append(info)
return data_infos
def load_au_annotations(self,):
"""Load the AU annotations"""
label_file = self.ann_file
if isinstance(label_file, str): # is a folder
ann_files = os.listdir(label_file)
ann_files = [i for i in ann_files if i.endswith('.txt')]
else:
raise TypeError('ann_file must be a str')
data_infos = []
for ann_file in ann_files: # xxx.txt
# ce_labels = self.process_one_ann(label_file, ann_file)
au_labels = self.process_one_ann(self.ann_file, ann_file)
for i, label in enumerate(au_labels):
if label == '-1':
continue
img_prefix = os.path.join(self.data_prefix, ann_file.replace('.txt', ''))
filename = f'{str(i).zfill(5)}.jpg'
if not os.path.isfile(os.path.join(img_prefix, filename)):
continue
info = {'img_prefix': img_prefix}
info['img_info'] = {'filename': filename}
# label = label_map[int(label)]
# info['gt_label'] = np.array(label, dtype=np.int64)
info['gt_label'] = np.array(au_labels[i].split(','), dtype=np.int64)
data_infos.append(info)
return data_infos
def load_va_annotations(self,):
"""Load the VA annotations"""
label_file = self.ann_file
if isinstance(label_file, str): # is a folder
ann_files = os.listdir(label_file)
ann_files = [i for i in ann_files if i.endswith('.txt')]
else:
raise TypeError('ann_file must be a str')
data_infos = []
for ann_file in ann_files: # xxx.txt
va_labels = self.process_one_ann(self.ann_file, ann_file)
for i, label in enumerate(va_labels):
if float(label.split(',')[0]) < -1. or float(label.split(',')[1]) < -1: # skip -5
continue
img_prefix = os.path.join(self.data_prefix, ann_file.replace('.txt', ''))
filename = f'{str(i).zfill(5)}.jpg'
if not os.path.isfile(os.path.join(img_prefix, filename)):
continue
info = {'img_prefix': img_prefix}
info['img_info'] = {'filename': filename}
info['gt_label'] = np.array(label.split(','), dtype=np.float32)
assert info['gt_label'].max() <= 1 and info['gt_label'].min() >= -1
data_infos.append(info)
return data_infos
def load_exp_and_au_annotations(self):
if 'Train_Set' not in self.ann_file:
return self.load_annotations()
label_map = gen_class_map(self.DATASET_CLASSES)
assert 'EXPR_Set' in self.ann_file
au_file = self.ann_file.replace('EXPR_Set', 'AU_Set')
ce_files = self.list_txt_files(self.ann_file)
au_files = self.list_txt_files(au_file)
samples = dict() # key: filename, value: [ce, au]
for file in ce_files:
samples[file] = [self.process_one_ann(self.ann_file, file), None]
for file in au_files:
au_labels = self.process_one_ann(au_file, file)
if file in samples:
                samples[file][1] = au_labels  # keep both annotation sets
# if len(au_labels) == len(samples[file][0]):
# samples[file][1] = au_labels
# else:
                #     # samples[file] = [None, au_labels] # two options here: which annotation to keep when CE and AU conflict
# samples[file][1] = None
else:
samples[file] = [None, au_labels]
# va_path = self.ann_file.replace('EXPR_Set', 'VA_Set')
# va_files = self.list_txt_files(va_path)
# for file in va_files:
# va_labels = self.process_one_ann(va_path, file)
# if file in samples:
# else:
# samples[file] = [None, None, va_labels]
# build data_infos
data_infos = []
for ann_file, labels in samples.items():
have_ce_label = labels[0] is not None
have_au_label = labels[1] is not None
frame_num = len(labels[0]) if have_ce_label else len(labels[1])
for i in range(frame_num):
img_prefix = os.path.join(self.data_prefix, ann_file.replace('.txt', ''))
filename = f'{str(i).zfill(5)}.jpg'
if not os.path.isfile(os.path.join(img_prefix, filename)):
continue
info = {'img_prefix': img_prefix}
info['img_info'] = {'filename': filename}
# info['ann_file'] = self.ann_file
if have_ce_label:
origin_label = labels[0][i]
if origin_label != '-1':
label = label_map[int(origin_label)]
info['gt_label'] = np.array(label, dtype=np.int64)
else:
info['gt_label'] = np.array(255, dtype=np.int64)
else:
info['gt_label'] = np.array(255, dtype=np.int64)
if have_au_label:
try:
origin_label = labels[1][i].split(',')
except IndexError: # i > len(labels[1])
info['au_label'] = np.array([255]*12, dtype=np.int64)
else:
if origin_label[0] != '-1':
info['au_label'] = np.array(origin_label, dtype=np.int64)
else:
info['au_label'] = np.array([255]*12, dtype=np.int64)
else:
info['au_label'] = np.array([255]*12, dtype=np.int64)
data_infos.append(info)
return data_infos
# === govind794/all-programming: python/String/add_string.py ===
# Write a Python program to add 'ing' to the end of a given string (length should be at least 3).
# If the given string already ends with 'ing', add 'ly' instead. If the length of the given string
# is less than 3, leave it unchanged.
# Sample String : 'abc'
# Expected Result : 'abcing'
# Sample String : 'string'
# Expected Result : 'stringly'
def add_string(st):
if len(st) > 2:
if st.endswith('ing'):
st += 'ly'
else:
st += 'ing'
print(st)
if __name__ == '__main__':
add_string('abcing')
add_string('sting')
add_string('ab')
# === tests/test_00_dxf_low_level_structs/test_016_encoding.py ===
# Created: 26.03.2016
# Copyright (C) 2016-2019, <NAME>
# License: MIT License
import pytest
import codecs
from ezdxf.lldxf.encoding import dxf_backslash_replace
codecs.register_error('dxfreplace', dxf_backslash_replace) # setup DXF unicode encoder -> '\U+nnnn'
from ezdxf.lldxf.encoding import encode
from ezdxf.lldxf.const import DXFEncodingError
DEFAULT_ENC = 'utf-8'
class TestEncoding:
def test_ascii_encoding(self):
assert b'123' == encode(u'123', 'ascii')
def test_ascii_encoding_error(self):
with pytest.raises(DXFEncodingError):
encode(u'123Ä', 'ascii')
def test_ignore_ascii_encoding_error(self):
assert u'123Ä'.encode(DEFAULT_ENC) == encode(u'123Ä', 'ascii', ignore_error=True)
def test_cp1252_encoding(self):
assert u'123ÄÜÖ'.encode('cp1252') == encode(u'123ÄÜÖ', 'cp1252')
def test_cp1252_encoding_error(self):
with pytest.raises(DXFEncodingError):
encode(u'更改', 'cp1252')
def test_cp1252_ignore_encoding_error(self):
assert u'更改'.encode(DEFAULT_ENC) == encode(u'更改', 'cp1252', ignore_error=True)
class TestACADEncoding:
def test_ascii_encoding(self):
assert b'123\\U+6539' == u'123改'.encode('ascii', errors='dxfreplace')
# === (new file) ===
# AUTHOR: whisperain (https://github.com/memset0)
from my_array import Array
STEP = 1.5
class List:
def __len__(self):
return self.len
def __getitem__(self, index):
return self.arr[index if index >= 0 else index + self.len]
def __setitem__(self, index, value):
self.arr[index if index >= 0 else index + self.len] = value
def __delitem__(self, index):
for i in range(index, self.len - 1):
self.arr[i] = self.arr[i + 1]
self.len -= 1
def __str__(self):
return str(self._list())
def _list(self):
return [self.__getitem__(i) for i in range(self.len)]
def _malloc(self, inneed):
if inneed > self.lim:
data = self._list()
self.lim = int(inneed * STEP)
del self.arr
self.arr = Array(self.lim)
for i in range(self.len):
self.arr[i] = data[i]
def append(self, value):
self._malloc(self.len + 1)
self.arr[self.len] = value
self.len += 1
def pop(self, index=-1):
if index == -1:
self.len -= 1
return self.arr[self.len]
else:
result = self.arr[index]
for i in range(index, self.len - 1):
self.arr[i] = self.arr[i + 1]
            self.len -= 1
            return result
def extend(self, iterables):
source = [item for item in iterables]
self._malloc(self.len + len(source))
for value in source:
self.arr[self.len] = value
self.len += 1
def index(self, value, start=None, end=None):
start = 0 if start == None else start
end = self.len if end == None else end
for i in range(start, end):
if self.arr[i] == value:
return i
raise ValueError(str(value) + ' is not in list')
def insert(self, index, value):
        self._malloc(self.len + 1)
        self.len += 1
        for i in range(self.len - 1, index, -1):
            self.arr[i] = self.arr[i - 1]
        self.arr[index] = value
def remove(self, value):
        for i in range(self.len):
            if self.arr[i] == value:
                # shift the remaining elements one position to the left
                while i < self.len - 1:
                    self.arr[i] = self.arr[i + 1]
                    i += 1
                self.len -= 1
                return
        raise ValueError(str(value) + ' is not in list')
def reverse(self):
for i in range(self.len // 2):
temp = self.arr[i]
self.arr[i] = self.arr[self.len - 1 - i]
self.arr[self.len - 1 - i] = temp
def __init__(self, iterables=None):
if iterables == None:
source = []
else:
source = [item for item in iterables]
self.len = len(source)
self.lim = self.len
self.arr = Array(self.len)
for i in range(self.len):
self.arr[i] = source[i]
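# Small usage sketch (relies only on my_array.Array imported above):
if __name__ == '__main__':
    lst = List([1, 2, 3])
    lst.append(4)
    lst.insert(1, 9)
    lst.remove(2)
    print(lst, len(lst))  # expected: [1, 9, 3, 4] 4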
# === (new file) ===
import re
daysOfWeek = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
ScheduleType = {
"Понедельник": daysOfWeek[0],
"Вторник": daysOfWeek[1],
"Среда": daysOfWeek[2],
"Четверг": daysOfWeek[3],
"Пятница": daysOfWeek[4],
"Суббота": daysOfWeek[5],
"Воскресенье": daysOfWeek[6],
"Сегодня": "Today",
"Завтра": "Tomorrow",
"Вся неделя": daysOfWeek
}
romanDigit = {
'I': 1,
'II': 2,
'III': 3,
'IV': 4,
'V': 5,
'VI': 6,
'VII': 7,
'VIII': 8,
'IX': 9,
'X': 10
}
def parse_day(cell_value):
if cell_value != '':
return ScheduleType[cell_value]
else:
return None
def parse_time(cell_value):
time_start = ''
time_end = ''
try:
result = re.findall(r'\d{1,2}:\d{2}', cell_value)
except:
result = []
if len(result) == 2:
time_start = result[0]
time_end = result[1]
return time_start, time_end
def parse_lesson_number(cell_value):
try:
result = re.findall(r'^\w{1,3}', cell_value)
except:
result = []
if len(result) == 1:
return romanDigit[result[0]]
else:
return None
def parse_title(cell_value):
try:
result = re.split(r'(\w{1,2}\s?\d[_-]\d{3}|\d[_-]\d{3}|\w\.\d)', cell_value)
except:
result = []
if len(result) >= 3:
return result[0]
else:
return cell_value
def parse_lecturer(cell_value):
try:
result = re.split(r'(\w{1,2}\s?\d[_-]\d{3}|\d[_-]\d{3}|\w\.\d)', cell_value)
except:
result = []
if len(result) >= 3:
return result[len(result) - 1]
else:
return ''
def parse_classroom(cell_value):
classroom = ''
try:
result = re.findall(r'\w{1,2}\s?\d[_-]\d{3}|\d[_-]\d{3}|\w\.\d', cell_value)
except:
result = []
if len(result) >= 1:
        # Classroom
for room in result:
classroom += room + ''
return classroom
def parse_date(cell_value):
try:
result = re.findall(r'\d\d.\d\d.\d\d\d\d', cell_value)
except:
result = []
if len(result) == 1:
return result[0]
else:
return 0
def parse_exam_data(cell_value):
return cell_value.split('\n')
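if __name__ == '__main__':
    # Minimal sanity checks for the parsers above (inputs are illustrative only):
    print(parse_time('08:30 - 10:05'))    # ('08:30', '10:05')
    print(parse_lesson_number('III'))     # 3
    print(parse_classroom('1-234'))       # '1-234'
    print(parse_date('01.09.2021 text'))  # '01.09.2021'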
# === Steinbeck-Lab/RanDepict ===
import os
import pathlib
import numpy as np
import io
from skimage import io as sk_io
from skimage.color import rgba2rgb, rgb2gray
from skimage.util import img_as_ubyte, img_as_float
from PIL import Image, ImageFont, ImageDraw, ImageStat
from multiprocessing import set_start_method, get_context
import imgaug.augmenters as iaa
import random
from copy import deepcopy
from typing import Tuple, List, Dict, Any
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.rdAbbreviations import CondenseMolAbbreviations
from rdkit.Chem.rdAbbreviations import GetDefaultAbbreviations
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit import DataStructs
from rdkit.SimDivFilters.rdSimDivPickers import MaxMinPicker
from itertools import product
from indigo import Indigo
from indigo.renderer import IndigoRenderer
from jpype import startJVM, getDefaultJVMPath
from jpype import JClass, JVMNotFoundException, isJVMStarted
import base64
class RandomDepictor:
"""
This class contains everything necessary to generate a variety of
random depictions with given SMILES strings. An instance of RandomDepictor
can be called with a SMILES str and returns an np.array that represents
the RGB image with the given chemical structure.
"""
def __init__(self, seed: int = 42):
"""
Load the JVM only once, load superatom list (OSRA),
set context for multiprocessing
"""
self.HERE = pathlib.Path(__file__).resolve().parent.joinpath("assets")
# Start the JVM to access Java classes
try:
self.jvmPath = getDefaultJVMPath()
except JVMNotFoundException:
print(
"If you see this message, for some reason JPype",
"cannot find jvm.dll.",
"This indicates that the environment varibale JAVA_HOME",
"is not set properly.",
"You can set it or set it manually in the code",
"(see __init__() of RandomDepictor)",
)
self.jvmPath = "Define/path/or/set/JAVA_HOME/variable/properly"
if not isJVMStarted():
self.jar_path = self.HERE.joinpath("jar_files/cdk_2_5.jar")
startJVM(self.jvmPath, "-ea",
"-Djava.class.path=" + str(self.jar_path))
self.seed = seed
random.seed(self.seed)
# Load list of superatoms for label generation
with open(self.HERE.joinpath("superatom.txt")) as superatoms:
superatoms = superatoms.readlines()
self.superatoms = [s[:-2] for s in superatoms]
# Define PIL resizing methods to choose from:
self.PIL_resize_methods = [
Image.NEAREST,
Image.BOX,
Image.BILINEAR,
Image.HAMMING,
Image.BICUBIC,
Image.LANCZOS,
]
self.from_fingerprint = False
self.depiction_features = False
# Set context for multiprocessing but make sure this only happens once
try:
set_start_method("spawn")
except RuntimeError:
pass
def __call__(
self,
smiles: str,
shape: Tuple[int, int, int] = (299, 299),
grayscale: bool = False,
):
# Depict structure with random parameters
depiction = self.random_depiction(smiles, shape)
# Add augmentations
depiction = self.add_augmentations(depiction)
if grayscale:
return self.to_grayscale_float_img(depiction)
return depiction
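    # Usage sketch (the SMILES below is caffeine; requires the bundled JAR/font
    # assets and a working JVM):
    #     depictor = RandomDepictor(seed=42)
    #     img = depictor("CN1C=NC2=C1C(=O)N(C(=O)N2C)C", shape=(299, 299))
    #     # img is typically a (299, 299, 3) uint8 np.array with augmentations applied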
def __enter__(self):
return self
def __exit__(self, type, value, tb):
# I'd like to automatically close the JVM
# But if it is closed once, you cannot reopen it
# (for example when someone works in a IPython notebook)
# Shutdown the JVM
# shutdownJVM()
pass
def random_choice(self, iterable: List, log_attribute: str = False):
"""
This function takes an iterable, calls random.choice() on it,
increases random.seed by 1 and returns the result. This way, results
produced by RanDepict are replicable.
Additionally, this function handles the generation of depictions and
augmentations from given fingerprints by handling all random decisions
according to the fingerprint template.
Args:
iterable (List): iterable to pick from
log_attribute (str, optional): ID for fingerprint.
Defaults to False.
Returns:
Any: "Randomly" picked element
"""
# Keep track of seed and change it with every pseudo-random decision.
self.seed += 1
random.seed(self.seed)
# Generation from fingerprint:
if self.from_fingerprint and log_attribute:
# Get dictionaries that define positions and linked conditions
pos_cond_dicts = self.active_scheme[log_attribute]
for pos_cond_dict in pos_cond_dicts:
pos = pos_cond_dict["position"]
cond = pos_cond_dict["one_if"]
if self.active_fingerprint[pos]:
# If the condition is a range: adapt iterable and go on
if isinstance(cond, tuple):
iterable = [
item
for item in iterable
if item > cond[0] - 0.001
if item < cond[1] + 0.001
]
break
# Otherwise, simply return the condition value
else:
return cond
# Pseudo-randomly pick an element from the iterable
result = random.choice(iterable)
return result
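    # For example, a call such as
    #     self.random_choice(np.arange(0.5, 2.5, 0.1), log_attribute="indigo_bond_line_width")
    # returns the value dictated by the active fingerprint when self.from_fingerprint
    # is set, and an ordinary pseudo-random pick otherwise.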
def random_image_size(self, shape: Tuple[int, int]) -> Tuple[int, int]:
"""
This function takes a random image shape and returns an image shape
where the first two dimensions are slightly distorted
(90-110% of original value).
Args:
shape (Tuple[int, int]): original shape
Returns:
Tuple[int, int]: distorted shape
"""
# Set random depiction image shape (to cause a slight distortion)
y = int(shape[0] * self.random_choice(np.arange(0.9, 1.1, 0.02)))
x = int(shape[1] * self.random_choice(np.arange(0.9, 1.1, 0.02)))
return y, x
def get_random_indigo_rendering_settings(
self, shape: Tuple[int, int] = (299, 299)
) -> Indigo:
"""
This function defines random rendering options for the structure
depictions created using Indigo.
It returns an Indigo object with the settings.
Args:
shape (Tuple[int, int], optional): im shape. Defaults to (299, 299)
Returns:
Indigo: Indigo object that contains depictions settings
"""
# Define random shape for depiction (within boundaries);)
indigo = Indigo()
renderer = IndigoRenderer(indigo)
# Get slightly distorted shape
y, x = self.random_image_size(shape)
indigo.setOption("render-image-width", x)
indigo.setOption("render-image-height", y)
# Set random bond line width
bond_line_width = float(
self.random_choice(
np.arange(
0.5,
2.5,
0.1),
log_attribute="indigo_bond_line_width"))
indigo.setOption("render-bond-line-width", bond_line_width)
# Set random relative thickness
relative_thickness = float(
self.random_choice(
np.arange(
0.5,
1.5,
0.1),
log_attribute="indigo_relative_thickness"))
indigo.setOption("render-relative-thickness", relative_thickness)
# Set random bond length
# Changing the bond length does not change the bond length relative to
# other elements. Instead, the whole molecule is scaled down!
# bond_length = self.random_choice(range(int(shape[0]/19),
# int(shape[0]/6)))
# indigo.setOption("render-bond-length", bond_length)
# Output_format: PNG
indigo.setOption("render-output-format", "png")
        # Set random atom label rendering mode
# (standard is rendering terminal groups)
if self.random_choice(
[True] + [False] * 19,
log_attribute="indigo_labels_all"):
# show all atom labels
indigo.setOption("render-label-mode", "all")
elif self.random_choice(
[True] + [False] * 3, log_attribute="indigo_labels_hetero"
):
indigo.setOption(
"render-label-mode", "hetero"
) # only hetero atoms, no terminal groups
# Set random depiction colour / not necessary for us as we binarise
# everything anyway
# if self.random_choice([True, False, False, False, False]):
# R = str(self.random_choice(np.arange(0.1, 1.0, 0.1)))
# G = str(self.random_choice(np.arange(0.1, 1.0, 0.1)))
# B = str(self.random_choice(np.arange(0.1, 1.0, 0.1)))
# indigo.setOption("render-base-color", ", ".join([R,G,B]))
# Render bold bond for Haworth projection
if self.random_choice([True, False],
log_attribute="indigo_render_bold_bond"):
indigo.setOption("render-bold-bond-detection", "True")
# Render labels for stereobonds
stereo_style = self.random_choice(
["ext", "old", "none"], log_attribute="indigo_stereo_label_style"
)
indigo.setOption("render-stereo-style", stereo_style)
# Collapse superatoms (default: expand)
if self.random_choice(
[True, False], log_attribute="indigo_collapse_superatoms"
):
indigo.setOption("render-superatom-mode", "collapse")
return indigo, renderer
def depict_and_resize_indigo(
self, smiles: str, shape: Tuple[int, int] = (299, 299)
) -> np.array:
"""
This function takes a smiles str and an image shape.
It renders the chemical structures using Indigo with random
rendering/depiction settings and returns an RGB image (np.array)
with the given image shape.
Args:
smiles (str): SMILES representation of molecule
shape (Tuple[int, int], optional): im shape. Defaults to (299, 299)
Returns:
np.array: Chemical structure depiction
"""
# Instantiate Indigo with random settings and IndigoRenderer
indigo, renderer = self.get_random_indigo_rendering_settings()
# Load molecule
molecule = indigo.loadMolecule(smiles)
# Kekulize in 67% of cases
if not self.random_choice(
[True, True, False], log_attribute="indigo_kekulized"
):
molecule.aromatize()
molecule.layout()
# Write to buffer
temp = renderer.renderToBuffer(molecule)
temp = io.BytesIO(temp)
depiction = sk_io.imread(temp)
depiction = self.resize(depiction, (shape[0], shape[1]))
depiction = rgba2rgb(depiction)
depiction = img_as_ubyte(depiction)
return depiction
def get_random_rdkit_rendering_settings(
self, shape: Tuple[int, int] = (299, 299)
) -> rdMolDraw2D.MolDraw2DCairo:
"""
This function defines random rendering options for the structure
depictions created using rdkit. It returns an MolDraw2DCairo object
with the settings.
Args:
shape (Tuple[int, int], optional): im_shape. Defaults to (299, 299)
Returns:
rdMolDraw2D.MolDraw2DCairo: Object that contains depiction settings
"""
# Get slightly distorted shape
y, x = self.random_image_size(shape)
# Instantiate object that saves the settings
depiction_settings = rdMolDraw2D.MolDraw2DCairo(y, x)
# Stereo bond annotation
if self.random_choice(
[True, False], log_attribute="rdkit_add_stereo_annotation"
):
depiction_settings.drawOptions().addStereoAnnotation = True
if self.random_choice(
[True, False], log_attribute="rdkit_add_chiral_flag_labels"
):
depiction_settings.drawOptions().includeChiralFlagLabel = True
# Atom indices
if self.random_choice(
[True, False, False, False], log_attribute="rdkit_add_atom_indices"
):
depiction_settings.drawOptions().addAtomIndices = True
# Bond line width
bond_line_width = self.random_choice(
range(1, 5), log_attribute="rdkit_bond_line_width"
)
depiction_settings.drawOptions().bondLineWidth = bond_line_width
# Draw terminal methyl groups
if self.random_choice(
[True, False], log_attribute="rdkit_draw_terminal_methyl"
):
depiction_settings.drawOptions().explicitMethyl = True
# Label font type and size
font_dir = self.HERE.joinpath("fonts/")
font_path = os.path.join(
str(font_dir),
self.random_choice(
os.listdir(str(font_dir)), log_attribute="rdkit_label_font"
),
)
depiction_settings.drawOptions().fontFile = font_path
min_font_size = self.random_choice(
range(10, 20), log_attribute="rdkit_min_font_size"
)
depiction_settings.drawOptions().minFontSize = min_font_size
depiction_settings.drawOptions().maxFontSize = 30
# Rotate the molecule
depiction_settings.drawOptions().rotate = self.random_choice(range(360))
# Fixed bond length
fixed_bond_length = self.random_choice(
range(30, 45), log_attribute="rdkit_fixed_bond_length"
)
depiction_settings.drawOptions().fixedBondLength = fixed_bond_length
# Comic mode (looks a bit hand drawn)
if self.random_choice(
[True, False, False, False, False], log_attribute="rdkit_comic_style"
):
depiction_settings.drawOptions().comicMode = True
# Keep it black and white
depiction_settings.drawOptions().useBWAtomPalette()
return depiction_settings
def depict_and_resize_rdkit(
self, smiles: str, shape: Tuple[int, int] = (299, 299)
) -> np.array:
"""
This function takes a smiles str and an image shape.
        It renders the chemical structures using RDKit with random
        rendering/depiction settings and returns an RGB image (np.array)
        with the given image shape.
        Args:
            smiles (str): SMILES representation of molecule
shape (Tuple[int, int], optional): im shape. Defaults to (299, 299)
Returns:
np.array: Chemical structure depiction
"""
# Generate mol object from smiles str
mol = Chem.MolFromSmiles(smiles)
if mol:
AllChem.Compute2DCoords(mol)
# Abbreviate superatoms
if self.random_choice(
[True, False], log_attribute="rdkit_collapse_superatoms"
):
abbrevs = GetDefaultAbbreviations()
mol = CondenseMolAbbreviations(mol, abbrevs)
# Get random depiction settings
depiction_settings = self.get_random_rdkit_rendering_settings()
# Create depiction
# TODO: Figure out how to depict without kekulization here
# The following line does not prevent the molecule from being
# depicted kekulized:
# mol = rdMolDraw2D.PrepareMolForDrawing(mol, kekulize = False)
# The molecule must get kekulized somewhere "by accident"
rdMolDraw2D.PrepareAndDrawMolecule(depiction_settings, mol)
depiction = depiction_settings.GetDrawingText()
depiction = sk_io.imread(io.BytesIO(depiction))
# Resize image to desired shape
depiction = self.resize(depiction, shape)
depiction = img_as_ubyte(depiction)
return np.asarray(depiction)
else:
print("RDKit was unable to read SMILES: {}".format(smiles))
def get_random_cdk_rendering_settings(self, rendererModel, molecule):
"""
This function defines random rendering options for the structure
depictions created using CDK.
It takes a cdk.renderer.AtomContainerRenderer.2DModel
and a cdk.AtomContainer and returns the 2DModel object with random
rendering settings and the AtomContainer.
I followed https://github.com/cdk/cdk/wiki/Standard-Generator while
creating this.
Args:
rendererModel (cdk.renderer.AtomContainerRenderer.2DModel)
molecule (cdk.AtomContainer): Atom container
Returns:
rendererModel, molecule: Objects that hold depiction parameters
"""
cdk_base = "org.openscience.cdk"
StandardGenerator = JClass(
cdk_base + ".renderer.generators.standard.StandardGenerator"
)
# Define visibility of atom/superatom labels
symbol_visibility = self.random_choice(
["iupac_recommendation", "no_terminal_methyl", "show_all_atom_labels"],
log_attribute="cdk_symbol_visibility",
)
SymbolVisibility = JClass(
"org.openscience.cdk.renderer.SymbolVisibility")
if symbol_visibility == "iupac_recommendation":
rendererModel.set(
StandardGenerator.Visibility.class_,
SymbolVisibility.iupacRecommendations(),
)
elif symbol_visibility == "no_terminal_methyl":
# only hetero atoms, no terminal alkyl groups
rendererModel.set(
StandardGenerator.Visibility.class_,
SymbolVisibility.iupacRecommendationsWithoutTerminalCarbon(),
)
elif symbol_visibility == "show_all_atom_labels":
rendererModel.set(
StandardGenerator.Visibility.class_, SymbolVisibility.all()
) # show all atom labels
# Define bond line stroke width
stroke_width = self.random_choice(
np.arange(0.8, 2.0, 0.1), log_attribute="cdk_stroke_width"
)
rendererModel.set(StandardGenerator.StrokeRatio.class_, stroke_width)
# Define symbol margin ratio
margin_ratio = self.random_choice(
[0, 1, 2, 2, 2, 3, 4], log_attribute="cdk_margin_ratio"
)
rendererModel.set(
StandardGenerator.SymbolMarginRatio.class_,
JClass("java.lang.Double")(margin_ratio),
)
# Define bond properties
double_bond_dist = self.random_choice(
np.arange(0.11, 0.25, 0.01), log_attribute="cdk_double_bond_dist"
)
rendererModel.set(
StandardGenerator.BondSeparation.class_,
double_bond_dist)
wedge_ratio = self.random_choice(
np.arange(4.5, 7.5, 0.1), log_attribute="cdk_wedge_ratio"
)
rendererModel.set(
StandardGenerator.WedgeRatio.class_,
JClass("java.lang.Double")(wedge_ratio))
if self.random_choice([True, False],
log_attribute="cdk_fancy_bold_wedges"):
rendererModel.set(StandardGenerator.FancyBoldWedges.class_, True)
if self.random_choice([True, False],
log_attribute="cdk_fancy_hashed_wedges"):
rendererModel.set(StandardGenerator.FancyHashedWedges.class_, True)
hash_spacing = self.random_choice(
np.arange(4.0, 6.0, 0.2), log_attribute="cdk_hash_spacing"
)
rendererModel.set(StandardGenerator.HashSpacing.class_, hash_spacing)
# Add CIP labels
labels = False
if self.random_choice([True, False],
log_attribute="cdk_add_CIP_labels"):
labels = True
JClass("org.openscience.cdk.geometry.cip.CIPTool").label(molecule)
for atom in molecule.atoms():
label = atom.getProperty(
JClass("org.openscience.cdk.CDKConstants").CIP_DESCRIPTOR
)
atom.setProperty(StandardGenerator.ANNOTATION_LABEL, label)
for bond in molecule.bonds():
label = bond.getProperty(
JClass("org.openscience.cdk.CDKConstants").CIP_DESCRIPTOR
)
bond.setProperty(StandardGenerator.ANNOTATION_LABEL, label)
# Add atom indices to the depictions
if self.random_choice(
[True, False, False, False], log_attribute="cdk_add_atom_indices"
):
labels = True
for atom in molecule.atoms():
label = JClass("java.lang.Integer")(
1 + molecule.getAtomNumber(atom))
atom.setProperty(StandardGenerator.ANNOTATION_LABEL, label)
if labels:
# We only need black
rendererModel.set(
StandardGenerator.AnnotationColor.class_,
JClass("java.awt.Color")(0x000000),
)
# Font size of labels
font_scale = self.random_choice(
np.arange(0.5, 0.8, 0.1), log_attribute="cdk_label_font_scale"
)
rendererModel.set(
StandardGenerator.AnnotationFontScale.class_,
font_scale)
# Distance between atom numbering and depiction
annotation_distance = self.random_choice(np.arange(
0.15, 0.30, 0.05), log_attribute="cdk_annotation_distance")
rendererModel.set(
StandardGenerator.AnnotationDistance.class_,
annotation_distance)
# Abbreviate superatom labels in half of the cases
# TODO: Find a way to define Abbreviations object as a class attribute.
# Problem: can't be pickled.
# Right now, this is loaded every time when a structure is depicted.
# That seems inefficient.
if self.random_choice([True, False],
log_attribute="cdk_collapse_superatoms"):
cdk_superatom_abrv = JClass(
"org.openscience.cdk.depict.Abbreviations")()
abbreviation_path = str(self.HERE.joinpath("smiles_list.smi"))
abbreviation_path = abbreviation_path.replace("\\", "/")
abbreviation_path = JClass("java.lang.String")(abbreviation_path)
cdk_superatom_abrv.loadFromFile(abbreviation_path)
cdk_superatom_abrv.apply(molecule)
return rendererModel, molecule
def depict_and_resize_cdk(
self, smiles: str, shape: Tuple[int, int] = (299, 299)
) -> np.array:
"""
This function takes a smiles str and an image shape.
It renders the chemical structures using CDK with random
rendering/depiction settings and returns an RGB image (np.array)
with the given image shape.
The general workflow here is a JPype adaptation of code published
by <NAME> in 'Groovy Cheminformatics with the Chemistry
Development Kit':
https://egonw.github.io/cdkbook/ctr.html#depict-a-compound-as-an-image
with additional adaptations to create all the different depiction
types from
https://github.com/cdk/cdk/wiki/Standard-Generator
Args:
smiles (str): SMILES representation of molecule
shape (Tuple[int, int], optional): im shape. Defaults to (299, 299)
Returns:
np.array: Chemical structure depiction
"""
cdk_base = "org.openscience.cdk"
# Read molecule from SMILES str
SCOB = JClass(cdk_base + ".silent.SilentChemObjectBuilder")
SmilesParser = JClass(
cdk_base +
".smiles.SmilesParser")(
SCOB.getInstance())
if self.random_choice([True, False, False],
log_attribute="cdk_kekulized"):
SmilesParser.kekulise(False)
molecule = SmilesParser.parseSmiles(smiles)
# Add hydrogens for coordinate generation (to make it look nicer/
# avoid overlaps)
matcher = JClass(
cdk_base +
".atomtype.CDKAtomTypeMatcher").getInstance(
molecule.getBuilder())
for atom in molecule.atoms():
atom_type = matcher.findMatchingAtomType(molecule, atom)
JClass(
cdk_base +
".tools.manipulator.AtomTypeManipulator").configure(
atom,
atom_type)
adder = JClass(cdk_base + ".tools.CDKHydrogenAdder").getInstance(
molecule.getBuilder()
)
adder.addImplicitHydrogens(molecule)
AtomContainerManipulator = JClass(
cdk_base + ".tools.manipulator.AtomContainerManipulator"
)
AtomContainerManipulator.convertImplicitToExplicitHydrogens(molecule)
# Instantiate StructureDiagramGenerator, determine coordinates
sdg = JClass(cdk_base + ".layout.StructureDiagramGenerator")()
sdg.setMolecule(molecule)
sdg.generateCoordinates(molecule)
molecule = sdg.getMolecule()
# Remove explicit hydrogens again
AtomContainerManipulator.suppressHydrogens(molecule)
# Rotate molecule randomly
point = JClass(
cdk_base +
".geometry.GeometryTools").get2DCenter(molecule)
rot_degrees = self.random_choice(range(360))
JClass(cdk_base + ".geometry.GeometryTools").rotate(
molecule, point, rot_degrees
)
# Get Generators
generators = JClass("java.util.ArrayList")()
BasicSceneGenerator = JClass(
"org.openscience.cdk.renderer.generators.BasicSceneGenerator"
)()
generators.add(BasicSceneGenerator)
font_size = self.random_choice(
range(10, 20), log_attribute="cdk_atom_label_font_size"
)
Font = JClass("java.awt.Font")
font_name = self.random_choice(
["Verdana", "Times New Roman", "Arial", "Gulliver Regular"],
# log_attribute='cdk_atom_label_font'
)
font_style = self.random_choice(
[Font.PLAIN, Font.BOLD],
# log_attribute='cdk_atom_label_font_style'
)
font = Font(font_name, font_style, font_size)
StandardGenerator = JClass(
cdk_base + ".renderer.generators.standard.StandardGenerator"
)(font)
generators.add(StandardGenerator)
# Instantiate renderer
AWTFontManager = JClass(cdk_base + ".renderer.font.AWTFontManager")
renderer = JClass(cdk_base + ".renderer.AtomContainerRenderer")(
generators, AWTFontManager()
)
# Create an empty image of the right size
y, x = self.random_image_size(shape)
        # Workaround for structures that are cut off at the edges of images:
        # Make the image three times as big, reduce the zoom factor, then remove white
# areas at borders and resize to originally desired shape
# TODO: Find out why the structures are cut off in the first place
y = y * 3
x = x * 3
drawArea = JClass("java.awt.Rectangle")(x, y)
BufferedImage = JClass("java.awt.image.BufferedImage")
image = BufferedImage(x, y, BufferedImage.TYPE_INT_RGB)
# Draw the molecule
renderer.setup(molecule, drawArea)
model = renderer.getRenderer2DModel()
# Get random rendering settings
model, molecule = self.get_random_cdk_rendering_settings(
model, molecule)
double = JClass("java.lang.Double")
model.set(
JClass(
cdk_base +
".renderer.generators.BasicSceneGenerator.ZoomFactor"),
double(0.75),
)
g2 = image.getGraphics()
g2.setColor(JClass("java.awt.Color").WHITE)
g2.fillRect(0, 0, x, y)
AWTDrawVisitor = JClass(
"org.openscience.cdk.renderer.visitor.AWTDrawVisitor")
renderer.paint(molecule, AWTDrawVisitor(g2))
# Write the image into a format that can be read by skimage
ImageIO = JClass("javax.imageio.ImageIO")
os = JClass("java.io.ByteArrayOutputStream")()
Base64 = JClass("java.util.Base64")
ImageIO.write(
image,
JClass("java.lang.String")("PNG"),
Base64.getEncoder().wrap(os))
depiction = bytes(os.toString("UTF-8"))
depiction = base64.b64decode(depiction)
# Read image in skimage
depiction = sk_io.imread(depiction, plugin="imageio")
# Normalise padding and get non-distorted image of right size
depiction = self.normalise_padding(depiction)
depiction = self.central_square_image(depiction)
depiction = self.resize(depiction, shape)
depiction = img_as_ubyte(depiction)
return depiction
def normalise_padding(self, im: np.array) -> np.array:
"""This function takes an RGB image (np.array) and deletes white space at
        the borders. Then a random padding of up to 20% of the image height is added
        again. The modified image is returned.
Args:
im: input image (np.array)
Returns:
output: the modified image (np.array)
"""
# Remove white space at borders
mask = im > 200
all_white = mask.sum(axis=2) > 0
rows = np.flatnonzero((~all_white).sum(axis=1))
cols = np.flatnonzero((~all_white).sum(axis=0))
crop = im[rows.min(): rows.max() + 1, cols.min(): cols.max() + 1, :]
# Add padding again.
pad_range = np.arange(5, int(crop.shape[0] * 0.2), 1)
if len(pad_range) > 0:
pad = self.random_choice(np.arange(5, int(crop.shape[0] * 0.2), 1))
else:
pad = 5
crop = np.pad(
crop,
pad_width=((pad, pad), (pad, pad), (0, 0)),
mode="constant",
constant_values=255,
)
return crop
def central_square_image(self, im: np.array) -> np.array:
"""
This function takes image (np.array) and will add white padding
so that the image has a square shape with the width/height of the
longest side of the original image.
Args:
im (np.array): Input image
Returns:
np.array: Output image
"""
# Create new blank white image
max_wh = max(im.shape)
new_im = 255 * np.ones((max_wh, max_wh, 3), np.uint8)
# Determine paste coordinates and paste image
upper = int((new_im.shape[0] - im.shape[0]) / 2)
lower = int((new_im.shape[0] - im.shape[0]) / 2) + im.shape[0]
left = int((new_im.shape[1] - im.shape[1]) / 2)
right = int((new_im.shape[1] - im.shape[1]) / 2) + im.shape[1]
new_im[upper:lower, left:right] = im
return new_im
def random_depiction(
self, smiles: str, shape: Tuple[int, int] = (299, 299)
) -> np.array:
"""
This function takes a SMILES and depicts it using Rdkit, Indigo or CDK.
The depiction method and the specific parameters for the depiction are
chosen completely randomly. The purpose of this function is to enable
depicting a diverse variety of chemical structure depictions.
Args:
smiles (str): SMILES representation of molecule
shape (Tuple[int, int], optional): im shape. Defaults to (299, 299)
Returns:
np.array: Chemical structure depiction
"""
depiction_functions = [
self.depict_and_resize_rdkit,
self.depict_and_resize_indigo,
self.depict_and_resize_cdk,
]
depiction_function = self.random_choice(depiction_functions)
depiction = depiction_function(smiles, shape)
# RDKit sometimes has troubles reading SMILES. If that happens,
# use Indigo or CDK
if depiction is False or depiction is None:
depiction_function = self.random_choice(
[self.depict_and_resize_indigo, self.depict_and_resize_cdk]
)
depiction = depiction_function(smiles, shape)
return depiction
def resize(self, image: np.array, shape: Tuple[int]) -> np.array:
"""
This function takes an image (np.array) and a shape and returns
the resized image (np.array). It uses Pillow to do this, as it
seems to have a bigger variety of scaling methods than skimage.
The up/downscaling method is chosen randomly.
Args:
image (np.array): the input image
shape (Tuple[int, int], optional): im shape. Defaults to (299, 299)
Returns:
np.array: the resized image
"""
image = Image.fromarray(image)
shape = (shape[0], shape[1])
image = image.resize(
shape, resample=self.random_choice(self.PIL_resize_methods)
)
return np.asarray(image)
def imgaug_augment(
self,
image: np.array,
) -> np.array:
"""
This function applies a random amount of augmentations to
a given image (np.array) using and returns the augmented image
(np.array).
Args:
image (np.array): input image
Returns:
np.array: output image (augmented)
"""
original_shape = image.shape
# Choose number of augmentations to apply (0-2);
# return image if nothing needs to be done.
aug_number = self.random_choice(range(0, 3))
if not aug_number:
return image
# Add some padding to avoid weird artifacts after rotation
image = np.pad(image, ((1, 1), (1, 1), (0, 0)),
mode="constant", constant_values=255)
def imgaug_rotation():
# Rotation between -10 and 10 degrees
if not self.random_choice(
[True, True, False], log_attribute="has_imgaug_rotation"
):
return False
rot_angle = self.random_choice(np.arange(-10, 10, 1))
aug = iaa.Affine(rotate=rot_angle, mode="edge", fit_output=True)
return aug
def imgaug_black_and_white_noise():
# Black and white noise
if not self.random_choice(
[True, True, False], log_attribute="has_imgaug_salt_pepper"
):
return False
coarse_dropout_p = self.random_choice(
np.arange(0.0002, 0.0015, 0.0001))
coarse_dropout_size_percent = self.random_choice(
np.arange(1.0, 1.1, 0.01))
replace_elementwise_p = self.random_choice(
np.arange(0.01, 0.3, 0.01))
aug = iaa.Sequential(
[
iaa.CoarseDropout(
coarse_dropout_p,
size_percent=coarse_dropout_size_percent),
iaa.ReplaceElementwise(
replace_elementwise_p,
255),
])
return aug
def imgaug_shearing():
# Shearing
if not self.random_choice(
[True, True, False], log_attribute="has_imgaug_shearing"
):
return False
shear_param = self.random_choice(np.arange(-5, 5, 1))
aug = self.random_choice(
[
iaa.geometric.ShearX(
shear_param,
mode="edge",
fit_output=True),
iaa.geometric.ShearY(
shear_param,
mode="edge",
fit_output=True),
])
return aug
def imgaug_imgcorruption():
# Jpeg compression or pixelation
if not self.random_choice(
[True, True, False], log_attribute="has_imgaug_corruption"
):
return False
imgcorrupt_severity = self.random_choice(np.arange(1, 2, 1))
aug = self.random_choice(
[
iaa.imgcorruptlike.JpegCompression(
severity=imgcorrupt_severity), iaa.imgcorruptlike.Pixelate(
severity=imgcorrupt_severity), ])
return aug
def imgaug_brightness_adjustment():
# Brightness adjustment
if not self.random_choice(
[True, True, False], log_attribute="has_imgaug_brightness_adj"
):
return False
brightness_adj_param = self.random_choice(np.arange(-50, 50, 1))
aug = iaa.WithBrightnessChannels(iaa.Add(brightness_adj_param))
return aug
def imgaug_colour_temp_adjustment():
# Colour temperature adjustment
if not self.random_choice(
[True, True, False], log_attribute="has_imgaug_col_adj"
):
return False
colour_temp = self.random_choice(np.arange(1100, 10000, 1))
aug = iaa.ChangeColorTemperature(colour_temp)
return aug
# Define list of available augmentations
aug_list = [
imgaug_rotation,
imgaug_black_and_white_noise,
imgaug_shearing,
imgaug_imgcorruption,
imgaug_brightness_adjustment,
imgaug_colour_temp_adjustment,
]
# Every one of them has a 1/3 chance of returning False
aug_list = [fun() for fun in aug_list]
aug_list = [fun for fun in aug_list if fun]
aug = iaa.Sequential(aug_list)
augmented_image = aug.augment_images([image])[0]
augmented_image = self.resize(augmented_image, original_shape)
augmented_image = augmented_image.astype(np.uint8)
return augmented_image
def add_augmentations(self, depiction: np.array) -> np.array:
"""
This function takes a chemical structure depiction (np.array)
and returns the same image with added augmentation elements
Args:
depiction (np.array): chemical structure depiction
Returns:
np.array: chemical structure depiction with added augmentations
"""
if self.random_choice(
[True, False, False, False, False, False], log_attribute="has_curved_arrows"
):
depiction = self.add_curved_arrows_to_structure(depiction)
if self.random_choice(
[True, False, False], log_attribute="has_straight_arrows"
):
depiction = self.add_straight_arrows_to_structure(depiction)
if self.random_choice(
[True, False, False, False, False, False], log_attribute="has_id_label"
):
depiction = self.add_chemical_label(depiction, "ID")
if self.random_choice(
[True, False, False, False, False, False], log_attribute="has_R_group_label"
):
depiction = self.add_chemical_label(depiction, "R_GROUP")
if self.random_choice(
[True, False, False, False, False, False],
log_attribute="has_reaction_label",
):
depiction = self.add_chemical_label(depiction, "REACTION")
if self.random_choice([True, False, False]):
depiction = self.imgaug_augment(depiction)
return depiction
def get_random_label_position(
self, width: int, height: int) -> Tuple[int, int]:
"""
Given the width and height of an image (int), this function
determines a random position in the outer 15% of the image and
        returns a tuple that contains the coordinates (y, x) of that position.
Args:
width (int): image width
height (int): image height
Returns:
Tuple[int, int]: Random label position
"""
if self.random_choice([True, False]):
y_range = range(0, height)
x_range = list(range(0, int(0.15 * width))) + list(
range(int(0.85 * width), width)
)
else:
y_range = list(range(0, int(0.15 * height))) + list(
range(int(0.85 * height), height)
)
x_range = range(0, width)
return self.random_choice(y_range), self.random_choice(x_range)
def ID_label_text(self) -> str:
"""
This function returns a string that resembles a typical
chemical ID label
Returns:
str: Label text
"""
label_num = range(1, 50)
label_letters = [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
]
options = [
"only_number",
"num_letter_combination",
"numtonum",
"numcombtonumcomb",
]
option = self.random_choice(options)
if option == "only_number":
return str(self.random_choice(label_num))
if option == "num_letter_combination":
return str(self.random_choice(label_num)) + self.random_choice(
label_letters
)
if option == "numtonum":
return (
str(self.random_choice(label_num)) +
"-" +
str(self.random_choice(label_num))
)
if option == "numcombtonumcomb":
return (
str(self.random_choice(label_num)) +
self.random_choice(label_letters) +
"-" +
self.random_choice(label_letters)
)
def new_reaction_condition_elements(self) -> Tuple[str, str, str]:
"""
Randomly redefine reaction_time, solvent and other_reactand.
Returns:
Tuple[str, str, str]: Reaction time, solvent, reactand
"""
reaction_time = self.random_choice(
[str(num) for num in range(30)]
) + self.random_choice([" h", " min"])
solvent = self.random_choice(
[
"MeOH",
"EtOH",
"CHCl3",
"DCM",
"iPrOH",
"MeCN",
"DMSO",
"pentane",
"hexane",
"benzene",
"Et2O",
"THF",
"DMF",
]
)
other_reactand = self.random_choice(
[
"HF",
"HCl",
"HBr",
"NaOH",
"Et3N",
"TEA",
"Ac2O",
"DIBAL",
"DIBAL-H",
"DIPEA",
"DMAP",
"EDTA",
"HOBT",
"HOAt",
"TMEDA",
"p-TsOH",
"Tf2O",
]
)
return reaction_time, solvent, other_reactand
def reaction_condition_label_text(self) -> str:
"""
This function returns a random string that looks like a
reaction condition label.
Returns:
str: Reaction condition label text
"""
reaction_condition_label = ""
label_type = self.random_choice(["A", "B", "C", "D"])
if label_type in ["A", "B"]:
for n in range(self.random_choice(range(1, 5))):
(
reaction_time,
solvent,
other_reactand,
) = self.new_reaction_condition_elements()
if label_type == "A":
reaction_condition_label += (
str(n + 1) +
" " +
other_reactand +
", " +
solvent +
", " +
reaction_time +
"\n"
)
elif label_type == "B":
reaction_condition_label += (
str(n + 1) +
" " +
other_reactand +
", " +
solvent +
" (" +
reaction_time +
")\n"
)
elif label_type == "C":
(
reaction_time,
solvent,
other_reactand,
) = self.new_reaction_condition_elements()
reaction_condition_label += (
other_reactand + "\n" + solvent + "\n" + reaction_time
)
elif label_type == "D":
reaction_condition_label += self.random_choice(
self.new_reaction_condition_elements()
)
return reaction_condition_label
def make_R_group_str(self) -> str:
"""
This function returns a random string that looks like an R group label.
It generates them by inserting randomly chosen elements into one of
five templates.
Returns:
str: R group label text
"""
rest_variables = [
"X",
"Y",
"Z",
"R",
"R1",
"R2",
"R3",
"R4",
"R5",
"R6",
"R7",
"R8",
"R9",
"R10",
"Y2",
"D",
]
# Load list of superatoms (from OSRA)
superatoms = self.superatoms
label_type = self.random_choice(["A", "B", "C", "D", "E"])
R_group_label = ""
if label_type == "A":
for _ in range(1, self.random_choice(range(2, 6))):
R_group_label += (
self.random_choice(rest_variables) +
" = " +
self.random_choice(superatoms) +
"\n"
)
elif label_type == "B":
R_group_label += " " + \
self.random_choice(rest_variables) + "\n"
for n in range(1, self.random_choice(range(2, 6))):
R_group_label += str(n) + " " + \
self.random_choice(superatoms) + "\n"
elif label_type == "C":
R_group_label += (
" " +
self.random_choice(rest_variables) +
" " +
self.random_choice(rest_variables) +
"\n"
)
for n in range(1, self.random_choice(range(2, 6))):
R_group_label += (
str(n) +
" " +
self.random_choice(superatoms) +
" " +
self.random_choice(superatoms) +
"\n"
)
elif label_type == "D":
R_group_label += (
" " +
self.random_choice(rest_variables) +
" " +
self.random_choice(rest_variables) +
" " +
self.random_choice(rest_variables) +
"\n"
)
for n in range(1, self.random_choice(range(2, 6))):
R_group_label += (
str(n) +
" " +
self.random_choice(superatoms) +
" " +
self.random_choice(superatoms) +
" " +
self.random_choice(superatoms) +
"\n"
)
if label_type == "E":
for n in range(1, self.random_choice(range(2, 6))):
R_group_label += (
str(n) +
" " +
self.random_choice(rest_variables) +
" = " +
self.random_choice(superatoms) +
"\n"
)
return R_group_label
def add_chemical_label(
self, image: np.array, label_type: str, foreign_fonts: bool = True
) -> np.array:
"""
This function takes an image (np.array) and adds random text that
looks like a chemical ID label, an R group label or a reaction
condition label around the structure. It returns the modified image.
The label type is determined by the parameter label_type (str),
which needs to be 'ID', 'R_GROUP' or 'REACTION'
Args:
image (np.array): Chemical structure depiction
label_type (str): 'ID', 'R_GROUP' or 'REACTION'
foreign_fonts (bool, optional): Defaults to True.
Returns:
np.array: Chemical structure depiction with label
"""
im = Image.fromarray(image)
orig_image = deepcopy(im)
width, height = im.size
# Choose random font
if self.random_choice([True, False]) or not foreign_fonts:
font_dir = self.HERE.joinpath("fonts/")
# In half of the cases: Use foreign-looking font to generate
# bigger noise variety
else:
font_dir = self.HERE.joinpath("foreign_fonts/")
fonts = os.listdir(str(font_dir))
# Choose random font size
font_sizes = range(10, 20)
size = self.random_choice(font_sizes)
# Generate random string that resembles the desired type of label
if label_type == "ID":
label_text = self.ID_label_text()
if label_type == "R_GROUP":
label_text = self.make_R_group_str()
if label_type == "REACTION":
label_text = self.reaction_condition_label_text()
try:
font = ImageFont.truetype(
str(os.path.join(str(font_dir), self.random_choice(fonts))), size=size
)
except OSError:
font = ImageFont.load_default()
draw = ImageDraw.Draw(im, "RGBA")
        # Try different positions with the condition that the label does not
# overlap with non-white pixels (the structure)
for _ in range(50):
y_pos, x_pos = self.get_random_label_position(width, height)
bounding_box = draw.textbbox(
(x_pos, y_pos), label_text, font=font
) # left, up, right, low
paste_region = orig_image.crop(bounding_box)
try:
mean = ImageStat.Stat(paste_region).mean
except ZeroDivisionError:
return np.asarray(im)
if sum(mean) / len(mean) == 255:
draw.text((x_pos, y_pos), label_text,
font=font, fill=(0, 0, 0, 255))
break
return np.asarray(im)
def add_curved_arrows_to_structure(self, image: np.array) -> np.array:
"""
This function takes an image of a chemical structure (np.array)
        and adds two or three curved arrows in random positions in the
central part of the image.
Args:
image (np.array): Chemical structure depiction
Returns:
np.array: Chemical structure depiction with curved arrows
"""
height, width, _ = image.shape
image = Image.fromarray(image)
orig_image = deepcopy(image)
# Determine area where arrows are pasted.
x_min, x_max = (int(0.1 * width), int(0.9 * width))
y_min, y_max = (int(0.1 * height), int(0.9 * height))
arrow_dir = os.path.normpath(
str(self.HERE.joinpath("arrow_images/curved_arrows/"))
)
for _ in range(self.random_choice(range(2, 4))):
# Load random curved arrow image, resize and rotate it randomly.
arrow_image = Image.open(
os.path.join(
str(arrow_dir),
self.random_choice(
os.listdir(
str(arrow_dir)))))
new_arrow_image_shape = int(
(x_max - x_min) / self.random_choice(range(3, 6))
), int((y_max - y_min) / self.random_choice(range(3, 6)))
arrow_image = self.resize(
np.asarray(arrow_image),
new_arrow_image_shape)
arrow_image = Image.fromarray(arrow_image)
arrow_image = arrow_image.rotate(
self.random_choice(range(360)),
resample=self.random_choice(
[Image.BICUBIC, Image.NEAREST, Image.BILINEAR]
),
expand=True,
)
# Try different positions with the condition that the arrows are
# overlapping with non-white pixels (the structure)
for _ in range(50):
x_position = self.random_choice(
range(x_min, x_max - new_arrow_image_shape[0])
)
y_position = self.random_choice(
range(y_min, y_max - new_arrow_image_shape[1])
)
paste_region = orig_image.crop(
(
x_position,
y_position,
x_position + new_arrow_image_shape[0],
y_position + new_arrow_image_shape[1],
)
)
mean = ImageStat.Stat(paste_region).mean
if sum(mean) / len(mean) < 252:
image.paste(
arrow_image, (x_position, y_position), arrow_image)
break
return np.asarray(image)
def get_random_arrow_position(
self, width: int, height: int) -> Tuple[int, int]:
"""
Given the width and height of an image (int), this function determines
a random position to paste a reaction arrow in the outer 15% frame of
the image
Args:
            width (int): image width
            height (int): image height
Returns:
Tuple[int, int]: Random arrow position
"""
if self.random_choice([True, False]):
y_range = range(0, height)
x_range = list(range(0, int(0.15 * width))) + list(
range(int(0.85 * width), width)
)
else:
y_range = list(range(0, int(0.15 * height))) + list(
range(int(0.85 * height), height)
)
x_range = range(0, int(0.5 * width))
return self.random_choice(y_range), self.random_choice(x_range)
def add_straight_arrows_to_structure(self, image: np.array) -> np.array:
"""
This function takes an image of a chemical structure (np.array)
and adds between 1 and 2 straight arrows in random positions in the
image (no overlap with other elements)
Args:
image (np.array): Chemical structure depiction
Returns:
np.array: Chemical structure depiction with straight arrow
"""
height, width, _ = image.shape
image = Image.fromarray(image)
arrow_dir = os.path.normpath(
str(self.HERE.joinpath("arrow_images/straight_arrows/"))
)
for _ in range(self.random_choice(range(1, 3))):
            # Load random straight arrow image and rotate it randomly.
arrow_image = Image.open(
os.path.join(
str(arrow_dir),
self.random_choice(
os.listdir(
str(arrow_dir)))))
# new_arrow_image_shape = (int(width *
# self.random_choice(np.arange(0.9, 1.5, 0.1))),
# int(height/10 * self.random_choice(np.arange(0.7, 1.2, 0.1))))
# arrow_image = arrow_image.resize(new_arrow_image_shape,
# resample=Image.BICUBIC)
# Rotate completely randomly in half of the cases and in 180° steps
# in the other cases (higher probability that pasting works)
if self.random_choice([True, False]):
arrow_image = arrow_image.rotate(
self.random_choice(range(360)),
resample=self.random_choice(
[Image.BICUBIC, Image.NEAREST, Image.BILINEAR]
),
expand=True,
)
else:
arrow_image = arrow_image.rotate(
self.random_choice([180, 360]))
new_arrow_image_shape = arrow_image.size
# Try different positions with the condition that the arrows are
            # not overlapping with non-white pixels (the structure)
for _ in range(50):
y_position, x_position = self.get_random_arrow_position(
width, height)
x2_position = x_position + new_arrow_image_shape[0]
y2_position = y_position + new_arrow_image_shape[1]
# Make sure we only check a region inside of the image
if x2_position > width:
x2_position = width - 1
if y2_position > height:
y2_position = height - 1
paste_region = image.crop(
(x_position, y_position, x2_position, y2_position)
)
try:
mean = ImageStat.Stat(paste_region).mean
if sum(mean) / len(mean) == 255:
image.paste(
arrow_image, (x_position, y_position), arrow_image)
break
except ZeroDivisionError:
pass
return np.asarray(image)
def to_grayscale_float_img(self, image: np.array) -> np.array:
"""
This function takes an image (np.array), converts it to grayscale
and returns it.
Args:
image (np.array): image
Returns:
np.array: grayscale float image
"""
return img_as_float(rgb2gray(image))
def depict_save(
self,
smiles: str,
images_per_structure: int,
output_dir: str,
augment: bool,
ID: str,
shape: Tuple[int, int] = (299, 299),
seed: int = 42,
):
"""
This function takes a SMILES str, the amount of images to create
per SMILES str and the path of an output directory. It then creates
images_per_structure depictions of the chemical structure that is
represented by the SMILES str and saves it as PNG images in output_dir.
If augment == True, it adds augmentations to the structure depiction.
If an ID is given, it is used as the base filename. Otherwise, the
SMILES str is used.
Args:
smiles (str): SMILES representation of molecule
images_per_structure (int): Number of images to create per SMILES
output_dir (str): output directory path
augment (bool): Add augmentations (if True)
ID (str): ID (used for name of saved image)
shape (Tuple[int, int], optional): im shape. Defaults to (299, 299)
seed (int, optional): Seed. Defaults to 42.
"""
depictor = RandomDepictor(seed + 13)
if not ID:
name = smiles
else:
name = ID
for n in range(images_per_structure):
if augment:
image = depictor(smiles, shape)
else:
image = depictor.random_depiction(smiles, shape)
output_file_path = os.path.join(
output_dir, name + "_" + str(n) + ".png")
sk_io.imsave(output_file_path, img_as_ubyte(image))
def batch_depict_save(
self,
smiles_list: List[str],
images_per_structure: int,
output_dir: str,
augment: bool,
ID_list: List[str],
shape: Tuple[int, int] = (299, 299),
processes: int = 4,
seed: int = 42,
) -> None:
"""
Batch generation of chemical structure depictions without usage of
fingerprints. The images are saved at a given path.
Args:
smiles_list (List[str]): List of SMILES str
images_per_structure (int): Amount of images to create per SMILES
output_dir (str): Output directory
augment (bool): indicates whether or not to use augmentations
ID_list (List[str]): List of IDs (should be as long as smiles_list)
shape (Tuple[int, int], optional): Defaults to (299, 299).
processes (int, optional): Number of threads. Defaults to 4.
seed (int, optional): Seed for random decisions. Defaults to 42.
"""
starmap_tuple_generator = (
(
smiles_list[n],
images_per_structure,
output_dir,
augment,
ID_list[n],
shape,
(seed * n + 1) * len(smiles_list), # individual seed
)
for n in range(len(smiles_list))
)
with get_context("spawn").Pool(processes) as p:
p.starmap(self.depict_save, starmap_tuple_generator)
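    # Usage sketch (hedged, illustrative values only): given a RandomDepictor
    # instance ``depictor``,
    #   depictor.batch_depict_save(["CCO", "c1ccccc1"], 2, "out/", True,
    #                              ["ethanol", "benzene"], (299, 299), 2)
    # writes out/ethanol_0.png ... out/benzene_1.png with random augmentations,
    # following the "{ID}_{n}.png" naming used in depict_save above.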
def depict_from_fingerprint(
self,
smiles: str,
fingerprints: List[np.array],
schemes: List[Dict],
shape: Tuple[int, int] = (299, 299),
seed: int = 42,
) -> np.array:
"""
This function takes a SMILES representation of a molecule,
a list of one or two fingerprints and a list of the corresponding
fingerprint schemes and generates a chemical structure depiction
that fits the fingerprint.
___
If only one fingerprint/scheme is given, we assume that they contain
information for a depiction without augmentations. If two are given,
we assume that the first one contains information about the depiction
and the second one contains information about the augmentations.
___
All this function does is set the class attributes in a manner that
        random_choice() knows not to actually pick parameters randomly.
Args:
fingerprints (List[np.array]): List of one or two fingerprints
schemes (List[Dict]): List of one or two fingerprint schemes
shape (Tuple[int,int]): Desired output image shape
Returns:
np.array: Chemical structure depiction
"""
# This needs to be done to ensure that the Java Virtual Machine is
        # running when working with multiprocessing
depictor = RandomDepictor(seed=seed)
self.from_fingerprint = True
self.active_fingerprint = fingerprints[0]
self.active_scheme = schemes[0]
# Depict molecule
if "indigo" in list(schemes[0].keys())[0]:
depiction = depictor.depict_and_resize_indigo(smiles, shape)
elif "rdkit" in list(schemes[0].keys())[0]:
depiction = depictor.depict_and_resize_rdkit(smiles, shape)
elif "cdk" in list(schemes[0].keys())[0]:
depiction = depictor.depict_and_resize_cdk(smiles, shape)
# Add augmentations
if len(fingerprints) == 2:
self.active_fingerprint = fingerprints[1]
self.active_scheme = schemes[1]
depiction = self.add_augmentations(depiction)
self.from_fingerprint, self.active_fingerprint, self.active_scheme = (
False,
False,
False,
)
return depiction
def depict_save_from_fingerprint(
self,
smiles: str,
fingerprints: List[np.array],
schemes: List[Dict],
output_dir: str,
filename: str,
shape: Tuple[int, int] = (299, 299),
seed: int = 42,
) -> None:
"""
This function takes a SMILES representation of a molecule, a list
of one or two fingerprints and a list of the corresponding fingerprint
schemes, generates a chemical structure depiction that fits the
fingerprint and saves the resulting image at a given path.
___
If only one fingerprint/scheme is given, we assume that they contain
information for a depiction without augmentations. If two are given,
we assume that the first one contains information about the depiction
and the second one contains information about the augmentations.
___
All this function does is set the class attributes in a manner that
        random_choice() knows not to actually pick parameters randomly.
Args:
smiles (str): SMILES representation of molecule
fingerprints (List[np.array]): List of one or two fingerprints
schemes (List[Dict]): List of one or two fingerprint schemes
output_dir (str): output directory
filename (str): filename
            shape (Tuple[int,int]): output image shape. Defaults to (299,299).
seed (int): Seed for remaining random decisions
Returns:
np.array: Chemical structure depiction
"""
# Generate chemical structure depiction
image = self.depict_from_fingerprint(
smiles, fingerprints, schemes, shape, seed)
# Save at given_path:
output_file_path = os.path.join(output_dir, filename + ".png")
sk_io.imsave(output_file_path, img_as_ubyte(image))
def batch_depict_save_with_fingerprints(
self,
smiles_list: List[str],
images_per_structure: int,
output_dir: str,
ID_list: List[str],
indigo_proportion: float = 0.15,
rdkit_proportion: float = 0.3,
cdk_proportion: float = 0.55,
aug_proportion: float = 0.5,
shape: Tuple[int, int] = (299, 299),
processes: int = 4,
seed: int = 42,
) -> None:
"""
Batch generation of chemical structure depictions with usage
of fingerprints. This takes longer than the procedure with
batch_depict_save but the diversity of the depictions and
augmentations is ensured. The images are saved in the given
output_directory
Args:
smiles_list (List[str]): List of SMILES str
images_per_structure (int): Amount of images to create per SMILES
output_dir (str): Output directory
ID_list (List[str]): IDs (len: smiles_list * images_per_structure)
indigo_proportion (float): Indigo proportion. Defaults to 0.15.
rdkit_proportion (float): RDKit proportion. Defaults to 0.3.
cdk_proportion (float): CDK proportion. Defaults to 0.55.
aug_proportion (float): Augmentation proportion. Defaults to 0.5.
            shape (Tuple[int, int]): Image shape. Defaults to (299, 299).
processes (int, optional): Number of threads. Defaults to 4.
"""
# Duplicate elements in smiles_list images_per_structure times
smiles_list = [
smi for smi in smiles_list for _ in range(images_per_structure)]
# Generate corresponding amount of fingerprints
dataset_size = len(smiles_list)
FR = DepictionFeatureRanges()
fingerprint_tuples = FR.generate_fingerprints_for_dataset(
dataset_size,
indigo_proportion,
rdkit_proportion,
cdk_proportion,
aug_proportion,
)
starmap_tuple_generator = (
(
smiles_list[n],
fingerprint_tuples[n],
[
FR.FP_length_scheme_dict[len(element)]
for element in fingerprint_tuples[n]
],
output_dir,
ID_list[n],
shape,
n * 100 * seed,
)
for n in range(len(fingerprint_tuples))
)
with get_context("spawn").Pool(processes) as p:
p.starmap(
self.depict_save_from_fingerprint,
starmap_tuple_generator)
return None
def batch_depict_with_fingerprints(
self,
smiles_list: List[str],
images_per_structure: int,
indigo_proportion: float = 0.15,
rdkit_proportion: float = 0.3,
cdk_proportion: float = 0.55,
aug_proportion: float = 0.5,
shape: Tuple[int, int] = (299, 299),
processes: int = 4,
seed: int = 42,
    ) -> List[np.array]:
        """
        Batch generation of chemical structure depictions with usage
        of fingerprints. This takes longer than the procedure with
        batch_depict_save but the diversity of the depictions and
        augmentations is ensured. The depictions are returned as a list
        of images instead of being saved to disk.
        Args:
            smiles_list (List[str]): List of SMILES str
            images_per_structure (int): Amount of images to create per SMILES
            indigo_proportion (float): Indigo proportion. Defaults to 0.15.
            rdkit_proportion (float): RDKit proportion. Defaults to 0.3.
            cdk_proportion (float): CDK proportion. Defaults to 0.55.
            aug_proportion (float): Augmentation proportion. Defaults to 0.5.
            shape (Tuple[int, int]): Image shape. Defaults to (299, 299).
            processes (int, optional): Number of threads. Defaults to 4.
            seed (int, optional): Seed for random decisions. Defaults to 42.
        Returns:
            List[np.array]: List of chemical structure depictions
        """
# Duplicate elements in smiles_list images_per_structure times
smiles_list = [
smi for smi in smiles_list for _ in range(images_per_structure)]
# Generate corresponding amount of fingerprints
dataset_size = len(smiles_list)
FR = DepictionFeatureRanges()
fingerprint_tuples = FR.generate_fingerprints_for_dataset(
dataset_size,
indigo_proportion,
rdkit_proportion,
cdk_proportion,
aug_proportion,
)
starmap_tuple_generator = (
(
smiles_list[n],
fingerprint_tuples[n],
[
FR.FP_length_scheme_dict[len(element)]
for element in fingerprint_tuples[n]
],
shape,
n * 100 * seed,
)
for n in range(len(fingerprint_tuples))
)
with get_context("spawn").Pool(processes) as p:
depictions = p.starmap(
self.depict_from_fingerprint, starmap_tuple_generator
)
return list(depictions)
class DepictionFeatureRanges(RandomDepictor):
"""Class for depiction feature fingerprint generation"""
def __init__(self):
super().__init__()
# Fill ranges. By simply using all the depiction and augmentation
# functions, the available features are saved by the overwritten
# random_choice function. We just have to make sure to run through
# every available decision once to get all the information about the
# feature space that we need.
smiles = "CN1C=NC2=C1C(=O)N(C(=O)N2C)C"
# Call every depiction function
depiction = self(smiles)
depiction = self.depict_and_resize_cdk(smiles)
depiction = self.depict_and_resize_rdkit(smiles)
depiction = self.depict_and_resize_indigo(smiles)
# Call augmentation function
depiction = self.add_augmentations(depiction)
# Generate schemes for Fingerprint creation
self.schemes = self.generate_fingerprint_schemes()
(
self.CDK_scheme,
self.RDKit_scheme,
self.Indigo_scheme,
self.augmentation_scheme,
) = self.schemes
# Generate the pool of all valid fingerprint combinations
self.generate_all_possible_fingerprints()
self.FP_length_scheme_dict = {
len(self.CDK_fingerprints[0]): self.CDK_scheme,
len(self.RDKit_fingerprints[0]): self.RDKit_scheme,
len(self.Indigo_fingerprints[0]): self.Indigo_scheme,
len(self.augmentation_fingerprints[0]): self.augmentation_scheme,
}
def random_choice(self, iterable: List, log_attribute: str = False) -> Any:
"""
In RandomDepictor, this function would take an iterable, call
        random.choice() on it, increase the seed attribute by 1 and return
the result.
___
Here, this function is overwritten, so that it also sets the class
attribute $log_attribute_range to contain the iterable.
This way, a DepictionFeatureRanges object can easily be filled with
all the iterables that define the complete depiction feature space.
___
Args:
iterable (List): iterable to pick from
log_attribute (str, optional): ID for fingerprint.
Defaults to False.
Returns:
Any: "Randomly" picked element
"""
if log_attribute:
setattr(self, "{}_range".format(log_attribute), iterable)
self.seed += 1
random.seed(self.seed)
result = random.choice(iterable)
# Add result(s) to augmentation_logger
if log_attribute and self.depiction_features:
found_logged_attribute = getattr(
self.augmentation_logger, log_attribute)
# If the attribute is not saved in a list, simply write it,
# otherwise append it
if not isinstance(found_logged_attribute, list):
setattr(self.depiction_features, log_attribute, result)
else:
setattr(
self.depiction_features,
log_attribute,
found_logged_attribute + [result],
)
return result
def generate_fingerprint_schemes(self) -> List[Dict]:
"""
Generates fingerprint schemes (see generate_fingerprint_scheme())
for the depictions with CDK, RDKit and Indigo as well as the
augmentations.
___
Returns:
List[Dict]: [cdk_scheme: Dict, rdkit_scheme: Dict,
indigo_scheme: Dict, augmentation_scheme: Dict]
"""
fingerprint_schemes = []
range_IDs = [att for att in dir(self) if "range" in att]
# Generate fingerprint scheme for our cdk, indigo and rdkit depictions
depiction_toolkits = ["cdk", "rdkit", "indigo", ""]
for toolkit in depiction_toolkits:
toolkit_range_IDs = [att for att in range_IDs if toolkit in att]
# Delete toolkit-specific ranges
# (The last time this loop runs, only augmentation-related ranges
# are left)
for ID in toolkit_range_IDs:
range_IDs.remove(ID)
toolkit_range_dict = {
attr[:-6]: list(set(getattr(self, attr))) for attr in toolkit_range_IDs
}
fingerprint_scheme = self.generate_fingerprint_scheme(
toolkit_range_dict)
fingerprint_schemes.append(fingerprint_scheme)
return fingerprint_schemes
def generate_fingerprint_scheme(self, ID_range_map: Dict) -> Dict:
"""
This function takes the ID_range_map and returns a dictionary that
defines where each feature is represented in the depiction feature
fingerprint.
___
Example:
>> example_ID_range_map = {'thickness': [0, 1, 2, 3],
'kekulized': [True, False]}
>> generate_fingerprint_scheme(example_ID_range_map)
>>>> {'thickness': [{'position': 0, 'one_if': 0},
{'position': 1, 'one_if': 1},
{'position': 2, 'one_if': 2},
{'position': 3, 'one_if': 3}],
'kekulized': [{'position': 4, 'one_if': True}]}
Args:
ID_range_map (Dict): dict that maps an ID (str) of a feature range
to the feature range itself (iterable)
Returns:
Dict: Map of feature ID (str) and dictionaries that define the
fingerprint position and a condition
"""
fingerprint_scheme = {}
position = 0
for feature_ID in ID_range_map.keys():
feature_range = ID_range_map[feature_ID]
# Make sure numeric ranges don't take up more than 5 positions
# in the fingerprint
if (
type(feature_range[0]) in [int, float, np.float64, np.float32] and
len(feature_range) > 5
):
subranges = self.split_into_n_sublists(feature_range, n=3)
position_dicts = []
for subrange in subranges:
subrange_minmax = (min(subrange), max(subrange))
position_dict = {
"position": position,
"one_if": subrange_minmax}
position_dicts.append(position_dict)
position += 1
fingerprint_scheme[feature_ID] = position_dicts
# Bools take up only one position in the fingerprint
elif isinstance(feature_range[0], bool):
assert len(feature_range) == 2
position_dicts = [{"position": position, "one_if": True}]
position += 1
fingerprint_scheme[feature_ID] = position_dicts
else:
# For other types of categorical data: Each category gets one
# position in the FP
position_dicts = []
for feature in feature_range:
position_dict = {"position": position, "one_if": feature}
position_dicts.append(position_dict)
position += 1
fingerprint_scheme[feature_ID] = position_dicts
return fingerprint_scheme
def split_into_n_sublists(self, iterable, n: int) -> List[List]:
"""
Takes an iterable, sorts it, splits it evenly into n lists
and returns the split lists.
Args:
iterable ([type]): Iterable that is supposed to be split
n (int): Amount of sublists to return
Returns:
List[List]: Split list
"""
iterable = sorted(iterable)
iter_len = len(iterable)
sublists = []
for i in range(0, iter_len, int(np.ceil(iter_len / n))):
sublists.append(iterable[i: i + int(np.ceil(iter_len / n))])
return sublists
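    # Worked example (hedged): split_into_n_sublists([5, 1, 4, 2, 3], n=2)
    # sorts the input and chunks it in steps of ceil(5 / 2) == 3, returning
    # [[1, 2, 3], [4, 5]].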
def get_number_of_possible_fingerprints(self, scheme: Dict) -> int:
"""
This function takes a fingerprint scheme (Dict) as returned by
generate_fingerprint_scheme()
and returns the number of possible fingerprints for that scheme.
Args:
scheme (Dict): Output of generate_fingerprint_scheme()
Returns:
int: Number of possible fingerprints
"""
comb_count = 1
for feature_key in scheme.keys():
if len(scheme[feature_key]) != 1:
# n fingerprint positions -> n options
# (because only one position can be [1])
# n = 3 --> [1][0][0] or [0][1][0] or [0][0][1]
comb_count *= len(scheme[feature_key])
else:
# One fingerprint position -> two options: [0] or [1]
comb_count *= 2
return comb_count
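    # Worked example (hedged): for the scheme shown in the docstring of
    # generate_fingerprint_scheme(), 'thickness' spans four positions
    # (4 options) and 'kekulized' one position (2 options: [0] or [1]),
    # so this method returns 4 * 2 = 8 possible fingerprints.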
def get_FP_building_blocks(self, scheme: Dict) -> List[List[List]]:
"""
This function takes a fingerprint scheme (Dict) as returned by
generate_fingerprint_scheme()
and returns a list of possible building blocks.
Example:
scheme = {'thickness': [{'position': 0, 'one_if': 0},
{'position': 1, 'one_if': 1},
{'position': 2, 'one_if': 2},
{'position': 3, 'one_if': 3}],
'kekulized': [{'position': 4, 'one_if': True}]}
--> Output: [[[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]],
[[1], [0]]]
Args:
scheme (Dict): Output of generate_fingerprint_scheme()
Returns:
List that contains the valid fingerprint parts that represent the
different features
"""
FP_building_blocks = []
for feature_key in scheme.keys():
position_condition_dicts = scheme[feature_key]
FP_building_blocks.append([])
# Add every single valid option to the building block
for position_index in range(len(position_condition_dicts)):
# Add list of zeros
FP_building_blocks[-1].append([0] *
len(position_condition_dicts))
# Replace one zero with a one
FP_building_blocks[-1][-1][position_index] = 1
# If a feature is described by only one position in the FP,
# make sure that 0 and 1 are listed options
if FP_building_blocks[-1] == [[1]]:
FP_building_blocks[-1].append([0])
return FP_building_blocks
def flatten_fingerprint(
self,
unflattened_list: List[List],
) -> List:
"""
This function takes a list of lists and returns a list.
___
        Looks like this could be one line elsewhere, but this function is used for
parallelisation of FP generation and consequently needs to be wrapped
up in a separate function.
Args:
unflattened_list (List[List[X,Y,Z]])
Returns:
flattened_list (List[X,Y,Z]):
"""
flattened_list = [
element for sublist in unflattened_list for element in sublist
]
return flattened_list
def generate_all_possible_fingerprints_per_scheme(
self,
scheme: Dict,
) -> List[List[int]]:
"""
This function takes a fingerprint scheme (Dict) as returned by
generate_fingerprint_scheme()
and returns a List of all possible fingerprints for that scheme.
Args:
scheme (Dict): Output of generate_fingerprint_scheme()
name (str): name that is used for filename of saved FPs
Returns:
List[List[int]]: List of fingerprints
"""
# Determine valid building blocks for fingerprints
FP_building_blocks = self.get_FP_building_blocks(scheme)
# Determine cartesian product of valid building blocks to get all
# valid fingerprints
FP_generator = product(*FP_building_blocks)
flattened_fingerprints = list(
map(self.flatten_fingerprint, FP_generator))
return flattened_fingerprints
def generate_all_possible_fingerprints(self) -> None:
"""
This function generates all possible valid fingerprint combinations
for the four available fingerprint schemes if they have not been
created already. Otherwise, they are loaded from files.
This function returns None but saves the fingerprint pools as a
class attribute $ID_fingerprints
"""
exists_already = False
FP_names = ["CDK", "RDKit", "Indigo", "augmentation"]
for scheme_index in range(len(self.schemes)):
n_FP = self.get_number_of_possible_fingerprints(
self.schemes[scheme_index])
# Load fingerprint pool from file (if it exists)
FP_filename = "{}_fingerprints.npz".format(FP_names[scheme_index])
FP_file_path = self.HERE.joinpath(FP_filename)
if os.path.exists(FP_file_path):
fps = np.load(FP_file_path)["arr_0"]
if len(fps) == n_FP:
exists_already = True
# Otherwise, generate the fingerprint pool
if not exists_already:
print("No saved fingerprints found. This may take a minute.")
fps = self.generate_all_possible_fingerprints_per_scheme(
self.schemes[scheme_index]
)
np.savez_compressed(FP_file_path, fps)
print(
"{} fingerprints were saved in {}.".format(
FP_names[scheme_index], FP_file_path
)
)
setattr(
self, "{}_fingerprints".format(
FP_names[scheme_index]), fps)
return
def convert_to_int_arr(
self, fingerprints: List[List[int]]
) -> List[DataStructs.cDataStructs.ExplicitBitVect]:
"""
Takes a list of fingerprints (List[int]) and returns them as a list of
rdkit.DataStructs.cDataStructs.ExplicitBitVect so that they can be
processed by RDKit's MaxMinPicker.
Args:
fingerprints (List[List[int]]): List of fingerprints
Returns:
List[DataStructs.cDataStructs.ExplicitBitVect]: Converted arrays
"""
converted_fingerprints = []
for fp in fingerprints:
bitstring = "".join(np.array(fp).astype(str))
fp_converted = DataStructs.cDataStructs.CreateFromBitString(
bitstring)
converted_fingerprints.append(fp_converted)
return converted_fingerprints
def pick_fingerprints(
self,
fingerprints: List[List[int]],
n: int,
) -> np.array:
"""
Given a list of fingerprints and a number n of fingerprints to pick,
this function uses RDKit's MaxMin Picker to pick n fingerprints and
returns them.
Args:
fingerprints (List[List[int]]): List of fingerprints
n (int): Number of fingerprints to pick
Returns:
np.array: Picked fingerprints
"""
converted_fingerprints = self.convert_to_int_arr(fingerprints)
"""TODO: I don't like this function definition in the function but
according to the RDKit Documentation, the fingerprints need to be
given in the distance function as the default value."""
def dice_dist(
fp_index_1: int,
fp_index_2: int,
fingerprints: List[
DataStructs.cDataStructs.ExplicitBitVect
] = converted_fingerprints,
) -> float:
"""
Returns the dice similarity between two fingerprints.
Args:
fp_index_1 (int): index of first fingerprint in fingerprints
fp_index_2 (int): index of second fingerprint in fingerprints
fingerprints (List[cDataStructs.ExplicitBitVect]): fingerprints
Returns:
float: Dice similarity between the two fingerprints
"""
return 1 - DataStructs.DiceSimilarity(
fingerprints[fp_index_1], fingerprints[fp_index_2]
)
n_fingerprints = len(fingerprints)
# If we want to pick more fingerprints than there are in the pool,
# simply distribute the complete pool as often as possible and pick
# the amount that is not dividable by the size of the pool
if n > n_fingerprints:
oversize_factor = int(n / n_fingerprints)
picked_fingerprints = fingerprints * oversize_factor
n = n - n_fingerprints * oversize_factor
else:
picked_fingerprints = False
picker = MaxMinPicker()
pick_indices = picker.LazyPick(dice_dist, n_fingerprints, n, seed=42)
if isinstance(picked_fingerprints, bool):
picked_fingerprints = np.array([fingerprints[i] for i in pick_indices])
else:
picked_fingerprints = np.concatenate((picked_fingerprints,
np.array([fingerprints[i]
for i in pick_indices])))
return picked_fingerprints
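    # Worked example (hedged): with a pool of 3 fingerprints and n = 7, the
    # whole pool is repeated twice (oversize_factor == 2, giving 6 fingerprints)
    # and the MaxMinPicker only picks the remaining 7 - 6 = 1 fingerprint.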
def generate_fingerprints_for_dataset(
self,
size: int,
indigo_proportion: float = 0.15,
rdkit_proportion: float = 0.3,
cdk_proportion: float = 0.55,
aug_proportion: float = 0.5,
) -> List[List[int]]:
"""Given a dataset size (int) and (optional) proportions for the
        different types of fingerprints, this function returns the
        corresponding number of diverse fingerprints for the dataset.
Args:
size (int): Desired dataset size, number of returned fingerprints
indigo_proportion (float): Indigo proportion. Defaults to 0.15.
rdkit_proportion (float): RDKit proportion. Defaults to 0.3.
cdk_proportion (float): CDK proportion. Defaults to 0.55.
aug_proportion (float): Augmentation proportion. Defaults to 0.5.
Raises:
ValueError:
- If the sum of Indigo, RDKit and CDK proportions is not 1
- If the augmentation proportion is > 1
Returns:
List[List[int]]: List of lists containing the fingerprints.
___
Depending on augmentation_proportion, the depiction fingerprints
are paired with augmentation fingerprints or not.
Example output:
[[$some_depiction_fingerprint, $some augmentation_fingerprint],
[$another_depiction_fingerprint]
[$yet_another_depiction_fingerprint]]
"""
# Make sure that the given proportion arguments make sense
if sum((indigo_proportion, rdkit_proportion, cdk_proportion)) != 1:
raise ValueError(
"Sum of Indigo, CDK and RDKit proportion arguments has to be 1"
)
if aug_proportion > 1:
raise ValueError(
"The proportion of augmentation fingerprints can't be > 1."
)
# Pick and return diverse fingerprints
picked_Indigo_fingerprints = self.pick_fingerprints(
self.Indigo_fingerprints, int(size * indigo_proportion)
)
picked_RDKit_fingerprints = self.pick_fingerprints(
self.RDKit_fingerprints, int(size * rdkit_proportion)
)
picked_CDK_fingerprints = self.pick_fingerprints(
self.CDK_fingerprints, int(size * cdk_proportion)
)
picked_augmentation_fingerprints = self.pick_fingerprints(
self.augmentation_fingerprints, int(size * aug_proportion)
)
# Distribute augmentation_fingerprints over depiction fingerprints
fingerprint_tuples = self.distribute_elements_evenly(
picked_augmentation_fingerprints,
picked_Indigo_fingerprints,
picked_RDKit_fingerprints,
picked_CDK_fingerprints,
)
# Shuffle fingerprint tuples randomly to avoid the same smiles
# always being depicted with the same cheminformatics toolkit
random.seed(self.seed)
random.shuffle(fingerprint_tuples)
return fingerprint_tuples
def distribute_elements_evenly(
self, elements_to_be_distributed: List[Any], *args: List[Any]
) -> List[List[Any]]:
"""
This function distributes the elements from elements_to_be_distributed
evenly over the lists of elements given in args. It can be used to link
augmentation fingerprints to given lists of depiction fingerprints.
Example:
distribute_elements_evenly(["A", "B", "C", "D"], [1, 2, 3], [4, 5, 6])
Output: [[1, "A"], [2, "B"], [3], [4, "C"], [5, "D"], [6]]
--> see test_distribute_elements_evenly() in ../Tests/test_functions.py
Args:
elements_to_be_distributed (List[Any]): elements to be distributed
args: Every arg is a list of elements (B)
Returns:
List[List[Any]]: List of Lists (B, A) where the elements A are
distributed evenly over the elements B according
to the length of the list of elements B
"""
# Make sure that the input is valid
args_total_len = len(
[element for sublist in args for element in sublist])
if len(elements_to_be_distributed) > args_total_len:
raise ValueError(
"Can't take more elements to be distributed than in args.")
output = []
start_index = 0
for element_list in args:
# Define part of elements_to_be_distributed that belongs to this
# element_sublist
sublist_len = len(element_list)
end_index = start_index + int(
sublist_len / args_total_len * len(elements_to_be_distributed)
)
select_elements_to_be_distributed = elements_to_be_distributed[
start_index:end_index
]
for element_index in range(len(element_list)):
if element_index < len(select_elements_to_be_distributed):
output.append(
[
element_list[element_index],
select_elements_to_be_distributed[element_index],
]
)
else:
output.append([element_list[element_index]])
start_index = start_index + int(
sublist_len / args_total_len * len(elements_to_be_distributed)
)
return output
class DepictionFeatures:
"""
A DepictionFeatures objects simply holds all depiction parameters
of a chemical structure depiction generated with RanDepict
"""
def __init__(self):
pass
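# Hedged usage sketch (module-level demo, not part of the library API): uses the
# RandomDepictor defined above; the SMILES string, shape and seed are
# illustrative values only.
if __name__ == "__main__":
    demo_depictor = RandomDepictor(seed=42)
    caffeine_smiles = "CN1C=NC2=C1C(=O)N(C(=O)N2C)C"
    # Plain random depiction (random toolkit and style, no augmentations)
    plain_image = demo_depictor.random_depiction(caffeine_smiles, (299, 299))
    # Calling the instance additionally applies random augmentations
    augmented_image = demo_depictor(caffeine_smiles, (299, 299))
    print(plain_image.shape, augmented_image.shape)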
| StarcoderdataPython |
3326976 | <filename>SRT/reservation.py
from .constants import *
class SRTTicket:
SEAT_TYPE = {"1": "일반실", "2": "특실"}
PASSENGER_TYPE = {
"1": "어른/청소년",
"2": "장애 1~3급",
"3": "장애 4~6급",
"4": "경로",
"5": "어린이",
}
def __init__(self, data):
self.car = data["scarNo"]
self.seat = data["seatNo"]
self.seat_type_code = data["psrmClCd"]
self.seat_type = self.SEAT_TYPE[self.seat_type_code]
self.passenger_type_code = data["psgTpCd"]
self.passenger_type = self.PASSENGER_TYPE[self.passenger_type_code]
self.price = int(data["rcvdAmt"])
self.original_price = int(data["stdrPrc"])
self.discount = int(data["dcntPrc"])
def __str__(self):
return self.dump()
def __repr__(self):
return self.dump()
def dump(self):
d = (
"{car}호차 {seat} ({seat_type}) {passenger_type} "
"[{price}원({discount}원 할인)]"
).format(
car=self.car,
seat=self.seat,
seat_type=self.seat_type,
passenger_type=self.passenger_type,
price=self.price,
discount=self.discount,
)
return d
class SRTReservation:
def __init__(self, train, pay, tickets):
self.reservation_number = train["pnrNo"]
self.total_cost = train["rcvdAmt"]
self.seat_count = train["tkSpecNum"]
self.train_code = pay["stlbTrnClsfCd"]
self.train_name = TRAIN_NAME[self.train_code]
self.train_number = pay["trnNo"]
self.dep_date = pay["dptDt"]
self.dep_time = pay["dptTm"]
self.dep_station_code = pay["dptRsStnCd"]
self.dep_station_name = STATION_NAME[self.dep_station_code]
self.arr_time = pay["arvTm"]
self.arr_station_code = pay["arvRsStnCd"]
self.arr_station_name = STATION_NAME[self.arr_station_code]
self.payment_date = pay["iseLmtDt"]
self.payment_time = pay["iseLmtTm"]
self._tickets = tickets
def __str__(self):
return self.dump()
def __repr__(self):
return self.dump()
def dump(self):
d = (
"[{name}] "
"{month}월 {day}일, "
"{dep}~{arr}"
"({dep_hour}:{dep_min}~{arr_hour}:{arr_min}) "
"{cost}원({seats}석), "
"구입기한 {pay_month}월 {pay_day}일 {pay_hour}:{pay_min}"
).format(
name=self.train_name,
month=self.dep_date[4:6],
day=self.dep_date[6:8],
dep=self.dep_station_name,
arr=self.arr_station_name,
dep_hour=self.dep_time[0:2],
dep_min=self.dep_time[2:4],
arr_hour=self.arr_time[0:2],
arr_min=self.arr_time[2:4],
cost=self.total_cost,
seats=self.seat_count,
pay_month=self.payment_date[4:6],
pay_day=self.payment_date[6:8],
pay_hour=self.payment_time[0:2],
pay_min=self.payment_time[2:4],
)
return d
@property
def tickets(self):
return self._tickets
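# Hedged usage sketch: the keys below mirror exactly what SRTTicket.__init__
# reads; the values are made-up sample data for illustration only.
if __name__ == "__main__":
    sample_ticket_data = {
        "scarNo": "2",
        "seatNo": "3A",
        "psrmClCd": "1",
        "psgTpCd": "1",
        "rcvdAmt": "47200",
        "stdrPrc": "52300",
        "dcntPrc": "5100",
    }
    print(SRTTicket(sample_ticket_data))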
| StarcoderdataPython |
4814702 | <gh_stars>0
#!/usr/bin/env python
import math
n = 325489
# Find the smallest odd perfect square >= n; its root is the side length of
# the spiral ring that contains n.
square = n
while True:
root = math.sqrt(square)
if root%1 == 0 and root%2 == 1:
break
square = square + 1
# Walk back from the odd square (a corner of the ring) towards n.
# Each side of the ring is (root - 1) steps long; the Manhattan distance to the
# centre is the ring radius plus the offset of n from its side's midpoint.
side = int(root) - 1
offset = (square - n) % side
middle = side // 2
ans = abs(offset - middle) + middle
print(ans)
| StarcoderdataPython |
111044 | <gh_stars>1-10
"""
Tools for working with files and file systems
"""
import os
import re
import itertools
import functools
import calendar
import contextlib
import logging
import datetime
import glob
import tempfile
import platform
import ctypes
import importlib
import pathlib
from typing import Dict, Union
from singledispatch import singledispatch
log = logging.getLogger(__name__)
def get_unique_pathname(path, root=''):
"""Return a pathname possibly with a number appended to it so that it is
unique in the directory."""
path = os.path.join(root, path)
# consider the path supplied, then the paths with numbers appended
potentialPaths = itertools.chain((path,), __get_numbered_paths(path))
potentialPaths = itertools.filterfalse(os.path.exists, potentialPaths)
return next(potentialPaths)
def __get_numbered_paths(filepath):
"""Append numbers in sequential order to the filename or folder name
Numbers should be appended before the extension on a filename."""
format = '%s (%%d)%s' % splitext_files_only(filepath)
return map(lambda n: format % n, itertools.count(1))
def splitext_files_only(filepath):
"Custom version of splitext that doesn't perform splitext on directories"
return (filepath, '') if os.path.isdir(filepath) else os.path.splitext(filepath)
def set_time(filename, mod_time):
"""
Set the modified time of a file
"""
log.debug('Setting modified time to %s', mod_time)
mtime = calendar.timegm(mod_time.utctimetuple())
# utctimetuple discards microseconds, so restore it (for consistency)
mtime += mod_time.microsecond / 1000000
atime = os.stat(filename).st_atime
os.utime(filename, (atime, mtime))
def get_time(filename):
"""
Get the modified time for a file as a datetime instance
"""
ts = os.stat(filename).st_mtime
return datetime.datetime.utcfromtimestamp(ts)
def insert_before_extension(filename, content):
"""
Given a filename and some content, insert the content just before
the extension.
>>> insert_before_extension('pages.pdf', '-old')
'pages-old.pdf'
"""
parts = list(os.path.splitext(filename))
parts[1:1] = [content]
return ''.join(parts)
class DirectoryStack(list):
r"""
...
DirectoryStack includes a context manager function that can be used
to easily perform an operation in a separate directory.
>>> orig_dir = os.getcwd()
>>> stack = DirectoryStack()
>>> with stack.context('/'): context_dir = os.getcwd()
>>> orig_dir == os.getcwd()
True
>>> orig_dir == context_dir
False
>>> len(stack)
0
>>> stack.pushd('/')
>>> len(stack)
1
>>> os.getcwd() == os.path.abspath('/')
True
>>> last_dir = stack.popd()
>>> last_dir == context_dir
True
>>> os.getcwd() == orig_dir
True
"""
def pushd(self, new_dir):
self.append(os.getcwd())
os.chdir(new_dir)
def popd(self):
res = os.getcwd()
os.chdir(self.pop())
return res
@contextlib.contextmanager
def context(self, new_dir):
self.pushd(new_dir)
try:
yield
finally:
self.popd()
def recursive_glob(root, spec):
"""
Like iglob, but recurse directories
>>> any('path.py' in result for result in recursive_glob('.', '*.py'))
True
>>> all(result.startswith('.') for result in recursive_glob('.', '*.py'))
True
>>> len(list(recursive_glob('.', '*.foo')))
0
"""
specs = (
os.path.join(dirpath, dirname, spec)
for dirpath, dirnames, filenames in os.walk(root)
for dirname in dirnames
)
return itertools.chain.from_iterable(glob.iglob(spec) for spec in specs)
def encode(name, system='NTFS'):
"""
Encode the name for a suitable name in the given filesystem
>>> encode('Test :1')
'Test _1'
"""
assert system == 'NTFS', 'unsupported filesystem'
special_characters = r'<>:"/\|?*' + ''.join(map(chr, range(32)))
pattern = '|'.join(map(re.escape, special_characters))
pattern = re.compile(pattern)
return pattern.sub('_', name)
class save_to_file:
"""
A context manager for saving some content to a file, and then
cleaning up the file afterward.
>>> with save_to_file(b'foo') as filename:
... assert 'foo' == open(filename).read()
"""
def __init__(self, content):
self.content = content
def __enter__(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.write(self.content)
tf.close()
self.filename = tf.name
return tf.name
def __exit__(self, type, value, traceback):
os.remove(self.filename)
@contextlib.contextmanager
def tempfile_context(*args, **kwargs):
"""
A wrapper around tempfile.mkstemp to create the file in a context and
delete it after.
"""
fd, filename = tempfile.mkstemp(*args, **kwargs)
os.close(fd)
try:
yield filename
finally:
os.remove(filename)
def replace_extension(new_ext, filename):
"""
>>> replace_extension('.pdf', 'myfile.doc')
'myfile.pdf'
"""
return os.path.splitext(filename)[0] + new_ext
def ExtensionReplacer(new_ext):
"""
A reusable function to replace a file's extension with another
>>> repl = ExtensionReplacer('.pdf')
>>> repl('myfile.doc')
'myfile.pdf'
>>> repl('myfile.txt')
'myfile.pdf'
>>> repl('myfile')
'myfile.pdf'
"""
return functools.partial(replace_extension, new_ext)
def ensure_dir_exists(func):
"wrap a function that returns a dir, making sure it exists"
@functools.wraps(func)
def make_if_not_present():
dir = func()
if not os.path.isdir(dir):
os.makedirs(dir)
return dir
return make_if_not_present
def read_chunks(file, chunk_size=2048, update_func=lambda x: None):
"""
Read file in chunks of size chunk_size (or smaller).
If update_func is specified, call it on every chunk with the amount
read.
"""
while True:
res = file.read(chunk_size)
if not res:
break
update_func(len(res))
yield res
def is_hidden(path):
"""
Check whether a file is presumed hidden, either because
the pathname starts with dot or because the platform
indicates such.
>>> is_hidden('.')
False
"""
full_path = os.path.abspath(path)
name = os.path.basename(full_path)
def no(path):
return False
platform_hidden = globals().get('is_hidden_' + platform.system(), no)
return name.startswith('.') or platform_hidden(full_path)
def is_hidden_Windows(path):
res = ctypes.windll.kernel32.GetFileAttributesW(path)
assert res != -1
return bool(res & 2)
def is_hidden_Darwin(path):
Foundation = importlib.import_module('Foundation')
url = Foundation.NSURL.fileURLWithPath_(path)
res = url.getResourceValue_forKey_error_(None, Foundation.NSURLIsHiddenKey, None)
return res[1]
FilesSpec = Dict[str, Union[str, bytes, 'FilesSpec']] # type: ignore
def build(spec: FilesSpec, prefix=pathlib.Path()):
"""
Build a set of files/directories, as described by the spec.
Each key represents a pathname, and the value represents
the content. Content may be a nested directory.
>>> spec = {
... 'README.txt': "A README file",
... "foo": {
... "__init__.py": "",
... "bar": {
... "__init__.py": "",
... },
... "baz.py": "# Some code",
... }
... }
>>> tmpdir = getfixture('tmpdir')
>>> build(spec, tmpdir)
"""
for name, contents in spec.items():
create(contents, pathlib.Path(prefix) / name)
@singledispatch
def create(content: dict, path):
path.mkdir(exist_ok=True)
build(content, prefix=path)
@create.register
def _(content: bytes, path):
path.write_bytes(content)
@create.register # type: ignore[no-redef]
def _(content: str, path):
path.write_text(content)
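# Hedged usage sketch: builds a tiny directory tree with the ``build`` helper
# above inside a temporary directory and prints the created paths.
if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as demo_root:
        build(
            {
                "README.txt": "A README file",
                "pkg": {"__init__.py": "", "util.py": "# Some code"},
            },
            prefix=demo_root,
        )
        print(sorted(str(p) for p in pathlib.Path(demo_root).rglob("*")))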
| StarcoderdataPython |
4807547 | <filename>tests/bot/test_handle_message.py
"""Test handle_message."""
import pytest
from phial import Message, Phial
from phial.errors import ArgumentValidationError
def test_handle_message_handles_none_correctly() -> None:
"""Test handle_message handle None correctly."""
def command() -> None:
raise Exception("Should not be called")
def middleware(message: Message) -> None:
raise Exception("Should not be called")
bot = Phial("token")
bot.add_command("test", command)
bot.add_middleware(middleware)
bot._handle_message(None)
def test_message_passed_to_middleware() -> None:
"""Test handle_message passes to middleware."""
def command() -> None:
raise Exception("Should not be called")
middleware_calls = [0]
def middleware(message: Message) -> None:
middleware_calls[0] += 1
bot = Phial("token")
bot.add_command("test", command)
bot.add_middleware(middleware)
message = Message("text", "channel", "user", "timestamp", "team")
bot._handle_message(message)
assert middleware_calls[0] == 1
def test_message_ignored_if_no_prefix() -> None:
"""Test message is ignored if it has no prefix."""
middleware_calls = [0]
command_calls = [0]
def command() -> None:
command_calls[0] += 1
def middleware(message: Message) -> Message:
middleware_calls[0] += 1
return message
bot = Phial("token")
bot.add_command("test", command)
bot.add_middleware(middleware)
message = Message("text", "channel", "user", "timestamp", "team")
bot._handle_message(message)
assert middleware_calls[0] == 1
assert command_calls[0] == 0
def test_message_calls_command_correctly() -> None:
"""Test message invokes a command correctly."""
middleware_calls = [0]
command_calls = [0]
def command() -> None:
command_calls[0] += 1
def middleware(message: Message) -> Message:
middleware_calls[0] += 1
return message
bot = Phial("token")
bot.add_command("test", command)
bot.add_middleware(middleware)
message = Message("!test", "channel", "user", "timestamp", "team")
bot._handle_message(message)
assert middleware_calls[0] == 1
assert command_calls[0] == 1
def test_message_calls_command_correctly_when_no_prefix() -> None:
"""Test message invokes a command correctly with no prefix."""
middleware_calls = [0]
command_calls = [0]
def command() -> None:
command_calls[0] += 1
def middleware(message: Message) -> Message:
middleware_calls[0] += 1
return message
bot = Phial("token", {"prefix": ""})
bot.add_command("test", command)
bot.add_middleware(middleware)
message = Message("test", "channel", "user", "timestamp", "team")
bot._handle_message(message)
assert middleware_calls[0] == 1
assert command_calls[0] == 1
def test_message_falls_back_correctly() -> None:
"""Test message hits fallback command correctly."""
middleware_calls = [0]
command_calls = [0]
fallback_calls = [0]
def command() -> None:
command_calls[0] += 1
def middleware(message: Message) -> Message:
middleware_calls[0] += 1
return message
def fallback(message: Message) -> None:
fallback_calls[0] += 1
bot = Phial("token")
bot.add_command("test", command)
bot.add_middleware(middleware)
bot.add_fallback_command(fallback)
message = Message("!test-fallback", "channel", "user", "timestamp", "team")
bot._handle_message(message)
assert middleware_calls[0] == 1
assert command_calls[0] == 0
assert fallback_calls[0] == 1
def test_argument_validation_works_correctly() -> None:
"""Test argument validation works correctly."""
command_calls = [0]
def command(name: str) -> None:
command_calls[0] += 1
bot = Phial("token")
bot.add_command("test", command)
message = Message("!test", "channel", "user", "timestamp", "team")
with pytest.raises(ArgumentValidationError):
bot._handle_message(message)
assert command_calls[0] == 0
def test_type_validation_works_correctly() -> None:
"""Test type validation works correctly."""
command_calls = [0]
def command(age: int) -> None:
command_calls[0] += 1
bot = Phial("token")
bot.add_command("test <age>", command)
message = Message("!test string", "channel", "user", "timestamp", "team")
bot._handle_message(message)
assert command_calls[0] == 0
| StarcoderdataPython |
1784413 | <filename>refla/db.py<gh_stars>0
from copy import copy
data = [
{
'id': 1,
'title': u'Discuss report with John',
'done': False
},
{
'id': 2,
'title': u'Get a haircut',
'done': True
},
{
'id': 3,
'title': u'Pay electricity bill',
'done': True
},
{
'id': 4,
'title': u'Check gym hours',
'done': False
}
]
def get_all():
return data
def get(item_id):
items = [item for item in data if item['id'] == item_id]
return items[0] if len(items) > 0 else None
def insert(item):
item = copy(item)
item['id'] = data[-1]['id'] + 1
data.append(item)
return item
def update(item_id, item):
new_item = copy(item)
new_item['id'] = item_id
    # Build a list (not a lazy map object) so that get_all()/delete() keep
    # working after an update on Python 3.
    globals()['data'] = [new_item if x['id'] == item_id else x for x in data]
return new_item
def delete(item_id):
data.remove(get(item_id))
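# Hedged usage sketch exercising the in-memory helpers defined above.
if __name__ == '__main__':
    created = insert({'title': u'Water the plants', 'done': False})
    update(created['id'], {'title': u'Water the plants', 'done': True})
    print(get(created['id']))
    delete(created['id'])
    print(len(get_all()))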
| StarcoderdataPython |
from urllib.parse import quote
from auth.validate import validade
client = validade()
def status_update(tweet):
    # URL-encode the status text so spaces and special characters survive in
    # the query string.
    updateStatusRequest = client.request(
        f'https://api.twitter.com/1.1/statuses/update.json?status={quote(tweet)}',
method='POST'
)
return updateStatusRequest
def home_timeline():
homeTimelineRequest = client.request(
'https://api.twitter.com/1.1/statuses/home_timeline.json'
)
return homeTimelineRequest
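# Hedged usage sketch: assumes ``client.request`` follows an oauth2-style
# interface and returns a (response, content) tuple; adjust if validade()
# hands back a different client type.
if __name__ == '__main__':
    response, content = home_timeline()
    print(response, content[:200] if content else content)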
| StarcoderdataPython |
3309628 | <reponame>PureFunctor/amalgam-lisp
from __future__ import annotations
from contextlib import contextmanager
from typing import (
cast,
Dict,
Iterable,
Mapping,
Optional,
TYPE_CHECKING,
)
if TYPE_CHECKING: # pragma: no cover
from amalgam.amalgams import Amalgam
from amalgam.engine import Engine
Bindings = Mapping[str, Amalgam]
class TopLevelPop(Exception):
"""Raised at :meth:`Environment.env_pop`."""
class Environment:
"""
Class that manages and represents nested execution environments.
Attributes:
bindings (:class:`Dict[str, Amalgam]`): A mapping of
:class:`str` keys to :class:`.amalgams.Amalgam` values.
parent (:class:`Optional[Environment]`): The parent
:class:`Environment` instance to search into, forming a
linked list.
level (:class:`int`): The current length of the
:class:`Environment` linked list. If a
:attr:`~.Environment.parent` is provided, sets the current
value to the parent's :attr:`~.Environment.level` + 1.
search_depth (:class:`int`): The search depth when traversing
the :class:`Environment` linked list in the
:meth:`~.Environment.__contains__`,
:meth:`~.Environment.__delitem__`,
:meth:`~.Environment.__getitem__`, and
:meth:`~.Environment.__setitem__` methods.
name (:class:`str`): The name of the execution environment.
engine (:class:`Engine`): A reference to the engine managing the
:class:`.parser.Parser` instance and the global
:class:`.Environment` instance.
"""
def __init__(
self,
bindings: Bindings = None,
parent: Environment = None,
name: str = "unknown",
engine: Engine = None,
) -> None:
self.bindings: Dict[str, Amalgam] = {**bindings} if bindings else {}
self.parent: Optional[Environment] = parent
self.level: int = parent.level + 1 if parent else 0
self.search_depth: int = 0
self.name = name
self.engine = cast("Engine", engine)
@property
def search_chain(self) -> Iterable[Dict[str, Amalgam]]:
"""
Yields :attr:`bindings` of nested :class:`Environment`
instances.
"""
yield self.bindings
if self.parent is None:
return
if self.search_depth >= 0:
depth = self.search_depth
else:
depth = self.level
_self = self.parent
for _ in range(depth):
yield _self.bindings
_self = _self.parent
def __getitem__(self, item: str) -> Amalgam:
"""
Attempts to recursively obtain the provided `item`.
Searches with respect to the current :attr:`search_depth` of the
calling :class:`Environment` instance. If an existing `item`
is encountered at a certain depth less than the target depth,
        returns that `item`, otherwise, raises :class:`KeyError`.
"""
for bindings in self.search_chain:
if item in bindings:
return bindings[item]
raise KeyError(item)
def __setitem__(self, item: str, value: Amalgam) -> None:
"""
Attempts to recursively set the provided `value` to an `item`.
Searches with respect to the current :attr:`search_depth` of the
calling :class:`Environment` instance. If an existing `item` is
encountered at a certain depth less than the target depth,
overrides that `item` instead.
"""
_search_chain = list(self.search_chain)
for bindings in _search_chain:
if item in bindings:
bindings[item] = value
break
else:
_search_chain[-1][item] = value
def __delitem__(self, item: str) -> None:
"""
Attempts to recursively delete the provided `item`.
Searches with respect to the current :attr:`search_depth` of the
calling :class:`Environment` instance. If an existing `item` is
encountered at a certain depth less than the target depth,
deletes that `item` instead.
"""
for bindings in self.search_chain:
if item in bindings:
del bindings[item]
break
else:
raise KeyError(item)
def __contains__(self, item: str) -> bool:
"""
Recursively checks whether an `item` exists.
Searches with respect to the current :attr:`search_depth` of the
calling :class:`Environment` instance. If the target `item` is
encountered at a certain depth less than the target depth,
immediately returns `True`, otherwise, returns `False`.
"""
for bindings in self.search_chain:
if item in bindings:
return True
return False
@contextmanager
def search_at(self, *, depth=0):
"""
Context manager for temporarily setting the lookup depth.
The provided `depth` argument must not exceed the :attr:`level`
of the calling :class:`Environment` instance, and will raise a
:class:`ValueError` if done so.
>>> env = Environment(FUNCTIONS)
>>>
>>> with env.search_at(depth=42):
... env["+"] # Raises ValueError
Any negative integer can be passed as a `depth` to signify
an infinite lookup until the top-most environment.
>>> env = Environment(FUNCTIONS)
>>> cl_env = env.env_push({...})
>>>
>>> with cl_env.search_at(depth=-1):
... cl_env["+"] # Searches `env`
"""
if depth > self.level:
exc = ValueError(
f"depth {depth} is greater than maximum level {self.level}"
)
raise exc
self.search_depth = depth
try:
yield self
finally:
self.search_depth = 0
def env_push(self, bindings: Bindings = None, name: str = None) -> Environment:
"""
Creates a new :class:`Environment` and binds the calling
instance as its parent environment.
"""
if name is None:
name = f"{self.name}-child"
return Environment(
bindings=bindings, parent=self, name=name, engine=self.engine,
)
def env_pop(self) -> Environment:
"""
Discards the current :class:`Environment` and returns the parent
:class:`Environment`.
"""
if self.parent is not None:
return self.parent
else:
raise TopLevelPop("cannot discard top-level Environment")
def __repr__(self) -> str: # pragma: no cover
return f"<Environment \"name={self.name}\" @ {hex(id(self))}>"
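# Illustrative sketch (assumes `prelude` is a mapping of names to Amalgam values;
# not part of the module itself):
#
#   global_env = Environment(bindings=prelude, name="global")
#   local_env = global_env.env_push({"x": some_amalgam}, name="closure")
#   "x" in local_env                     # True - found in the closure's own bindings
#   local_env.env_pop() is global_env    # True - the parent environment is returned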
| StarcoderdataPython |
1773029 | """
Author: <NAME>
Date: Nov 25, 2020
Retrieve datasets from a URL, file of URLs, or GitHub directory URL
Usage: get_datasets.py -i=<input> -o=<output> [-u] [-f] [-g] [-v]
Options:
-i <input>, --input <input> Input URL
-o <output>, --output <output> The local output location to transfer the datasets
[-u] (Default) Retrieve the input file from the given URL
[-f] Retrieve the list of file URLs specified in the input file
[-g] Retrieve the entire file contents of a github directory
[-v] Report verbose output of dataset retrieval process
"""
import os
import sys
import json
import urllib.request
from docopt import docopt
args = docopt(__doc__)
def get_datasets():
# Starting dataset retrieval
print("\n\n##### get_datasets: Retrieving datasets")
if verbose: print(f"Running get_dataset with arguments: \n {args}")
assert input, "Empty input argument provided"
if not os.path.exists(output_path):
os.makedirs(output_path)
assert os.path.exists(output_path), "Invalid output path provided"
download_urls = []
if args['-g'] is True:
# If this is a github repo, construct GET api request to
# retrieve all repo directory datafiles
github_api_url = "https://api.github.com/repos/"
repo_url = args["--input"].split("/")
# These magic numbers parse out the unnecessary github url branch info
github_api_url = github_api_url + \
("/").join(repo_url[3:5] + ["contents"] + repo_url[7:])
try:
if verbose: print(f"Attempting to connect to: {github_api_url}")
response = urllib.request.urlopen(github_api_url).read()
except ConnectionError:
print(f"Failed to connect to: {github_api_url}. \nExiting")
sys.exit(os.EX_NOHOST)
if verbose: print("Connection Success!")
git_files = json.loads(response.decode('utf-8'))
for file in git_files:
download_urls.append(file["download_url"])
elif args['-f'] is True:
assert os.path.exists(input), "Input file is not valid"
input_fh = open(input, "r")
for line in input_fh:
download_urls.append(line.strip())
else:
download_urls.append(input)
# Download all files requested
for file_url in download_urls:
output_file = output_path + os.path.sep + os.path.basename(file_url)
try:
if verbose: print(f"Attempting to retrieve {file_url}")
transfer = urllib.request.urlretrieve(file_url, output_file)
if verbose: print(f"Successfully transferred to {output_file}")
except ConnectionError:
print(f"Unable to retrieve: {file_url}, continuing")
print("\n##### get_datasets: Finished retrieval")
if __name__ == "__main__":
input = args["--input"]
output_path = args["--output"]
verbose = args["-v"]
get_datasets()
| StarcoderdataPython |
4800490 | <gh_stars>1-10
from unittest.mock import Mock
from dagster import build_init_resource_context, ResourceDefinition
from hca_orchestration.contrib.data_repo.data_repo_service import DataRepoService
from hca_orchestration.resources.config.datasets import find_or_create_project_dataset
from hca_orchestration.models.hca_dataset import TdrDataset
def test_find_or_create_project_dataset_returns_existing_dataset():
data_repo_service = Mock(spec=DataRepoService)
data_repo_service.find_dataset = Mock(
return_value=TdrDataset(
dataset_name="fake_name",
dataset_id="fake_dataset_id",
project_id="fake_gcp_proj_id",
billing_profile_id="fake_billing_prof_id",
bq_location="fake_bq_location"
)
)
init_context = build_init_resource_context(
config={
"env": "dev",
"region": "US",
"policy_members": ["<EMAIL>"],
"billing_profile_id": "fake_billing_profile_id",
"qualifier": None
},
resources={
"hca_project_id": ResourceDefinition.hardcoded_resource("08BCA7FF-A15A-4D58-806B-7CD45979768B"),
"data_repo_service": ResourceDefinition.hardcoded_resource(data_repo_service),
"run_start_time": ResourceDefinition.hardcoded_resource(1639494828)
}
)
result = find_or_create_project_dataset(init_context)
data_repo_service.find_dataset.assert_called_once_with(
"hca_dev_08bca7ffa15a4d58806b7cd45979768b", None
)
assert result.dataset_id == "fake_dataset_id", "Should receive the matching dataset"
def test_find_or_create_project_dataset_creates_new_dataset():
data_repo_service = Mock(spec=DataRepoService)
data_repo_service.find_dataset = Mock(return_value=None)
init_context = build_init_resource_context(
config={
"env": "dev",
"region": "US",
"policy_members": ["<EMAIL>"],
"billing_profile_id": "fake_billing_profile_id",
"qualifier": None
},
resources={
"hca_project_id": ResourceDefinition.hardcoded_resource("08BCA7FF-A15A-4D58-806B-7CD45979768B"),
"data_repo_service": ResourceDefinition.hardcoded_resource(data_repo_service),
"run_start_time": ResourceDefinition.hardcoded_resource(1639494828)
}
)
find_or_create_project_dataset(init_context)
data_repo_service.find_dataset.assert_called_once_with(
"hca_dev_08bca7ffa15a4d58806b7cd45979768b", None
)
data_repo_service.create_dataset.assert_called_once_with(
'hca_dev_08bca7ffa15a4d58806b7cd45979768b__20211214',
'dev',
'fake_billing_profile_id',
['<EMAIL>'],
'US',
'{"hca_project_id": "08bca7ffa15a4d58806b7cd45979768b"}'
)
def test_find_or_create_project_dataset_creates_new_dataset_with_qualifier():
data_repo_service = Mock(spec=DataRepoService)
data_repo_service.find_dataset = Mock(return_value=None)
init_context = build_init_resource_context(
config={
"env": "dev",
"region": "US",
"policy_members": ["<EMAIL>"],
"billing_profile_id": "fake_billing_profile_id",
"qualifier": "test_qualifier"
},
resources={
"hca_project_id": ResourceDefinition.hardcoded_resource("08BCA7FF-A15A-4D58-806B-7CD45979768B"),
"data_repo_service": ResourceDefinition.hardcoded_resource(data_repo_service),
"run_start_time": ResourceDefinition.hardcoded_resource(1639494828)
}
)
find_or_create_project_dataset(init_context)
data_repo_service.find_dataset.assert_called_once_with(
"hca_dev_08bca7ffa15a4d58806b7cd45979768b", "test_qualifier"
)
data_repo_service.create_dataset.assert_called_once_with(
'hca_dev_08bca7ffa15a4d58806b7cd45979768b__20211214_test_qualifier',
'dev',
'fake_billing_profile_id',
['<EMAIL>'],
'US',
'{"hca_project_id": "08bca7ffa15a4d58806b7cd45979768b"}'
)
def test_find_or_create_project_dataset_transforms_real_prod_to_prod():
data_repo_service = Mock(spec=DataRepoService)
init_context = build_init_resource_context(
config={
"env": "real_prod",
"region": "US",
"policy_members": ["<EMAIL>"],
"billing_profile_id": "fake_billing_profile_id",
"qualifier": None
},
resources={
"hca_project_id": ResourceDefinition.hardcoded_resource("08BCA7FF-A15A-4D58-806B-7CD45979768B"),
"data_repo_service": ResourceDefinition.hardcoded_resource(data_repo_service),
"run_start_time": ResourceDefinition.hardcoded_resource(1639494828)
}
)
find_or_create_project_dataset(init_context)
data_repo_service.find_dataset.assert_called_once_with(
"hca_prod_08bca7ffa15a4d58806b7cd45979768b", None
)
| StarcoderdataPython |
187875 | <gh_stars>1-10
# coding=utf-8
# Copyright 2011 Foursquare Labs Inc. All Rights Reserved
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import unittest
from foursquare.source_code_analysis.scala.scala_import_sorter import ScalaImportSorter
class ScalaImportRewriterTest(unittest.TestCase):
def _do_test_sorter(self, input_text, expected_text):
sorter = ScalaImportSorter(False, fancy=True)
sorted_text = sorter.apply_to_text('test.scala', input_text).new_text
self.assertEqual(expected_text, sorted_text)
def test_rewriter(self):
self._do_test_sorter(
"""
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
""",
"""
import java.bar.Bar
import scala.foo.Foo
import com.baz.Baz
""")
self._do_test_sorter(
"""
import com.foursquare.base.{Foo, Bar}
import scala.foo.Foo
import javax.bar.Bar
import com.foursquare.base.Baz
import scalax.baz.Baz
import java.quux.{Quuux => Quux2}
import org.foo.bar.Baz
import scala.qux.Qux
import java.quux.Quux
""",
"""
import java.quux.{Quuux => Quux2, Quux}
import javax.bar.Bar
import scala.foo.Foo
import scala.qux.Qux
import scalax.baz.Baz
import com.foursquare.base.{Bar, Baz, Foo}
import org.foo.bar.Baz
""")
self._do_test_sorter(
"""
// Random stuff
package foo.bar
import scala.foo.{Foo => Foo1, Foo2}
import com.baz.Baz
import java.bar.Bar
blah blah blah
blah
if (cond) {
import javax.foo.Foo
import java.bar.Bar
}
more blah
""",
"""
// Random stuff
package foo.bar
import java.bar.Bar
import scala.foo.{Foo => Foo1, Foo2}
import com.baz.Baz
blah blah blah
blah
if (cond) {
import java.bar.Bar
import javax.foo.Foo
}
more blah
""")
self._do_test_sorter(
"""
import com.baz.{Baz2, Baz3}
import java.bar.Bar
import com.baz.Baz
""",
"""
import java.bar.Bar
import com.baz.{Baz, Baz2, Baz3}
""")
self._do_test_sorter(
"""
import com.baz.{Baz2, Baz3}
import java.bar.Bar
import com.baz.Baz.Qux._
""",
"""
import java.bar.Bar
import com.baz.{Baz2, Baz3}
import com.baz.Baz.Qux._
""")
self._do_test_sorter(
"""
import com.foursquare.legacyconfig.LegacyDynamicConfigDirectory
import net.liftweb.common.{Box, Empty}
import net.liftweb.http.Req
import com.foursquare.base.IpHelpers
""",
"""
import com.foursquare.base.IpHelpers
import com.foursquare.legacyconfig.LegacyDynamicConfigDirectory
import net.liftweb.common.{Box, Empty}
import net.liftweb.http.Req
""")
# Note that string is deliberately over the 120 column mark, to test that it gets shortened properly.
self._do_test_sorter(
"""
import com.foursquare.record.{BaseUserForeignKey, BitFlagEnum => BitFlagEnumAlias, OptionalLongBitFlagField, FSMongoRecord,FSMongoMetaRecord, FSPhoneField, FSOptionalStringField,
FSOptionalPhotoField, GeolocationMongo}
import com.foursquare.record.MongoPoint
import com.foursquare.record.{JodaDateTimeField, UserForeignKey, MongoEmbeddedObjectField, MongoEnumField,
NamedMongoIdentifier, OptionalJodaDateTimeField, PhoneFormatMode => PhoneFormatModeAlias,
RandomStringField, SaltedPasswordField, UpdateableRecord, UnpersistedFK}
""",
"""
import com.foursquare.record.{BaseUserForeignKey, BitFlagEnum => BitFlagEnumAlias, FSMongoMetaRecord, FSMongoRecord,
FSOptionalPhotoField, FSOptionalStringField, FSPhoneField, GeolocationMongo, JodaDateTimeField,
MongoEmbeddedObjectField, MongoEnumField, MongoPoint, NamedMongoIdentifier, OptionalJodaDateTimeField,
OptionalLongBitFlagField, PhoneFormatMode => PhoneFormatModeAlias, RandomStringField, SaltedPasswordField,
UnpersistedFK, UpdateableRecord, UserForeignKey}
""")
| StarcoderdataPython |
1682583 | #!/usr/bin/env python2
import string
import sys
_KEYWORDS = dict([ (j, i) for i, j in enumerate([
'for', 'while', 'until',
'if', 'unless', 'elif', 'else',
'switch', 'case',
'{', '}', '(', ')',
';', ',', '.', '='
]
)])
def keywords_substitution(tokens):
"""
Transforms an array of tokens (str) into array of recognized tokens (str, int).
"""
return [_KEYWORDS[t] if t in _KEYWORDS else t for t in tokens]
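# Illustrative example (not part of the original module): tokens found in _KEYWORDS
# are replaced by their integer codes, everything else passes through unchanged.
if __name__ == '__main__':
    print(keywords_substitution(['if', 'x', '{', 'foo', '}']))
    # expected output: [3, 'x', 9, 'foo', 10]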
| StarcoderdataPython |
3210676 | """test_extensions.py."""
# Import a minimal text loader class, the functions for scrubber pipelines,
# and the scrubber function registry
from lexos.io.basic import Loader
from lexos.tokenizer import extensions, make_doc
# Load a text
data = ["tests/test_data/Austen_Pride.txt"]
loader = Loader()
loader.load(data)
# Make a spaCy doc
doc = make_doc(loader.texts[0][0:50])
for token in doc[0:5]:
print((token._.is_fruit, token._.get("is_fruit")))
| StarcoderdataPython |
1717794 | <reponame>ankitsumitg/python-projects<filename>gas_mileage/test_milesPerGallon.py<gh_stars>1-10
"""
Do Not Edit this file. You may and are encouraged to look at it for reference.
"""
import unittest
import gas_mileage
class TestMilesPerGallon(unittest.TestCase):
def test001_milesPerGallonExists(self):
self.assertTrue('milesPerGallon' in dir(gas_mileage),
'Function "milesPerGallon" is not defined, check your spelling')
return
def test002_milesPerGallonCalculate(self):
expected = 30.0
actual = gas_mileage.milesPerGallon(30.0, 1.0)
self.assertAlmostEqual(expected, actual, 2,
                               'Your milesPerGallon function returned %s which is not close enough to %s' % (actual, expected))
return
def test003_milesPerGallonCalculate(self):
expected = 0.0
actual = gas_mileage.milesPerGallon(30.0, 0.0)
self.assertAlmostEqual(expected, actual, 2,
                               'Your milesPerGallon function returned %s which is not close enough to %s' % (actual, expected))
return
def test004_milesPerGallonCalculate(self):
expected = 4.66
actual = gas_mileage.milesPerGallon(317.0, 68.0)
self.assertAlmostEqual(expected, actual, 2,
                               'Your milesPerGallon function returned %s which is not close enough to %s' % (actual, expected))
return
def test005_milesPerGallonCalculate(self):
expected = 26.42
actual = gas_mileage.milesPerGallon(317.0, 12.0)
self.assertAlmostEqual(expected, actual, 2,
                               'Your milesPerGallon function returned %s which is not close enough to %s' % (actual, expected))
return
def test006_milesPerGallonCalculate(self):
expected = 0.0
actual = gas_mileage.milesPerGallon(0.0, 0.0)
self.assertAlmostEqual(expected, actual, 2,
                               'Your milesPerGallon function returned %s which is not close enough to %s' % (actual, expected))
return
def test007_milesPerGallonCalculate(self):
expected = 0.0
actual = gas_mileage.milesPerGallon(0.0, 1.0)
self.assertAlmostEqual(expected, actual, 2,
                               'Your milesPerGallon function returned %s which is not close enough to %s' % (actual, expected))
return
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1756221 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-25 18:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Plink',
fields=[
('plink_id', models.CharField(max_length=64, primary_key=True, serialize=False)),
('description', models.CharField(blank=True, default=b'', max_length=256)),
('prefab_path', models.CharField(blank=True, default=b'', max_length=256)),
('options_str', models.CharField(blank=True, default=b'{}', max_length=256)),
],
),
migrations.CreateModel(
name='PlinkJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, default=b'', max_length=128, null=True)),
('status', models.CharField(choices=[(b'0', b'Job Not Ready'), (b'101', b'Job Pending'), (b'111', b'Job Ready'), (b'121', b'Job Scheduled'), (b'131', b'Job Processing'), (b'141', b'Job Error'), (b'888', b'Cancelled'), (b'999', b'All Completed')], default=b'0', max_length=64)),
('plink_id', models.CharField(blank=True, default=b'', max_length=64)),
('result_text', models.CharField(blank=True, default=b'', max_length=2048)),
],
),
migrations.CreateModel(
name='PlinkOption',
fields=[
('key', models.CharField(max_length=64, primary_key=True, serialize=False, unique=True)),
('value', models.CharField(blank=True, default=b'', max_length=255, null=True)),
('plink_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='plink.Plink')),
],
),
migrations.CreateModel(
name='PlinkPrefabs',
fields=[
('config_id', models.CharField(max_length=264, primary_key=True, serialize=False)),
('selected_prefab_id', models.CharField(max_length=64)),
('selected_prefab_path', models.CharField(max_length=264)),
],
),
]
| StarcoderdataPython |
165886 | <reponame>INDElab/conversationkg
# -*- coding: utf-8 -*-
import pickle
from urllib.parse import urlparse
from process_emails import extract_meta, parse_header, url_pattern,\
email_address_pattern, group_into_convos
class Ledger(dict):
def __init__(self):
super().__init__(self)
self.duplicate_entries = set()
def store(self, entity_or_fact, origin, mode=None):
if mode:
key = (entity_or_fact, mode)
else:
key = entity_or_fact
if key in self:
self.duplicate_entries.add(key)
self[key].append(origin)
else:
self[key] = [origin]
def recall(self, entity_or_fact, mode=None):
if mode:
key = (entity_or_fact, mode)
else:
key = entity_or_fact
return self[key]
def invert(self):
inverted_d = {}
for k, v in self.items():
for item in v:
if not item in inverted_d:
inverted_d[item] = []
inverted_d[item].append(k)
return inverted_d
def pickle(self, filename="ledger.pkl"):
with open(filename, "wb") as handle:
pickle.dump(self, handle)
@classmethod
def from_pickle(cls, filename="ledger.pkl"):
with open(filename, "rb") as handle:
loaded = pickle.load(handle)
return loaded
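# Illustrative Ledger usage sketch (hypothetical entity and origins; not part of the class):
#   ledger = Ledger()
#   ledger.store("example.org", "mail_1", mode="MentionedBy")
#   ledger.store("example.org", "mail_2", mode="MentionedBy")
#   ledger.recall("example.org", mode="MentionedBy")  # -> ["mail_1", "mail_2"]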
class Person:
def __init__(self, name, address, ledger):
self.name = name if name else ""
self.address = address if address else ""
self.org = Org.from_person(self)
        ledger.store(self.org, self, mode="EvidencedBy")  # casing matched to the other ledger entries
def __str__(self):
return self.name + " <" + self.address + ">"
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
if not isinstance(other, Person):
return False
return hash(other) == hash(self)
class Org:
@classmethod
def from_address(cls, email_addr):
extracted_org = email_addr[email_addr.rfind("@")+1:]
return extracted_org if extracted_org else None
@classmethod
def from_person(cls, person):
if not person.address:
return None
return cls.from_address(person.address)
def __init__(self, domain_name, provenance=None):
self.domain_name = domain_name
def __hash__(self):
return hash(self.domain_name)
def __eq__(self, other):
if not isinstance(other, Org):
return False
return hash(self) == hash(other)
class Link:
def __init__(self, url):
self.url = url
try:
parsed = urlparse(url)
self.domain = parsed.netloc
self.path = parsed.path
except ValueError:
self.domain = None
self.path = None
def __str__(self):
return str(self.url)
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
class Address:
def __init__(self, addr):
self.address = addr
self.org = self.extract_org()
def extract_org(self):
if self.address:
return self.address[self.address.rfind("@")+1:]
else:
None
def __str__(self):
return str(self.address)
def __repr__(self):
return str(self.address)
def __hash__(self):
return hash(str(self))
class Email:
def __init__(self, raw_text, ledger):
header_d, mail_body_raw = extract_meta(raw_text)
header_d = parse_header(header_d)
if header_d:
self.has_header = True
for k, v in header_d.items():
setattr(self, k, v)
self.sender = Person(self.name, self.email, ledger)
self.receiver = Person(*self.to, ledger)
ledger.store(self.sender, self, mode="EvidencedBy")
ledger.store(self.receiver, self, mode="EvidencedBy")
if self.cc:
self.cc = Person(*self.cc, ledger)
ledger.store(self.cc, self, mode="EvidencedBy")
else:
self.has_header = False
self.body_raw = mail_body_raw
self.links = self.extract_links()
for l in self.links: ledger.store(l, self, mode="MentionedBy")
self.addresses = self.extract_addresses()
for e in self.addresses: ledger.store(e, self, mode="MentionedBy")
    def extract_from_to(self, ledger):
        # Person requires a ledger, so it is passed through here
        return (Person(self.name, self.email, ledger),
                Person(*self.to, ledger))
def extract_links(self):
return [Link(l) for l in url_pattern.findall(self.body_raw)]
def extract_addresses(self):
return [Address(e) for e in email_address_pattern.findall(self.body_raw)]
def __hash__(self):
if self.has_header:
return hash(self.id)
else:
return hash(self.body_raw)
class Conversation:
@classmethod
def conversations_from_sorted_emails(cls, emails_in_temp_order, ledger):
convos = group_into_convos(emails_in_temp_order)
return [cls(mail_ls, ledger) for mail_ls in convos]
def __init__(self, list_of_emails, ledger):
if len(list_of_emails) > 1:
first, *_, last = list_of_emails
else:
first = last = list_of_emails[0]
self.start_time = first.sent
self.end_time = last.sent
self.emails = tuple(mail.id for mail in list_of_emails)
ledger.store(self, self.emails, mode="EvidencedBy")
self.senders = [m.sender for m in list_of_emails]
self.receivers = [m.receiver for m in list_of_emails]
self.interlocutors = set(self.senders + self.receivers)
self.observers = tuple(mail.cc for mail in list_of_emails)
self.orgs = tuple(person.org for person in self.interlocutors)
self.mentioned_links = tuple(l for mail in list_of_emails for l in mail.links)
self.mentioned_addresses = tuple(e for mail in list_of_emails for e in mail.addresses)
self.topic = "\n".join((m.subject for m in list_of_emails))
def __len__(self):
return len(self.emails)
def __hash__(self):
prod = 1
for mail_id in self.emails:
prod *= hash(mail_id)
        return prod
 | StarcoderdataPython |
3250544 | <reponame>rsdoherty/azure-sdk-for-python
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
from azure.identity import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
from azure.digitaltwins.core import DigitalTwinsClient
# Scenario example of how to:
# - create a DigitalTwins Service Client using the DigitalTwinsClient constructor
# - create two models, one model and one component model
# - get created models by modelIds one by one
# - list all models by listing them using the paginated API
# - decommission the created models
# - delete the created models
#
# Preconditions:
# - Environment variables have to be set
# - DigitalTwins enabled device must exist on the ADT hub
#
# For the purpose of this example we will create a temporary model and a temporary component model.
# We have to make sure these model Ids are unique within the DT instance.
try:
model_id = 'dtmi:samples:examplemodel;1'
component_id = 'dtmi:samples:examplecomponent;1'
temporary_component = {
"@id": component_id,
"@type": "Interface",
"@context": "dtmi:dtdl:context;2",
"displayName": "Component1",
"contents": [
{
"@type": "Property",
"name": "ComponentProp1",
"schema": "string"
},
{
"@type": "Telemetry",
"name": "ComponentTelemetry1",
"schema": "integer"
}
]
}
temporary_model = {
"@id": model_id,
"@type": "Interface",
"@context": "dtmi:dtdl:context;2",
"displayName": "TempModel",
"contents": [
{
"@type": "Property",
"name": "Prop1",
"schema": "string"
},
{
"@type": "Component",
"name": "Component1",
"schema": component_id
},
{
"@type": "Telemetry",
"name": "Telemetry1",
"schema": "integer"
}
]
}
# DefaultAzureCredential supports different authentication mechanisms and determines
# the appropriate credential type based of the environment it is executing in.
# It attempts to use multiple credential types in an order until it finds a working credential.
# - AZURE_URL: The tenant ID in Azure Active Directory
url = os.getenv("AZURE_URL")
# DefaultAzureCredential expects the following three environment variables:
# - AZURE_TENANT_ID: The tenant ID in Azure Active Directory
# - AZURE_CLIENT_ID: The application (client) ID registered in the AAD tenant
# - AZURE_CLIENT_SECRET: The client secret for the registered application
credential = DefaultAzureCredential()
service_client = DigitalTwinsClient(url, credential)
# Create models
new_models = [temporary_component, temporary_model]
models = service_client.create_models(new_models)
print('Created Models:')
print(models)
# Get created models
get_component_model = service_client.get_model(component_id)
print('Get Component Models:')
print(get_component_model)
get_model = service_client.get_model(model_id)
print('Get Model:')
print(get_model)
# List all models
listed_models = service_client.list_models()
for model in listed_models:
print(model)
    # Decommission models
service_client.decommission_model(model_id)
service_client.decommission_model(component_id)
# Delete models
service_client.delete_model(model_id)
service_client.delete_model(component_id)
except HttpResponseError as e:
print("\nThis sample has caught an error. {0}".format(e.message))
| StarcoderdataPython |
3352476 | <reponame>kyawmyomin/nornir
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Set,
Union,
KeysView,
ValuesView,
ItemsView,
Iterator,
TypeVar,
)
from nornir.core.configuration import Config
from nornir.core.plugins.connections import (
ConnectionPlugin,
ConnectionPluginRegister,
)
from nornir.core.exceptions import ConnectionAlreadyOpen, ConnectionNotOpen
from mypy_extensions import Arg, KwArg
HostOrGroup = TypeVar("HostOrGroup", "Host", "Group")
class BaseAttributes(object):
__slots__ = ("hostname", "port", "username", "password", "platform")
def __init__(
self,
hostname: Optional[str] = None,
port: Optional[int] = None,
username: Optional[str] = None,
password: Optional[str] = None,
platform: Optional[str] = None,
) -> None:
self.hostname = hostname
self.port = port
self.username = username
self.password = password
self.platform = platform
@classmethod
def schema(cls) -> Dict[str, Any]:
        return {
            "hostname": "str",
            "port": "int",
            "username": "str",
            "password": "str",
            "platform": "str",
        }
def dict(self) -> Dict[str, Any]:
        return {
            "hostname": self.hostname,
            "port": self.port,
            "username": self.username,
            "password": self.password,
            "platform": self.platform,
        }
class ConnectionOptions(BaseAttributes):
__slots__ = ("extras",)
def __init__(
self,
hostname: Optional[str] = None,
port: Optional[int] = None,
username: Optional[str] = None,
password: Optional[str] = None,
platform: Optional[str] = None,
extras: Optional[Dict[str, Any]] = None,
) -> None:
self.extras = extras
super().__init__(
hostname=hostname,
port=port,
username=username,
password=password,
platform=platform,
)
@classmethod
def schema(self) -> Dict[str, Any]:
return {
"extras": {"$key": "$value"},
**super().schema(),
}
def dict(self) -> Dict[str, Any]:
return {
"extras": self.extras,
**super().dict(),
}
class ParentGroups(List["Group"]):
def __contains__(self, value: object) -> bool:
if isinstance(value, str):
return any([value == g.name for g in self])
else:
return any([value == g for g in self])
class InventoryElement(BaseAttributes):
__slots__ = ("groups", "data", "connection_options")
def __init__(
self,
hostname: Optional[str] = None,
port: Optional[int] = None,
username: Optional[str] = None,
password: Optional[str] = None,
platform: Optional[str] = None,
groups: Optional[ParentGroups] = None,
data: Optional[Dict[str, Any]] = None,
connection_options: Optional[Dict[str, ConnectionOptions]] = None,
) -> None:
self.groups = groups or ParentGroups()
self.data = data or {}
self.connection_options = connection_options or {}
super().__init__(
hostname=hostname,
port=port,
username=username,
password=password,
platform=platform,
)
@classmethod
def schema(self) -> Dict[str, Any]:
return {
"groups": ["$group_name"],
"data": {"$key": "$value"},
"connection_options": {"$connection_type": ConnectionOptions.schema()},
**super().schema(),
}
def dict(self) -> Dict[str, Any]:
return {
"groups": [g.name for g in self.groups],
"data": self.data,
"connection_options": {
k: v.dict() for k, v in self.connection_options.items()
},
**super().dict(),
}
class Defaults(BaseAttributes):
__slots__ = ("data", "connection_options")
def __init__(
self,
hostname: Optional[str] = None,
port: Optional[int] = None,
username: Optional[str] = None,
password: Optional[str] = None,
platform: Optional[str] = None,
data: Optional[Dict[str, Any]] = None,
connection_options: Optional[Dict[str, ConnectionOptions]] = None,
) -> None:
self.data = data or {}
self.connection_options = connection_options or {}
super().__init__(
hostname=hostname,
port=port,
username=username,
password=password,
platform=platform,
)
@classmethod
def schema(self) -> Dict[str, Any]:
return {
"data": {"$key": "$value"},
"connection_options": {"$connection_type": ConnectionOptions.schema()},
**super().schema(),
}
def dict(self) -> Dict[str, Any]:
return {
"data": self.data,
"connection_options": {
k: v.dict() for k, v in self.connection_options.items()
},
**super().dict(),
}
class Host(InventoryElement):
__slots__ = ("name", "connections", "defaults")
def __init__(
self,
name: str,
hostname: Optional[str] = None,
port: Optional[int] = None,
username: Optional[str] = None,
password: Optional[str] = None,
platform: Optional[str] = None,
groups: Optional[ParentGroups] = None,
data: Optional[Dict[str, Any]] = None,
connection_options: Optional[Dict[str, ConnectionOptions]] = None,
defaults: Optional[Defaults] = None,
) -> None:
self.name = name
self.defaults = defaults or Defaults(None, None, None, None, None, None, None)
self.connections: Dict[str, ConnectionPlugin] = {}
super().__init__(
hostname=hostname,
port=port,
username=username,
password=password,
platform=platform,
groups=groups,
data=data,
connection_options=connection_options,
)
def _resolve_data(self) -> Dict[str, Any]:
processed = []
result = {}
for k, v in self.data.items():
processed.append(k)
result[k] = v
for g in self.groups:
for k, v in g.items():
if k not in processed:
processed.append(k)
result[k] = v
for k, v in self.defaults.data.items():
if k not in processed:
processed.append(k)
result[k] = v
return result
@classmethod
def schema(cls) -> Dict[str, Any]:
return {
"name": "str",
"connection_options": {"$connection_type": ConnectionOptions.schema()},
**super().schema(),
}
def dict(self) -> Dict[str, Any]:
return {
"name": self.name,
"connection_options": {
k: v.dict() for k, v in self.connection_options.items()
},
**super().dict(),
}
def keys(self) -> KeysView[str]:
"""Returns the keys of the attribute ``data`` and of the parent(s) groups."""
return self._resolve_data().keys()
def values(self) -> ValuesView[Any]:
"""Returns the values of the attribute ``data`` and of the parent(s) groups."""
return self._resolve_data().values()
def items(self) -> ItemsView[str, Any]:
"""
Returns all the data accessible from a device, including
the one inherited from parent groups
"""
return self._resolve_data().items()
def has_parent_group(self, group: Union[str, "Group"]) -> bool:
"""Returns whether the object is a child of the :obj:`Group` ``group``"""
if isinstance(group, str):
return self._has_parent_group_by_name(group)
else:
return self._has_parent_group_by_object(group)
def _has_parent_group_by_name(self, group: str) -> bool:
for g in self.groups:
if g.name == group or g.has_parent_group(group):
return True
return False
def _has_parent_group_by_object(self, group: "Group") -> bool:
for g in self.groups:
if g is group or g.has_parent_group(group):
return True
return False
def __getitem__(self, item: str) -> Any:
try:
return self.data[item]
except KeyError:
for g in self.groups:
try:
r = g[item]
return r
except KeyError:
continue
r = self.defaults.data.get(item)
if r is not None:
return r
raise
def __getattribute__(self, name: str) -> Any:
if name not in ("hostname", "port", "username", "password", "platform"):
return object.__getattribute__(self, name)
v = object.__getattribute__(self, name)
if v is None:
for g in self.groups:
r = getattr(g, name)
if r is not None:
return r
return object.__getattribute__(self.defaults, name)
else:
return v
def __bool__(self) -> bool:
return bool(self.name)
def __setitem__(self, item: str, value: Any) -> None:
self.data[item] = value
def __len__(self) -> int:
return len(self._resolve_data().keys())
def __iter__(self) -> Iterator[str]:
return self.data.__iter__()
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return "{}: {}".format(self.__class__.__name__, self.name or "")
def get(self, item: str, default: Any = None) -> Any:
"""
        Returns the value ``item`` from the host or the host's group variables.
Arguments:
item(``str``): The variable to get
default(``any``): Return value if item not found
"""
if hasattr(self, item):
return getattr(self, item)
try:
return self.__getitem__(item)
except KeyError:
return default
def get_connection_parameters(
self, connection: Optional[str] = None
) -> ConnectionOptions:
if not connection:
d = ConnectionOptions(
hostname=self.hostname,
port=self.port,
username=self.username,
                password=self.password,
platform=self.platform,
extras={},
)
else:
r = self._get_connection_options_recursively(connection)
if r is not None:
d = ConnectionOptions(
hostname=r.hostname if r.hostname is not None else self.hostname,
port=r.port if r.port is not None else self.port,
username=r.username if r.username is not None else self.username,
                    password=r.password if r.password is not None else self.password,
platform=r.platform if r.platform is not None else self.platform,
extras=r.extras if r.extras is not None else {},
)
else:
d = ConnectionOptions(
hostname=self.hostname,
port=self.port,
username=self.username,
                    password=self.password,
platform=self.platform,
extras={},
)
return d
def _get_connection_options_recursively(
self, connection: str
) -> Optional[ConnectionOptions]:
p = self.connection_options.get(connection)
if p is None:
p = ConnectionOptions(None, None, None, None, None, None)
for g in self.groups:
sp = g._get_connection_options_recursively(connection)
if sp is not None:
p.hostname = p.hostname if p.hostname is not None else sp.hostname
p.port = p.port if p.port is not None else sp.port
p.username = p.username if p.username is not None else sp.username
p.password = p.password if p.password is not None else sp.password
p.platform = p.platform if p.platform is not None else sp.platform
p.extras = p.extras if p.extras is not None else sp.extras
sp = self.defaults.connection_options.get(connection, None)
if sp is not None:
p.hostname = p.hostname if p.hostname is not None else sp.hostname
p.port = p.port if p.port is not None else sp.port
p.username = p.username if p.username is not None else sp.username
p.password = p.password if p.password is not None else sp.password
p.platform = p.platform if p.platform is not None else sp.platform
p.extras = p.extras if p.extras is not None else sp.extras
return p
def get_connection(self, connection: str, configuration: Config) -> Any:
"""
The function of this method is twofold:
1. If an existing connection is already established for the given type return it
2. If none exists, establish a new connection of that type with default parameters
and return it
Raises:
AttributeError: if it's unknown how to establish a connection for the given type
Arguments:
connection: Name of the connection, for instance, netmiko, paramiko, napalm...
Returns:
An already established connection
"""
if connection not in self.connections:
conn = self.get_connection_parameters(connection)
self.open_connection(
connection=connection,
configuration=configuration,
hostname=conn.hostname,
port=conn.port,
username=conn.username,
                password=conn.password,
platform=conn.platform,
extras=conn.extras,
)
return self.connections[connection].connection
def open_connection(
self,
connection: str,
configuration: Config,
hostname: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
port: Optional[int] = None,
platform: Optional[str] = None,
extras: Optional[Dict[str, Any]] = None,
default_to_host_attributes: bool = True,
) -> ConnectionPlugin:
"""
Open a new connection.
If ``default_to_host_attributes`` is set to ``True`` arguments will default to host
attributes if not specified.
Raises:
AttributeError: if it's unknown how to establish a connection for the given type
Returns:
An already established connection
"""
conn_name = connection
existing_conn = self.connections.get(conn_name)
if existing_conn is not None:
raise ConnectionAlreadyOpen(conn_name)
plugin = ConnectionPluginRegister.get_plugin(conn_name)
conn_obj = plugin()
if default_to_host_attributes:
conn_params = self.get_connection_parameters(conn_name)
hostname = hostname if hostname is not None else conn_params.hostname
username = username if username is not None else conn_params.username
password = password if password is not None else conn_params.password
port = port if port is not None else conn_params.port
platform = platform if platform is not None else conn_params.platform
extras = extras if extras is not None else conn_params.extras
conn_obj.open(
hostname=hostname,
username=username,
password=password,
port=port,
platform=platform,
extras=extras,
configuration=configuration,
)
self.connections[conn_name] = conn_obj
return conn_obj
def close_connection(self, connection: str) -> None:
""" Close the connection"""
conn_name = connection
if conn_name not in self.connections:
raise ConnectionNotOpen(conn_name)
conn_obj = self.connections.pop(conn_name)
if conn_obj is not None:
conn_obj.close()
def close_connections(self) -> None:
# Decouple deleting dictionary elements from iterating over connections dict
existing_conns = list(self.connections.keys())
for connection in existing_conns:
self.close_connection(connection)
class Group(Host):
pass
class Hosts(Dict[str, Host]):
pass
class Groups(Dict[str, Group]):
pass
TransformFunction = Callable[[Arg(Host), KwArg(Any)], None]
FilterObj = Callable[[Arg(Host), KwArg(Any)], bool]
class Inventory(object):
__slots__ = ("hosts", "groups", "defaults")
def __init__(
self,
hosts: Hosts,
groups: Optional[Groups] = None,
defaults: Optional[Defaults] = None,
transform_function: TransformFunction = None,
transform_function_options: Optional[Dict[str, Any]] = None,
) -> None:
self.hosts = hosts
self.groups = groups or Groups()
self.defaults = defaults or Defaults(None, None, None, None, None, None, None)
def filter(
self, filter_obj: FilterObj = None, filter_func: FilterObj = None, **kwargs: Any
) -> "Inventory":
filter_func = filter_obj or filter_func
if filter_func:
filtered = Hosts(
{n: h for n, h in self.hosts.items() if filter_func(h, **kwargs)}
)
else:
filtered = Hosts(
{
n: h
for n, h in self.hosts.items()
if all(h.get(k) == v for k, v in kwargs.items())
}
)
return Inventory(hosts=filtered, groups=self.groups, defaults=self.defaults)
def __len__(self) -> int:
return self.hosts.__len__()
def children_of_group(self, group: Union[str, Group]) -> Set[Host]:
"""
Returns set of hosts that belongs to a group including those that belong
indirectly via inheritance
"""
hosts: Set[Host] = set()
for host in self.hosts.values():
if host.has_parent_group(group):
hosts.add(host)
return hosts
@classmethod
def schema(cls) -> Dict[str, Any]:
"""
Return serialized dictionary of inventory
"""
return {
"hosts": {"$name": Host.schema()},
"groups": {"$group": Group.schema()},
"defaults": Defaults.schema(),
}
def dict(self) -> Dict[str, Any]:
"""
Return serialized dictionary of inventory
"""
return {
"hosts": {n: h.dict() for n, h in self.hosts.items()},
"groups": {n: g.dict() for n, g in self.groups.items()},
"defaults": self.defaults.dict(),
}
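# Illustrative usage sketch (hypothetical host data; not part of the module):
#
#   defaults = Defaults(username="admin", platform="ios")
#   hosts = Hosts({
#       "r1": Host(name="r1", hostname="10.0.0.1", data={"site": "hq"}, defaults=defaults),
#       "r2": Host(name="r2", hostname="10.0.0.2", data={"site": "branch"}, defaults=defaults),
#   })
#   inv = Inventory(hosts=hosts, defaults=defaults)
#   list(inv.filter(site="hq").hosts)   # -> ["r1"]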
| StarcoderdataPython |
3350282 | <reponame>wearpants/scrapi
import logging
import functools
from itertools import islice
from datetime import date, timedelta
from celery import Celery
from scrapi import util
from scrapi import events
from scrapi import settings
from scrapi import registry
from scrapi import processing
from scrapi.util import timestamp
from scrapi.base.helpers import null_on_error
app = Celery()
app.config_from_object(settings)
logger = logging.getLogger(__name__)
def task_autoretry(*args_task, **kwargs_task):
def actual_decorator(func):
@app.task(*args_task, **kwargs_task)
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except kwargs_task.get('autoretry_on', Exception) as exc:
logger.info('Retrying with exception {}'.format(exc))
raise wrapper.retry(exc=exc)
return wrapper
return actual_decorator
@app.task
@events.creates_task(events.HARVESTER_RUN)
def run_harvester(harvester_name, start_date=None, end_date=None):
logger.info('Running harvester "{}"'.format(harvester_name))
start_date = start_date or date.today() - timedelta(settings.DAYS_BACK)
end_date = end_date or date.today()
normalization = begin_normalization.s(harvester_name)
start_harvest = harvest.si(harvester_name, timestamp(), start_date=start_date, end_date=end_date)
# Form and start a celery chain
(start_harvest | normalization).apply_async()
@app.task
@events.logged(events.HARVESTER_RUN)
def harvest(harvester_name, job_created, start_date=None, end_date=None):
harvest_started = timestamp()
harvester = registry[harvester_name]
start_date = start_date or date.today() - timedelta(settings.DAYS_BACK)
end_date = end_date or date.today()
logger.info('Harvester "{}" has begun harvesting'.format(harvester_name))
result = harvester.harvest(start_date=start_date, end_date=end_date)
# result is a list of all of the RawDocuments harvested
return result, {
'harvestFinished': timestamp(),
'harvestTaskCreated': job_created,
'harvestStarted': harvest_started,
}
@app.task
def begin_normalization(raw_docs_timestamps, harvester_name):
    '''raw_docs_timestamps is the harvest return value:
    a tuple containing the list of RawDocuments and
a dictionary of timestamps
'''
(raw_docs, timestamps) = raw_docs_timestamps
logger.info('Normalizing {} documents for harvester "{}"'
.format(len(raw_docs), harvester_name))
# raw is a single raw document
for raw in raw_docs:
spawn_tasks(raw, timestamps, harvester_name)
@events.creates_task(events.PROCESSING)
@events.creates_task(events.NORMALIZATION)
def spawn_tasks(raw, timestamps, harvester_name):
raw['timestamps'] = timestamps
raw['timestamps']['normalizeTaskCreated'] = timestamp()
chain = (normalize.si(raw, harvester_name) | process_normalized.s(raw))
chain.apply_async()
process_raw.delay(raw)
@task_autoretry(default_retry_delay=settings.CELERY_RETRY_DELAY, max_retries=settings.CELERY_MAX_RETRIES)
@events.logged(events.PROCESSING, 'raw')
def process_raw(raw_doc, **kwargs):
processing.process_raw(raw_doc, kwargs)
@task_autoretry(default_retry_delay=settings.CELERY_RETRY_DELAY, max_retries=settings.CELERY_MAX_RETRIES, throws=events.Skip)
@events.logged(events.NORMALIZATION)
def normalize(raw_doc, harvester_name):
normalized_started = timestamp()
harvester = registry[harvester_name]
normalized = null_on_error(harvester.normalize)(raw_doc)
if not normalized:
raise events.Skip('Did not normalize document with id {}'.format(raw_doc['docID']))
normalized['timestamps'] = util.stamp_from_raw(raw_doc, normalizeStarted=normalized_started)
return normalized # returns a single normalized document
@task_autoretry(default_retry_delay=settings.CELERY_RETRY_DELAY, max_retries=settings.CELERY_MAX_RETRIES, throws=events.Skip)
@events.logged(events.PROCESSING, 'normalized')
def process_normalized(normalized_doc, raw_doc, **kwargs):
if not normalized_doc:
raise events.Skip('Not processing document with id {}'.format(raw_doc['docID']))
processing.process_normalized(raw_doc, normalized_doc, kwargs)
@app.task
def migrate(migration, source_db=None, sources=tuple(), run_async=False, dry=True, group_size=1000, **kwargs):
    # note: the flag is named run_async because `async` is a reserved word in Python 3.7+
source_db = source_db or settings.CANONICAL_PROCESSOR
documents = processing.get_processor(source_db).documents
doc_sources = sources or registry.keys()
docs = documents(*doc_sources)
    if run_async:
segment = list(islice(docs, group_size))
while segment:
migration.s(segment, sources=sources, dry=dry, source_db=source_db, **kwargs).apply_async()
segment = list(islice(docs, group_size))
else:
for doc in docs:
migration((doc,), sources=sources, dry=dry, **kwargs)
if dry:
logger.info('Dry run complete')
logger.info('Documents processed for migration {}'.format(str(migration)))
| StarcoderdataPython |
1675232 | import re
import sqlite3
from json import load, loads
from django.db import models
# Create your models here.
class ChoiceQuestions(models.Model):
def __init__(self, data):
self.data = str(data)
def _remove_non_alphanum_char(self, string):
return re.sub(r'\W+', ' ', string)
def _translate_numbers(self, current, new, string):
translation_table = str.maketrans(current, new)
return string.translate(translation_table)
    def normalize_string(self, student_data, intorstr=False):
        """Gets a string and standardizes it as follows:
        >> keeps only Persian characters (removing everything else), or
        >> keeps only English digits (removing everything else)"""
student_data = self._remove_non_alphanum_char(str(student_data))
student_data = student_data.upper()
persian_numerals = '۱۲۳۴۵۶۷۸۹۰'
arabic_numerals = '١٢٣٤٥٦٧٨٩٠'
english_numerals = '1234567890'
student_data = self._translate_numbers(
persian_numerals, english_numerals, student_data)
student_data = self._translate_numbers(
arabic_numerals, english_numerals, student_data)
if intorstr != False:
all_digit = "".join(re.findall("\d", student_data))
return int(all_digit)
else:
all_alpha = "".join(re.findall("[آ-ی- ]", student_data))
return all_alpha
    def cmp(self, first_dict, second_dict):
        """Compares first_dict and
        second_dict and scores the matching answers"""
try:
            # If the user enters something other than a dictionary,
            # return an error message asking for a dictionary.
score = 0
for num in first_dict:
if first_dict[num] == second_dict[num]:
score += 1
return score
except ValueError:
return "ValueError. Please Enter The dictionary"
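    # Illustrative example (hypothetical answer keys): comparing
    #   {"1": "a", "2": "b"} with {"1": "a", "2": "c"} scores 1 (one matching answer).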
    def json_render_score(self, test_input):
        """Reads the answer key JSON (parent_file) and compares it with test_input"""
        # TODO: let the user supply this JSON themselves
parent_file = open("quez/json/valed.json", "r")
parent = load(parent_file)["test"]
return self.cmp(parent, test_input)
    def get_data_json(self):
        """Gets the JSON data from the client, processes it,
        and returns the processed data."""
try:
            # parse the JSON payload sent by the client
data = f'{self.data}'
data = data.replace("'", '"')
data = loads(data)
            # unpack the parsed data into variables
num = self.normalize_string(data["num"], intorstr=True)
user_name = self.normalize_string(data["stdudent_name"])
if " " in user_name:
user_name = None
test_input = data["test"]
print(num, user_name)
score = self.json_render_score(test_input)
# connect sqlite DB
conn = sqlite3.connect("score.db")
cur = conn.cursor()
# create DataBase and add data to DataBase
cur.execute('''CREATE TABLE IF NOT EXISTS students(
num INTEGER PRIMARY KEY,
user_name TEXT,
test_input JSON,
            score TEXT);
''')
            # Add the JSON answers and the score to the DB (parameterized to avoid SQL injection)
            query = 'INSERT INTO students VALUES(?, ?, ?, ?)'
            cur.execute(query, (num, user_name, str(test_input), score))
conn.commit()
conn.close()
return f"{num}, {user_name}, {test_input}, {score}"
except:
return '{"Success":"False"}'
| StarcoderdataPython |
1663169 | <reponame>blurks/wold2
# coding=utf-8
"""fix poly word
Revision ID: 28959d50de6
Revises: 207db8b425a8
Create Date: 2015-08-04 12:39:04.468959
"""
# revision identifiers, used by Alembic.
revision = '28959d50de6'
down_revision = '207db8b425a8'
import datetime
from alembic import op
import sqlalchemy as sa
def upgrade():
update_pmtype(['unit'], 'base', 'custom')
def downgrade():
update_pmtype(['unit'], 'custom', 'base')
def update_pmtype(tablenames, before, after):
for table in tablenames:
op.execute(sa.text('UPDATE %s SET polymorphic_type = :after '
'WHERE polymorphic_type = :before' % table
).bindparams(before=before, after=after))
| StarcoderdataPython |
62102 | <reponame>Kanavoy/UODS
# filepaths, except with dots instead of slashes and no file extension
from Algorithms.Update.relative_agreement import perform_update as wrapped_update
from Algorithms.Intervention.degree import intervene as wrapped_intervene
# make sure you set opts.intervention.numb to 0, otherwise
# it'll execute the interventions at the start as well
count = 0
def perform_update(graph, opts, agents):
global count
count += 1
if count == 1000:
wrapped_intervene(graph, opts)
wrapped_update(graph, opts, agents)
| StarcoderdataPython |
116314 | import sympy as sp
x, y, z = sp.symbols('x y z')
sp.init_printing()
# Here we simply give the inputs needed, as follows:
# ( X0 ) & ( X1 ) & ( X2 )
# ( Fx ) is the equation of the function
# ( n ) is the number of Iterations needed
x0 = 4.5
x1 = 5.5
x2 = 5
Fx = x**3 - 13*x - 12
n = 3
print("____________________________________________________________________________________________________________\n "
"Iteration | X3 | H0 | H1 | D0 | D1 | A | B | C | Error "
"\n------------------------------------------------------------------------------------------------------------")
for i in range(n):
# Finding F(x0) & F(x1) & F(x2)
fx0 = Fx.subs(x, x0).evalf(); fx1 = Fx.subs(x, x1).evalf(); fx2 = Fx.subs(x, x2).evalf();
# Finding H 0 & 1
H0 = x1 - x0; H1 = x2 - x1;
# Finding Delta 0 & 1
d0 = (fx1 - fx0)/(x1 - x0); d1 = (fx2 - fx1)/(x2 - x1);
# Finding A & B & C
A = (d1 - d0)/(H1 + H0); B = A*H1 + d1; C = fx2;
    # To calculate X3 there are two candidate denominators: one using the positive square root and one using the negative
DeltaPos = (B + sp.sqrt(B**2 - (4*A*C)))
DeltaNeg = (B - sp.sqrt(B**2 - (4*A*C)))
if (abs(DeltaPos) > abs(DeltaNeg)): x3 = x2 + ((- 2*C)/DeltaPos)
else: x3 = x2 + ((- 2*C)/DeltaNeg)
Error = abs(((x3 - x2)/x3)*100)
print(" {0:2d} | {1:.5f} | {2:.5f} | {3:.5f} | {4:.5f} | {5:.4f} | {6:.4f} | {7:.4f} | {8:.4f} | {9:.6f} \n"
.format(i+1, x3, H0, H1, d0, d1, A, B, C, Error))
# Changing Variables for The Next Iteration
x0 = x1; x1 = x2; x2 = x3;
# The table above lists every intermediate value used to reach the final answer and the percentage error after each iteration.
| StarcoderdataPython |
1717243 | import json
import os
from distutils.dir_util import copy_tree
from unittest import TestCase
from osbot_aws.apis.Secrets import Secrets
from osbot_gsuite.apis.GDrive import GDrive
from osbot_gsuite.apis.GSheets import GSheets
from osbot_gsuite.apis.GSuite_Setup import GSuite_Setup
from osbot_utils.utils.Dev import Dev
from osbot_utils.utils.Files import file_contents
class test_GSuite_Setup(TestCase):
def setUp(self):
self.gsuite_setup = GSuite_Setup()
self.filename_with_gsuite_client_secret = 'client_secret_538397666918-ftcpk1s9u8gbgmc47c7ocb9kdll4m7j3.apps.googleusercontent.com.json'
self.file_with_credentials = f'{os.getenv("HOME")}/Downloads/{self.filename_with_gsuite_client_secret}'
self.scopes = [ 'https://www.googleapis.com/auth/calendar' ,
'https://www.googleapis.com/auth/documents' ,
'https://www.googleapis.com/auth/drive' ,
'https://www.googleapis.com/auth/presentations' ,
'https://www.googleapis.com/auth/spreadsheets' ]
def test_save_gsuite_client_secret_in_aws(self):
self.gsuite_setup.save_gsuite_client_secret_in_aws(self.file_with_credentials)
def test_create_auth_token_using_web_browser_flow(self):
self.gsuite_setup.create_auth_token_using_web_browser_flow(self.scopes)
def test_check_secret_works(self):
os.environ['AWS_REGION'] = 'london' # simulate AWS environment
gdrive = GDrive(self.gsuite_setup.secret_id_gsuite_token)
files = gdrive.files_all(10)
Dev.pprint(files)
#GSheets(self.gsuite_setup.secret_id_gsuite_token)
#assert len(GSheets(self.gsuite_setup.secret_id_gsuite_token).all_spreadsheets()) > 0
| StarcoderdataPython |
1638011 | from scapy.all import sniff
from threading import Thread, Event
class Sniffer(Thread):
    ''' Thread for incoming DNS requests '''
def __init__(self, interface, sniff_filter, packet_handler, fin_action):
super().__init__()
self.interface = interface
self.SNIFF_FILTER = sniff_filter
self.packet_handler = packet_handler
self.stop_sniffer = Event()
self.fin_action = fin_action
def run(self):
''' Run Thread '''
sniff(prn=self.packet_handler, filter=self.SNIFF_FILTER, store=False, stop_filter=self.is_stopped, iface=self.interface)
def stop(self, timeout=None):
''' Stop Thread '''
self.stop_sniffer.set()
self.fin_action()
super().join(timeout)
def is_stopped(self, packet):
        return self.stop_sniffer.is_set()
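# Illustrative usage sketch (hypothetical interface, filter and callbacks; not part of the module):
#
#   def handle(packet):
#       print(packet.summary())
#
#   sniffer = Sniffer(interface="eth0", sniff_filter="udp port 53",
#                     packet_handler=handle, fin_action=lambda: print("capture stopped"))
#   sniffer.start()
#   ...
#   sniffer.stop()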
| StarcoderdataPython |
3354476 | <reponame>dmccloskey/SBaaS_quantification<filename>SBaaS_quantification/stage01_quantification_MQResultsTable_io.py
#system
import json
#SBaaS
from .stage01_quantification_MQResultsTable_query import stage01_quantification_MQResultsTable_query
#from .stage01_quantification_analysis_query import stage01_quantification_analysis_query
from ddt_python.ddt_container import ddt_container
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from quantification_analysis.MQResultsTable import MQResultsTable
from ddt_python.ddt_container_filterMenuAndChart2dAndTable import ddt_container_filterMenuAndChart2dAndTable
import copy
class stage01_quantification_MQResultsTable_io(stage01_quantification_MQResultsTable_query,
sbaas_template_io):
def import_dataStage01MQResultsTable_add(self,filename):
'''table adds'''
##OPTION1:
#data = base_importData();
#data.read_csv(filename);
#data.format_data();
#self.add_dataStage01MQResultsTable(data.data);
#data.clear_data();
#OPTION2:
resultstable = MQResultsTable();
resultstable.import_resultsTable(filename);
self.add_dataStage01MQResultsTable(resultstable.resultsTable);
def import_dataStage01MQResultsTable_update(self,filename):
'''table updates'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.update_dataStage01MQResultsTable(data.data);
data.clear_data();
def export_dataStage01MQResultsTable_csv(self):
pass;
def export_dataStage01MQResultsTable_metricPlot_js(self,experiment_id_I,sample_names_I=[],component_names_I=[],measurement_I='calculated_concentration',data_dir_I="tmp"):
''' export a metric plot
INPUT:
experiment_id_I = experiment_id
sample_names_I = sample_names
component_names_I = component names
        measurement_I = measurement to plot, supports calculated_concentration, height_ratio, area_ratio, height, area, rt
        data_dir_I = "tmp" to write ddt_data.js to the visualization directory, "data_json" to return the json objects as a string'''
# get the data:
data_O = [];
cnt = 0;
#Query unique sample_name/component_name for the analysis
# get sample names
if sample_names_I:
sample_names = sample_names_I;
else:
sample_names = [];
sample_types = ['Quality Control','Unknown','Standard','Blank'];
for st in sample_types:
sample_names_tmp = [];
sample_names_tmp = self.get_sampleNames_experimentIDAndSampleType(experiment_id_I,st);
sample_names.extend(sample_names_tmp);
        # collect one row per sample/component for the data table
for sn in sample_names:
# get component names
if component_names_I:
component_names = component_names_I;
else:
component_names = [];
component_names = self.get_componentsNames_experimentIDAndSampleName(experiment_id_I,sn);
for cn in component_names:
# get the row
rows = {};
rows = self.get_row_sampleNameAndComponentName(sn,cn);
if rows:
rows['acquisition_date_and_time'] = self.convert_datetime2string(rows['acquisition_date_and_time'])
rows['index_'] = cnt;
rows['experiment_id']=experiment_id_I;
data_O.append(rows);
cnt+=1;
        # dump chart parameters to the js file
data1_keys = [
'experiment_id',
'sample_name',
'component_name',
measurement_I,
'acquisition_date_and_time',
'sample_type',
];
data1_nestkeys = ['component_name'];
data1_keymap = {'xdata':'index_',
'ydata':measurement_I,
'serieslabel':'component_name',
'featureslabel':'sample_name'};
parameters = {"chart1margin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"chart1width":500,"chart1height":350,
"chart1title":"Metric Plot", "chart1x1axislabel":"sample_name","chart1y1axislabel":"measurement"}
# make the data object
dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1','htmltype':'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'scatterlineplot2d_01',"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"sample_name","svgy1axislabel":"measurement_value",
'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1'};
svgtileparameters_O = {'tileheader':'Metric Plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'Metric plot','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0,1],"tile3":[0]};
# dump the data to a json file
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = None);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects());
def export_dataStage01MQResultsTable_js(
self,analysis_id_I,
features_I=[
'calculated_concentration',
'height_ratio',
'retention_time'],
include_IS_I=False,
data_dir_I="tmp"):
        ''' export a metric plot for all components in an analysis
        INPUT:
        analysis_id_I = analysis id
        features_I = features to plot,
            supports calculated_concentration, height_ratio,
            area_ratio, height, area, retention_time
        include_IS_I = include internal standards (not currently used by this method)
        data_dir_I = "tmp" to write ddt_data.js to the visualization directory,
            "data_json" to return the json objects as a string
        '''
#quantification_analysis_query = stage01_quantification_analysis_query(self.session,self.engine,self.settings)
# get the data:
data_O = [];
data_dict_O = {};
data_tmp = [];
data_tmp = self.getRowsJoin_analysisID_dataStage01QuantificationMQResultsTable(analysis_id_I);
# make a unique index for each component
component_names = list(set([d['component_name'] for d in data_tmp]));
component_names_dict = {d:0 for d in component_names};
for d in data_tmp:
tmp = {};
tmp['acquisition_date_and_time'] = self.convert_datetime2string(d['acquisition_date_and_time'])
tmp['sample_name'] = d['sample_name'];
tmp['sample_type'] = d['sample_type'];
#tmp['experiment_id'] = d['experiment_id'];
#tmp['analysis_id'] = d['analysis_id'];
tmp['analysis_id']=analysis_id_I;
tmp['component_name'] = d['component_name'];
tmp['component_group_name'] = d['component_group_name'];
tmp['component_index'] = component_names_dict[d['component_name']];
for m in features_I:
if not m in data_dict_O: data_dict_O[m]=[];
if m in d.keys() and not d[m] is None:
tmp1 = copy.copy(tmp);
tmp1['feature_name'] = m;
tmp1['feature_value'] = d[m];
data_dict_O[m].append(tmp1);
data_O.append(tmp1);
component_names_dict[d['component_name']]+=1;
        # dump chart parameters to the js file
data1_keys = [
'analysis_id',
'sample_name',
'component_name',
'feature_name',
'acquisition_date_and_time',
'sample_type',
];
data1_nestkeys = ['component_name'];
data1_keymap = {'xdata':'component_index',
'ydata':'feature_value',
'serieslabel':'component_name',
'featureslabel':'sample_name'};
svgkeymap1 = [[data1_keymap,data1_keymap] for k in data_dict_O.keys()]
parameters = {"chart1margin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"chart1width":500,"chart1height":350,
"chart1title":"Metric Plot", "chart1x1axislabel":"sample_name","chart1y1axislabel":"measurement"}
## make the data object
#dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
## make the tile parameter objects
#formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
# 'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
#formparameters_O = {'htmlid':'filtermenuform1','htmltype':'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
#formtileparameters_O.update(formparameters_O);
#svgparameters_O = {"svgtype":'scatterlineplot2d_01',"svgkeymap":[data1_keymap,data1_keymap],
# 'svgid':'svg1',
# "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
# "svgwidth":500,"svgheight":350,
# "svgx1axislabel":"sample_name","svgy1axislabel":"measurement_value",
# 'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1'};
#svgtileparameters_O = {'tileheader':'Metric Plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
# 'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
#svgtileparameters_O.update(svgparameters_O);
#tableparameters_O = {"tabletype":'responsivetable_01',
# 'tableid':'table1',
# "tablefilters":None,
# "tableclass":"table table-condensed table-hover",
# 'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
#tabletileparameters_O = {'tileheader':'Metric plot','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
# 'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
#tabletileparameters_O.update(tableparameters_O);
#parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
#tile2datamap_O = {"filtermenu1":[0],"tile2":[0,1],"tile3":[0]};
## dump the data to a json file
#ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = None);
#if data_dir_I=='tmp':
# filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
#elif data_dir_I=='data_json':
# data_json_O = ddtutilities.get_allObjects_js();
# return data_json_O;
#with open(filename_str,'w') as file:
# file.write(ddtutilities.get_allObjects());
nsvgtable = ddt_container_filterMenuAndChart2dAndTable();
nsvgtable.make_filterMenuAndChart2dAndTable(
data_filtermenu=data_O,
data_filtermenu_keys=data1_keys,
data_filtermenu_nestkeys=data1_nestkeys,
data_filtermenu_keymap=data1_keymap,
data_svg_keys=None,
data_svg_nestkeys=None,
data_svg_keymap=None,
data_table_keys=None,
data_table_nestkeys=None,
data_table_keymap=None,
data_svg=data_dict_O,
data_table=None,
#svgtype='scatterlineplot2d_01',
svgtype='scatterplot2d_01',
tabletype='responsivetable_01',
svgx1axislabel='',
svgy1axislabel='',
tablekeymap = [data1_keymap],
svgkeymap = [], #calculated on the fly
formtile2datamap=[0],
tabletile2datamap=[0],
svgtile2datamap=[], #calculated on the fly
svgfilters=None,
svgtileheader='Metric Plot',
tablefilters=None,
tableheaders=None,
svgparameters_I = parameters
);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = nsvgtable.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
            file.write(nsvgtable.get_allObjects());
 | StarcoderdataPython |
4831133 | # Generated by Django 3.1.6 on 2021-03-27 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('information_sharing', '0004_auto_20210327_2105'),
]
operations = [
migrations.CreateModel(
name='Discoverymethod',
fields=[
('discovery_method_id', models.AutoField(db_column='DiscoveryMethodID', primary_key=True, serialize=False)),
('discovery_method', models.CharField(db_column='DiscoveryMethod', max_length=128)),
('modify_time', models.DateTimeField(auto_now=True, db_column='ModifyTime', null=True)),
],
options={
'verbose_name': 'Discovery Method',
'verbose_name_plural': 'Discovery Methods',
'db_table': 'DiscoveryMethod',
'managed': False,
},
),
]
| StarcoderdataPython |
3320314 | """
Script to get fragment size distribution
@author: <NAME>
"""
##### IMPORT MODULES #####
# import necessary for python
import os
#import matplotlib as mpl
#mpl.use('PS')
import matplotlib.pyplot as plt
from pyatac.fragmentsizes import FragmentSizes
from pyatac.chunk import ChunkList
def get_sizes(args):
"""function to get fragment sizes
"""
if args.out is None:
args.out = '.'.join(os.path.basename(args.bam).split('.')[0:-1])
sizes = FragmentSizes(lower = args.lower, upper = args.upper, atac = args.atac)
if args.bed:
chunks = ChunkList.read(args.bed)
chunks.merge()
sizes.calculateSizes(args.bam, chunks)
else:
sizes.calculateSizes(args.bam)
sizes.save(args.out+'.fragmentsizes.txt')
if not args.no_plot:
#make figure
fig = plt.figure()
plt.plot(list(range(sizes.lower,sizes.upper)),sizes.get(sizes.lower,sizes.upper),label = args.out)
plt.xlabel("Fragment Size")
plt.ylabel("Frequency")
fig.savefig(args.out+'.fragmentsizes.pdf')
plt.close(fig)
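# Example usage (a sketch; get_sizes is normally invoked from the pyatac/nucleoatac
# command line, so the argparse namespace below is only illustrative):
#
#   from argparse import Namespace
#   args = Namespace(bam="sample.bam", bed=None, out=None,
#                    lower=0, upper=1000, atac=True, no_plot=False)
#   get_sizes(args)   # writes sample.fragmentsizes.txt and sample.fragmentsizes.pdf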
| StarcoderdataPython |
1788714 | <reponame>djfkahn/MemberHubDirectoryTools
import unittest
from unittest.mock import patch
import os
import family
import hub_map_tools
import roster
import person
data_file_path = os.path.abspath("./family_tests/")
hub_file_name = data_file_path + "/hub_map.csv"
common_hub_map = hub_map_tools.ReadHubMapFromFile(hub_file_name)
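# roster.Roster() reads from input() during construction; patching builtins.input
# to return 'y' lets this shared test fixture be built non-interactively.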
with patch('builtins.input', side_effect=['y']):
common_RosterC = roster.Roster()
class UT_01_AddAdultsFromCombinedField(unittest.TestCase):
def test_01_two_parents(self):
result = family.RosterFamily(adults_raw_name='A and B C')
result.AddAdultsFromCombinedField('<NAME>', 'A and B C', common_hub_map, common_RosterC)
self.assertEqual(2, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['0000'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual('B', result.adults[1].first_name)
self.assertEqual('C', result.adults[1].last_name)
self.assertEqual(['0000'],result.adults[1].hubs)
self.assertEqual('Adult2',result.adults[1].family_relation)
self.assertEqual(0, len(result.children))
def test_02_one_parent(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddAdultsFromCombinedField('<NAME>', 'A C', common_hub_map, common_RosterC)
self.assertEqual(1, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['1111'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual(0, len(result.children))
class UT_02_Roster_AddToFamily(unittest.TestCase):
def test_01_two_parents(self):
result = family.RosterFamily(adults_raw_name='A and B C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A and B C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(2, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['0000'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual('B', result.adults[1].first_name)
self.assertEqual('C', result.adults[1].last_name)
self.assertEqual(['0000'],result.adults[1].hubs)
self.assertEqual('Adult2',result.adults[1].family_relation)
self.assertEqual(1, len(result.children))
self.assertEqual('D', result.children[0].first_name)
self.assertEqual('C', result.children[0].last_name)
self.assertEqual(['0000'],result.children[0].hubs)
self.assertEqual('Child1',result.children[0].family_relation)
def test_02_one_parent(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(1, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['1111'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual(1, len(result.children))
self.assertEqual('D', result.children[0].first_name)
self.assertEqual('C', result.children[0].last_name)
self.assertEqual(['1111'],result.children[0].hubs)
self.assertEqual('Child1',result.children[0].family_relation)
def test_03_6th_grader(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '6',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(1, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['6666'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual(1, len(result.children))
self.assertEqual('D', result.children[0].first_name)
self.assertEqual('C', result.children[0].last_name)
self.assertEqual(['6666'],result.children[0].hubs)
self.assertEqual('Child1',result.children[0].family_relation)
def test_04_8th_grader(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '8',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(1, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual(['8888'],result.adults[0].hubs)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual(1, len(result.children))
self.assertEqual('D', result.children[0].first_name)
self.assertEqual('C', result.children[0].last_name)
self.assertEqual(['8888'],result.children[0].hubs)
self.assertEqual('Child1',result.children[0].family_relation)
def test_05_9th_grader(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '9',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(0, len(result.adults))
self.assertEqual(0, len(result.children))
def test_06_Unknown_Teacher(self):
result = family.RosterFamily(adults_raw_name='A C')
result.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '5',
adult_names = 'A C',
teacher_name = 'Unknown Teacher',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertEqual(0, len(result.adults))
self.assertEqual(0, len(result.children))
class UT_03_Directory_AddToFamily(unittest.TestCase):
def test_01_adult_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(1, len(result.adults))
self.assertEqual('A', result.adults[0].first_name)
self.assertEqual('C', result.adults[0].last_name)
self.assertEqual('1234', result.adults[0].person_id)
self.assertEqual('5678', result.adults[0].family_id)
self.assertEqual('Adult',result.adults[0].family_relation)
self.assertEqual('email',result.adults[0].email)
self.assertEqual(['0000'],result.adults[0].hubs)
self.assertEqual(0, len(result.children))
def test_02_child_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(0, len(result.adults))
self.assertEqual(1, len(result.children))
def test_03_adult_lower_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(1, len(result.adults))
self.assertEqual(0, len(result.children))
def test_04_child_lower_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(0, len(result.adults))
self.assertEqual(1, len(result.children))
def test_05_other_input(self):
result = family.DirectoryFamily('5678')
result.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Other',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertEqual(0, len(result.adults))
self.assertEqual(0, len(result.children))
class UT_04_IsSameFamily(unittest.TestCase):
def test_01_same_family(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.IsSameFamily(that))
def test_02_same_adult_different_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = '<NAME>',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.IsSameFamily(that))
def test_03_directory_orphan(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.IsSameFamily(that))
def test_04_roster_orphan(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name=' ')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = ' ',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.IsSameFamily(that))
def test_05_different_adult_same_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='E C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'E C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.IsSameFamily(that))
def test_06_more_adults_in_directory(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1236',
last_name = 'C',
first_name = 'B',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = '<NAME>',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.IsSameFamily(that))
def test_07_more_adults_in_roster(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.IsSameFamily(that))
class UT_05_HasNewChildren(unittest.TestCase):
def test_01_same_family(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.HasNewChildren(that))
def test_02_same_adult_different_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.HasNewChildren(that))
def test_03_directory_orphan(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.HasNewChildren(that))
def test_04_roster_orphan(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertTrue(this.HasNewChildren(that))
def test_05_different_adult_same_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = '<NAME>',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.HasNewChildren(that))
def test_06_more_adults_in_directory(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1236',
last_name = 'C',
first_name = 'B',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.HasNewChildren(that))
def test_07_more_adults_in_roster(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
self.assertFalse(this.HasNewChildren(that))
class UT_06_FormFamilyWithNewChildren(unittest.TestCase):
def test_01_family_with_new_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
result = family.Family()
result.FormFamilyWithNewChildren(this, that)
self.assertEqual(result.adults, this.adults)
self.assertEqual(1,len(result.children))
self.assertEqual('E',result.children[0].first_name)
self.assertEqual('C',result.children[0].last_name)
def test_02_family_without_new_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
result = family.Family()
result.FormFamilyWithNewChildren(this, that)
self.assertEqual(result.adults, this.adults)
self.assertEqual(0,len(result.children))
class UT_07_CombineWith(unittest.TestCase):
def test_01_add_new_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
this.CombineWith(that)
self.assertEqual(2,len(this.children))
self.assertEqual('D',this.children[0].first_name)
self.assertEqual('C',this.children[0].last_name)
self.assertEqual('E',this.children[1].first_name)
self.assertEqual('C',this.children[1].last_name)
def test_02_existing_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A C')
that.AddToFamily(child_first = 'D',
child_last = 'C',
grade = '0',
adult_names = 'A C',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
this.CombineWith(that)
self.assertEqual(1,len(this.children))
self.assertEqual('D',this.children[0].first_name)
self.assertEqual('C',this.children[0].last_name)
def test_03_different_family(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'B',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
that = family.RosterFamily(adults_raw_name='A D')
that.AddToFamily(child_first = 'E',
child_last = 'C',
grade = '0',
adult_names = 'A D',
teacher_name = '<NAME>',
hub_map = common_hub_map,
rosterC = common_RosterC)
this.CombineWith(that)
self.assertEqual(1,len(this.children))
self.assertEqual('B',this.children[0].first_name)
self.assertEqual('C',this.children[0].last_name)
class UT_08_IsChildless(unittest.TestCase):
    def test_01_parent_and_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertFalse(this.IsChildless())
    def test_02_parent_no_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertTrue(this.IsChildless())
def test_03_teacher(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Teachers'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertFalse(this.IsChildless())
def test_04_Staff(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Staff'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertFalse(this.IsChildless())
def test_05_volunteer(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Volunteers'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertFalse(this.IsChildless())
class UT_09_IsOrphan(unittest.TestCase):
def test_01_child_and_parent(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertFalse(this.IsOrphan())
def test_02_child_no_parent(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
self.assertTrue(this.IsOrphan())
class UT_10_FindAdultInFamily(unittest.TestCase):
def test_01_one_adult_one_match(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'A', 'Adult', '<NAME>', common_hub_map)
self.assertIsNotNone(this.FindAdultInFamily(to_find))
def test_02_two_adult_one_match(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'A', 'Adult', '<NAME>', common_hub_map)
self.assertIsNotNone(this.FindAdultInFamily(to_find))
def test_03_three_adult_one_match(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1236',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'A', 'Adult', '<NAME>', common_hub_map)
self.assertIsNotNone(this.FindAdultInFamily(to_find))
def test_04_two_adult_no_match(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'E', 'Adult', '<NAME>', common_hub_map)
self.assertIsNone(this.FindAdultInFamily(to_find))
def test_05_no_adult(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'A', 'Adult', '<NAME>', common_hub_map)
self.assertIsNone(this.FindAdultInFamily(to_find))
class UT_11_FindChildInFamily(unittest.TestCase):
def test_01_one_child_one_match(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'A', 'Child', '<NAME>', common_hub_map)
self.assertIsNotNone(this.FindChildInFamily(to_find))
def test_02_two_child_one_match(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'B',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'A', 'Child', '<NAME>', common_hub_map)
self.assertIsNotNone(this.FindChildInFamily(to_find))
def test_03_three_child_one_match(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'B',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1236',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'A', 'Child', '<NAME>', common_hub_map)
self.assertIsNotNone(this.FindChildInFamily(to_find))
def test_04_three_child_no_match(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'B',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1236',
last_name = 'C',
first_name = 'D',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Child',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'E', 'Child', '<NAME>', common_hub_map)
self.assertIsNone(this.FindChildInFamily(to_find))
def test_05_no_child(self):
this = family.DirectoryFamily('5678')
this.AddToFamily(person_id = '1234',
last_name = 'C',
first_name = 'A',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
this.AddToFamily(person_id = '1235',
last_name = 'C',
first_name = 'B',
middle_name = '',
suffix = '',
email = 'email',
family_id = '5678',
family_relation = 'Adult',
hub_name_list = 'Kinder (Room 0)'.split(';'),
account_created = '',
account_updated = '',
hub_map = common_hub_map)
to_find = person.RosterPerson('C', 'A', 'Child', '<NAME>', common_hub_map)
self.assertIsNone(this.FindChildInFamily(to_find))
if __name__ == '__main__':
    unittest.main()
 | StarcoderdataPython |
3354940 | import datetime
import great_expectations as ge
import great_expectations.jupyter_ux
from great_expectations.checkpoint import LegacyCheckpoint
from great_expectations.data_context.types.resource_identifiers import ValidationResultIdentifier
context = ge.data_context.DataContext()
# Feel free to change the name of your suite here. Renaming this will not
# remove the other one.
expectation_suite_name = "taxi.demo"
suite = context.get_expectation_suite(expectation_suite_name)
suite.expectations = []
batch_kwargs = {'path': '/Users/kevinkho/Work/demos/conferences/dremio-subsurface/great_expectations/.././test-file.csv', 'datasource': '.__dir', 'data_asset_name': 'test-file'}
batch = context.get_batch(batch_kwargs, suite)
batch.save_expectation_suite(discard_failed_expectations=False)
results = LegacyCheckpoint(
name="_temp_checkpoint",
data_context=context,
batches=[
{
"batch_kwargs": batch_kwargs,
"expectation_suite_names": [expectation_suite_name]
}
]
).run()
run_info_at_end = True
validation_results_page_renderer = (
ge.render.renderer.ValidationResultsPageRenderer(
run_info_at_end=run_info_at_end
)
)
rendered_document_content_list = (
validation_results_page_renderer.render_validation_operator_result(
validation_operator_result=results
)
)
markdown_artifact = " ".join(
ge.render.view.DefaultMarkdownPageView().render(
rendered_document_content_list
)
    )
 | StarcoderdataPython |
3202311 | <filename>research/multi_representation_adversary/multi_representation_adversary/trainer.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop for adversarial training in multiple representation spaces."""
from absl import logging
import gin
from multi_representation_adversary import data
from multi_representation_adversary import helper
from multi_representation_adversary import resnet
from multi_representation_adversary import selectors
import tensorflow.compat.v2 as tf
@gin.configurable
def learning_rate_scheduler(epoch, values=(0.1, 0.01, 0.001),
breakpoints=(100, 150)):
"""Piecewise constant schedule for learning rate."""
idx = sum(1 if epoch > b else 0 for b in breakpoints)
return values[idx]
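# Worked example with the defaults above (values=(0.1, 0.01, 0.001), breakpoints=(100, 150)):
#   learning_rate_scheduler(50)  -> 0.1    (epoch <= 100)
#   learning_rate_scheduler(120) -> 0.01   (100 < epoch <= 150)
#   learning_rate_scheduler(180) -> 0.001  (epoch > 150)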
@gin.configurable
def train(ckpt_dir=None,
summary_dir=None,
epochs=200,
steps_per_epoch=351, # 45000 / 128 for CIFAR-10
global_batch_size=128,
model_fn=resnet.build_resnet_v1,
lr_scheduler=learning_rate_scheduler,
representation_list=(("identity", "none"),)):
"""Train a model with adversarial training in multiple representation spaces.
Args:
ckpt_dir: The directory to store model checkpoints.
summary_dir: The directory to store training summaries.
epochs: Maximum number of epochs to train for.
steps_per_epoch: Number of training steps in each epoch.
global_batch_size: Batch size across all processors/accelerators for each
training step.
model_fn: A callable which builds the model structure.
lr_scheduler: A callable which returns the learning rate at any given epoch.
representation_list: A list of (transform, attack) tuples representing the
adversaries that this model should consider.
"""
# Set up distributed training strategy first because all variables (model,
# optimizer, etc) have to be created in the strategy's scope.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = model_fn(return_logits=True) # Other params are set in gin
optimizer = tf.keras.optimizers.SGD(learning_rate=lr_scheduler(0),
momentum=0.9)
loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def loss_fn(label, logit):
# Normalize by global_batch_size, which is different from usual
# (per-replica) batch size in a distributed training environment.
return tf.nn.compute_average_loss(loss_obj(label, logit),
global_batch_size=global_batch_size)
metrics = [
tf.keras.metrics.SparseCategoricalCrossentropy("loss",
from_logits=True),
tf.keras.metrics.SparseCategoricalAccuracy("accuracy")]
# Compile a tf.function for training and eval (validation) steps for each
# (transform, attack) tuple.
representation_names = []
train_step_fns, eval_step_fns = [], []
for transform_name, attack_name in representation_list:
representation_names.append(f"{transform_name}_{attack_name}")
attack_fn = helper.build_attack_fn(model, transform_name, attack_name)
train_step_fns.append(helper.build_train_step_fn(
model, optimizer, loss_fn, metrics, attack_fn))
eval_step_fns.append(helper.build_eval_step_fn(model, metrics, attack_fn))
selector = selectors.construct_representation_selector(representation_names)
# Create checkpoint object for saving model weights and selector state.
checkpoint = tf.train.Checkpoint(model=model, selector=selector)
ckpt_mgr = tf.train.CheckpointManager(checkpoint, ckpt_dir,
max_to_keep=None)
restored_path = ckpt_mgr.restore_or_initialize()
if restored_path:
logging.info("Restored checkpoint %s", restored_path)
start_epoch = int(restored_path.rsplit("-", 1)[-1]) # path like "ckpt-N"
total_steps = start_epoch * steps_per_epoch
else:
logging.info("Model initialized")
start_epoch, total_steps = 0, 0
ckpt_mgr.save(0)
train_dataset = data.get_training_dataset(global_batch_size)
valid_dataset = data.get_validation_dataset(global_batch_size)
with tf.summary.create_file_writer(summary_dir).as_default():
for epoch in range(start_epoch + 1, epochs + 1):
logging.info("Epoch %d", epoch)
# Learning rate decay
if lr_scheduler(epoch) != optimizer.learning_rate:
optimizer.learning_rate = lr_scheduler(epoch)
logging.info("New learning rate: %g", optimizer.learning_rate)
# Training
dist_dataset = strategy.experimental_distribute_dataset(
train_dataset.take(steps_per_epoch))
for x, y in dist_dataset:
selected_idx = selector.select(total_steps)
train_step_fn = train_step_fns[selected_idx]
per_replica_loss = strategy.run(train_step_fn, args=(x, y))
loss_value = strategy.reduce(tf.distribute.ReduceOp.SUM,
per_replica_loss, axis=None)
if total_steps % 50 == 0:
tf.summary.scalar("train/batch_loss", loss_value, step=total_steps)
total_steps += 1
for metric in metrics:
tf.summary.scalar(f"train/{metric.name}", metric.result(), step=epoch)
metric.reset_states()
# Maybe update the selector's state
if selector.should_update(epoch):
logging.info("Evaluate on validation set and update selector state")
validation_losses = []
dist_val_dataset = strategy.experimental_distribute_dataset(
valid_dataset)
for i, eval_step_fn in enumerate(eval_step_fns):
for x, y in dist_val_dataset:
strategy.run(eval_step_fn, args=(x, y))
validation_losses.append(metrics[0].result()) # Crossentropy loss
for metric in metrics:
name = f"validation/{metric.name}/{representation_names[i]}"
tf.summary.scalar(name, metric.result(), step=epoch)
metric.reset_states()
selector.update(epoch, validation_losses)
# Save a checkpoint
ckpt_mgr.save(epoch)
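# Example invocation (a minimal sketch; the bindings mirror the @gin.configurable
# parameters above, but real experiments configure these via the project's .gin files):
#
#   import gin
#   gin.parse_config([
#       "train.epochs = 200",
#       "train.global_batch_size = 128",
#   ])
#   train(ckpt_dir="/tmp/ckpts", summary_dir="/tmp/summaries")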
| StarcoderdataPython |
3349706 | import nextcord
async def option_management(game, player_list):
def check(msg):
return msg.author == game.mj
while True:
em = nextcord.Embed(title="Option", description="```diff\noption:\n{}```".format(
"\n".join("{} {} : {}".format('+' if j else '-', i, repr(j))
for i, j in game.option.items())))
em.set_footer(text="\"OK\" pour valider, \"{option}={valeur}\" pour modifier")
notif = await game.channel.send(embed=em)
message = await game.client.wait_for('message', check=check)
await message.delete()
if message.content == 'OK':
em.set_footer(text="Options verrouillées")
await notif.edit(embed=em)
break
av = message.content.split('=')
if len(av) != 2:
await game.channel.send("Syntaxe invalide\nAttendu: ``option=valeur``", delete_after=10)
await notif.delete()
continue
if av[0] not in game.option:
            await game.channel.send("Syntaxe invalide\nL'option demandée n'existe pas", delete_after=10)
await notif.delete()
continue
value = av[1]
if value.isdigit(): value = int(value)
elif value.lower() == 'true': value = True
elif value.lower() == 'false': value = False
game.option[av[0]] = value
await game.channel.send("Option modifiée o7", delete_after=10)
await notif.delete()
if game.option['death_channel_id']:
game.death_channel = game.client.get_channel(game.option['death_channel_id']) | StarcoderdataPython |
1632015 | <filename>server.py<gh_stars>0
from object import Object
OK = 1
NOK = 0
class Server():
def __init__(self, name):
self.name = name
self.counter = 0
self.objects = []
def sync_to_client(self, counter):
objects = [o for o in self.objects if o.lastupdate_counter > counter]
result = {
'statuscode': OK,
'servercounter': self.counter,
'objects': objects
}
return result
def sync_from_client(self, objects):
result = {}
for sob in objects:
exists = False
conflict = False
for o in self.objects:
if o.guid == sob.guid:
exists = True
o.value = sob.value
o.deleted = sob.deleted
self.counter += 1
o.lastupdate_counter = self.counter
elif o.pk == sob.pk:
# pk conflict: do nothing because this should not occur on server
# client will always sync from server to client first and handle
# any pk conflicts before syncing from client to server
exists = True
conflict = True
result['statuscode'] = NOK
if not exists:
no = Object(pk=sob.pk, name=sob.name, value=sob.value, guid=sob.guid)
no.deleted = sob.deleted
self.counter += 1
no.lastupdate_counter = self.counter
self.objects.append(no)
if 'statuscode' not in result:
result['statuscode'] = OK
result['servercounter'] = self.counter
return result
def add_object(self, pk, name, value):
''' Create object on server (do not use this function to add
object from a client sync '''
obj = Object(pk=pk, name=name, value=value)
self.counter += 1
obj.lastupdate_counter = self.counter
self.objects.append(obj)
def update_object(self, pk, value):
''' Update object on server (do not use this function to update
object from a client sync '''
for o in self.objects:
if o.pk == pk:
o.update(value)
self.counter += 1
o.lastupdate_counter = self.counter
def delete_object(self, pk):
''' Delete object on server (do not use this function to delete
object from a client sync '''
for o in self.objects:
if o.pk == pk:
o.delete()
self.counter += 1
o.lastupdate_counter = self.counter
def display(self):
text = []
text.append(" State of server: " + self.name + " - ")
text.append("Counter: " + str(self.counter) + "\n")
self.debug_output("".join(text), self.objects)
    def debug_output(self, text, objects=None):
        # Default to the server's own objects when no explicit list is given.
        if objects is None:
            objects = self.objects
        print("{}\n".format(text))
        for o in objects:
            o.display()
            print(" -----------------------------------")
        print("")
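
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); it only exercises
    # the Server methods defined above and assumes the companion `object`
    # module providing Object is importable.
    server = Server('demo')
    server.add_object(pk=1, name='greeting', value='hello')
    server.add_object(pk=2, name='answer', value=42)

    # A client that has never synced (counter 0) receives both objects.
    result = server.sync_to_client(0)
    assert result['statuscode'] == OK
    assert result['servercounter'] == 2
    assert len(result['objects']) == 2

    # A client already at counter 2 only receives the later change.
    server.update_object(pk=1, value='hello again')
    result = server.sync_to_client(2)
    assert len(result['objects']) == 1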
| StarcoderdataPython |
3249565 | <filename>quadpy/sphere/stroud.py
# -*- coding: utf-8 -*-
#
import numpy
import sympy
from .helpers import cartesian_to_spherical
from .albrecht_collatz import AlbrechtCollatz
from .mclaren import McLaren
from ..nsphere.stroud1969 import Stroud1969
class Stroud(object):
"""
<NAME>,
Approximate Calculation of Multiple Integrals,
Prentice Hall, 1971.
"""
def __init__(self, index, symbolic=False):
self.name = "Stroud_U3({})".format(index)
if index == "U3 3-1":
self.set_data(McLaren(1, symbolic=symbolic))
elif index == "U3 5-1":
self.set_data(AlbrechtCollatz(1, symbolic=symbolic))
elif index == "U3 5-2":
self.set_data(AlbrechtCollatz(2, symbolic=symbolic))
elif index == "U3 5-3":
self.set_data(AlbrechtCollatz(3, symbolic=symbolic))
elif index == "U3 5-4":
self.set_data(AlbrechtCollatz(4, symbolic=symbolic))
elif index == "U3 5-5":
self.set_data(McLaren(2, symbolic=symbolic))
elif index == "U3 7-1":
self.set_data(McLaren(3, symbolic=symbolic))
elif index == "U3 7-2":
self.set_data(AlbrechtCollatz(5, symbolic=symbolic))
elif index == "U3 8-1":
self.set_data(McLaren(4, symbolic=symbolic))
elif index == "U3 9-1":
self.set_data(McLaren(5, symbolic=symbolic))
elif index == "U3 9-2":
self.set_data(McLaren(6, symbolic=symbolic))
elif index == "U3 9-3":
self.set_data(McLaren(7, symbolic=symbolic))
elif index == "U3 11-1":
self.set_data(McLaren(8, symbolic=symbolic))
elif index == "U3 11-2":
scheme = Stroud1969(3, symbolic=symbolic)
self.degree = scheme.degree
self.weights = scheme.weights
pi = sympy.pi if symbolic else numpy.pi
self.weights /= 4 * pi
self.points = scheme.points
self.azimuthal_polar = cartesian_to_spherical(self.points)
elif index == "U3 11-3":
self.set_data(McLaren(9, symbolic=symbolic))
else:
assert index == "U3 14-1", "Illegal index {}.".format(index)
self.set_data(McLaren(10, symbolic=symbolic))
return
def set_data(self, scheme):
self.degree = scheme.degree
self.weights = scheme.weights
self.points = scheme.points
self.azimuthal_polar = scheme.azimuthal_polar
return
| StarcoderdataPython |
1686014 | <filename>callnumber_app/tests.py
# -*- coding: utf-8 -*-
import logging, pprint
from django.test import TestCase
log = logging.getLogger(__name__)
TestCase.maxDiff = None
class ClientTest( TestCase ):
""" Tests views via Client. """
def test_callnumber_response(self):
""" Checks two submitted callnumbers. """
response = self.client.get( '/v2/', { 'callnumber': 'TP1085,PJ 1001'} )
log.debug( 'response.__dict__, ```%s```' % pprint.pformat(response.__dict__) )
self.assertEqual( 200, response.status_code )
content = response.content.decode('utf-8')
self.assertTrue( '"normalized_call_number": "TP 1085"' in content )
self.assertTrue( '"normalized_call_number": "PJ 1001"' in content )
def test_problematic_callnumber_response(self):
""" Checks callnumber request that generated an error. """
response = self.client.get( '/v2/', { 'callnumber': 'BB .S7333 1777 5'} )
log.debug( 'response.__dict__, ```%s```' % pprint.pformat(response.__dict__) )
self.assertEqual( 200, response.status_code )
content = response.content.decode('utf-8')
self.assertTrue( '"normalized_call_number": "BB 0000000S7333 000 000 17775"' in content )
## end class ClientTest()
class RootUrlTest( TestCase ):
""" Checks root urls. """
def test_root_url_no_slash(self):
""" Checks '/root_url'. """
response = self.client.get( '' ) # project root part of url is assumed
self.assertEqual( 302, response.status_code ) # permanent redirect
redirect_url = response._headers['location'][1]
self.assertEqual( '/info/', redirect_url )
def test_root_url_slash(self):
""" Checks '/root_url/'. """
response = self.client.get( '/' ) # project root part of url is assumed
self.assertEqual( 302, response.status_code ) # permanent redirect
redirect_url = response._headers['location'][1]
self.assertEqual( '/info/', redirect_url )
# end class RootUrlTest()
## TODO:
## - test views_helper.DumpParamHandler.prep_points()
| StarcoderdataPython |
1767547 | <reponame>FarmVivi/kodihikvision
import xbmcaddon
def getSettingString(setting_id):
return xbmcaddon.Addon(id='plugin.farmvivi.hikvision').getSettingString(setting_id)
def getSettingInt(setting_id):
return xbmcaddon.Addon(id='plugin.farmvivi.hikvision').getSettingInt(setting_id)
| StarcoderdataPython |
6619 | #!/usr/bin/python
"""
Firewall for munkireport.
By Tuxudo
Returns all details about how the firewall is configured.
"""
import subprocess
import os
import sys
import platform
import re
import plistlib
import json
sys.path.insert(0,'/usr/local/munki')
sys.path.insert(0, '/usr/local/munkireport')
from munkilib import FoundationPlist
def get_firewall_info():
'''Uses system profiler to get firewall info for the machine.'''
cmd = ['/usr/sbin/system_profiler', 'SPFirewallDataType', '-xml']
proc = subprocess.Popen(cmd, shell=False, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, unused_error) = proc.communicate()
try:
plist = plistlib.readPlistFromString(output)
# system_profiler xml is an array
firewall_dict = plist[0]
items = firewall_dict['_items']
return items
except Exception:
return {}
def flatten_firewall_info(array):
    '''Un-nest firewall info, return a dict with the relevant keys'''
firewall = {}
for obj in array:
for item in obj:
if item == '_items':
                # Recurse into nested '_items' and merge into the flat dict.
                firewall.update(flatten_firewall_info(obj['_items']))
elif item == 'spfirewall_services':
for service in obj[item]:
if obj[item][service] == "spfirewall_allow_all":
obj[item][service] = 1
else:
obj[item][service] = 0
firewall['services'] = json.dumps(obj[item])
elif item == 'spfirewall_applications':
for application in obj[item]:
if obj[item][application] == "spfirewall_allow_all":
obj[item][application] = 1
else:
obj[item][application] = 0
firewall['applications'] = json.dumps(obj[item])
return firewall
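# Illustrative shape of the transformation above (values assumed, not captured
# from a real machine): system_profiler yields entries such as
#   [{'spfirewall_services': {'Remote Login': 'spfirewall_allow_all',
#                             'Screen Sharing': 'spfirewall_block_all'}}]
# which flatten_firewall_info() converts to
#   {'services': '{"Remote Login": 1, "Screen Sharing": 0}'}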
def get_alf_preferences():
pl = FoundationPlist.readPlist("/Library/Preferences/com.apple.alf.plist")
firewall = {}
for item in pl:
if item == 'allowdownloadsignedenabled':
firewall['allowdownloadsignedenabled'] = to_bool(pl[item])
elif item == 'allowsignedenabled':
firewall['allowsignedenabled'] = to_bool(pl[item])
elif item == 'firewallunload':
firewall['firewallunload'] = to_bool(pl[item])
elif item == 'globalstate':
firewall['globalstate'] = to_bool(pl[item])
elif item == 'stealthenabled':
firewall['stealthenabled'] = to_bool(pl[item])
elif item == 'loggingenabled':
firewall['loggingenabled'] = to_bool(pl[item])
elif item == 'loggingoption':
firewall['loggingoption'] = pl[item]
elif item == 'version':
firewall['version'] = pl[item]
return firewall
def to_bool(s):
if s == True:
return 1
else:
return 0
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
def main():
"""Main"""
# Skip manual check
if len(sys.argv) > 1:
if sys.argv[1] == 'manualcheck':
print 'Manual check: skipping'
exit(0)
# Create cache dir if it does not exist
cachedir = '%s/cache' % os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# Set the encoding
# The "ugly hack" :P
reload(sys)
sys.setdefaultencoding('utf8')
# Get results
result = dict()
info = get_firewall_info()
result = merge_two_dicts(flatten_firewall_info(info), get_alf_preferences())
# Write firewall results to cache
output_plist = os.path.join(cachedir, 'firewall.plist')
FoundationPlist.writePlist(result, output_plist)
#print FoundationPlist.writePlistToString(result)
if __name__ == "__main__":
main()
| StarcoderdataPython |
3256977 | <filename>app/models.py
from . import login_manager
from . import database
from .exceptions import ValidationError
from datetime import datetime, timezone
from itsdangerous import BadHeader, SignatureExpired
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import and_, not_
from flask import current_app, url_for
from flask_login import UserMixin, AnonymousUserMixin
from functools import partial
from werkzeug.security import generate_password_hash, check_password_hash
utc_now = partial(datetime.now, tz=timezone.utc)
@login_manager.user_loader
def load_user(user_id):
"""
Set up current user.
"""
return User.query.get(int(user_id))
def format_date(date):
"""
Return a string representation of the given date.
:param date: DateTime instance
:returns: string
"""
date_format = '%A, %B %d %Y %H:%M'
return date.strftime(date_format)
def add_test_users():
"""
Load data to the database
for testing purposes.
"""
database.create_all()
arthur = User(username='Arthur', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
morgain = User(username='Morgain', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
clair = User(username='Clair', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
merlin = User(username='Merlin', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
ophelia = User(username='Ophelia', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
lancelot = User(username='Lancelot', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
guinevere = User(username='Guinevere',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
uther = User(username='Uther',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
mordred = User(username='Mordred',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
percival = User(username='Percival',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
dinadan = User(username='Dinadan',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
gingalain = User(username='Gingalain',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
galahad = User(username='Galahad',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
pelleas = User(username='Pelleas',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
pellinore = User(username='Pellinore',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
tristan = User(username='Tristan',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
branor = User(username='Branor',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
accolon = User(username='Accolon',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
blanchefleur = User(username='Blanchefleur',
email='<EMAIL>ur',
password='<PASSWORD>', confirmed=True)
brangaine = User(username='Brangaine',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
cailia = User(username='Caelia',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
dindrane = User(username='Dindrane',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
enide = User(username='Enide',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
database.session.add_all([arthur, morgain, clair, merlin, ophelia,
lancelot, guinevere, mordred, percival,
dinadan, gingalain, galahad, pelleas,
pellinore, tristan, branor, accolon, cailia,
blanchefleur, brangaine, dindrane, enide,
uther])
database.session.commit()
# association table for many-to-many relationship
# between User model and Chat model
UserChatTable = database.Table(
'user_chat_link',
database.Column('user_id',
database.Integer,
database.ForeignKey('users.id',
ondelete="CASCADE"),
primary_key=True),
database.Column('chat_id',
database.Integer,
database.ForeignKey('chats.id',
ondelete="CASCADE"),
primary_key=True)
)
class RemovedChat(database.Model):
"""
Association table
to keep track of which users
mark which chats as removed.
"""
__tablename__ = 'removed_chats'
user_id = database.Column(database.Integer,
database.ForeignKey('users.id',
ondelete="CASCADE"),
primary_key=True)
chat_id = database.Column(database.Integer,
database.ForeignKey('chats.id',
ondelete="CASCADE"),
primary_key=True)
class Role(database.Model):
"""
Represents user role for managing
permission.
Static methods defined here:
insert_roles(roles=None)
"""
__tablename__ = 'roles'
id = database.Column(database.Integer, primary_key=True)
name = database.Column(database.String(64), unique=True)
is_default = database.Column(database.Boolean,
default=False,
index=True,
nullable=False)
permissions = database.Column(database.Integer)
users = database.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return f'Role(id={self.id}, name={self.name}'
# TODO
# change admin permissions to the disjunction of all the available
# permissions
@staticmethod
def insert_roles(roles=None):
"""
Insert the given roles to the database.
Insert a default set of roles if
called with no parameters.
:param roles: dictionary of roles
in the form
{'role_name': {'permissions': int,
'is_default': bool}}
"""
if roles is None:
roles = {'Admin': {'permissions': 0xff,
'is_default': False},
'User': {'permissions': 0,
'is_default': True}
}
for role in roles:
new_role = Role.query.filter_by(name=role).first()
if new_role is None:
new_role = Role(name=role)
new_role.permissions = roles[role]['permissions']
new_role.is_default = roles[role]['is_default']
database.session.add(new_role)
database.session.commit()
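    # Illustrative call (hypothetical role name, not part of the original
    # module): additional roles can be inserted with the same dictionary
    # shape, e.g.
    #   Role.insert_roles({'Moderator': {'permissions': 0x0f,
    #                                    'is_default': False}})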
class Contact(database.Model):
"""
Association table
representing many-to-many relationship
among User model instances.
"""
__tablename__ = 'contacts'
user_id = database.Column(database.Integer,
database.ForeignKey('users.id'),
primary_key=True)
contact_id = database.Column(database.Integer,
database.ForeignKey('users.id'),
primary_key=True)
contact_group = database.Column(database.String(16), nullable=True)
_date_created = database.Column(database.DateTime(timezone=True),
default=utc_now)
@hybrid_property
def date_created(self):
return self._date_created.astimezone(timezone.utc)
@date_created.expression
def date_created(self):
return self._date_created
@date_created.setter
def date_created(self, value):
        self._date_created = value
class Chat(database.Model):
"""
Represents a chat,
which is defined as a collection of
users and messages.
Methods defined here:
to_json(user)
get_name(user)
add_users(users)
    remove_users(users)
Static methods defined here:
    from_json(json_chat, current_user)
search_chats_query(chat_name, user)
Class methods defined here:
get_chat(users)
"""
__tablename__ = 'chats'
id = database.Column(database.Integer, primary_key=True)
name = database.Column(database.String(64))
is_group_chat = database.Column(database.Boolean, default=False)
_date_created = database.Column(database.DateTime(timezone=True),
default=utc_now)
_date_modified = database.Column(database.DateTime(timezone=True),
default=utc_now,
onupdate=utc_now)
removed_users = database.relationship('RemovedChat',
backref='chat',
lazy='dynamic',
cascade='all, delete-orphan')
@hybrid_property
def date_created(self):
return self._date_created.astimezone(timezone.utc)
@date_created.expression
def date_created(self):
return self._date_created
@date_created.setter
def date_created(self, value):
self._date_created = value
@hybrid_property
def date_modified(self):
return self._date_modified.astimezone(timezone.utc)
@date_modified.expression
def date_modified(self):
return self._date_modified
@date_modified.setter
def date_modified(self, value):
self._date_modified = value
def to_json(self, user):
"""
Return a JSON representation
of current chat.
:param user: current user (needed to get chat name)
:returns: Chat model instance turned to dictionary
"""
chat = {'chat_name': self.get_name(user),
'is_group_chat': self.is_group_chat,
'date_created': self.date_created,
'date_modified': self.date_modified,
'messages': url_for('api.get_messages',
chat_id=self.id,
_external=True)
}
return chat
def get_name(self, user):
"""
Return current chat's 'name' if present,
otherwise return 'username'
of the first user of current chat's 'users' attribute
which is not equal to the given user's username.
:param user: User model instance
:returns: string
"""
if self.name:
return self.name
recipient = (self
.users
.filter(User.username != user.username)
.first())
return recipient.username
def add_users(self, users):
"""
Add the given users to current chat.
:param users: sequence of User model instances
"""
for user in users:
if not user in self.users.all():
self.users.append(user)
database.session.commit()
def remove_users(self, users):
"""
        Remove the given users from current chat.
:param users: sequence of User model instances
"""
for user in users:
self.users.remove(user)
database.session.commit()
@staticmethod
def from_json(json_chat, current_user):
"""
Return a Chat model instance
created from the given json_chat dictionary.
:param json_chat: dictionary
:param current_user: current user (needed to get chat name)
:returns: Message model instance
"""
chat = Chat()
chat_name = json_chat.get('chat_name')
usernames = json_chat.get('users')
users = User.query.filter(User.username.in_(usernames)).all()
if len(users) > 1:
chat.is_group_chat = True
if not chat_name:
                raise ValidationError(
                    'Chat name or recipient name must be present.')
chat.add_users(users)
chat.add_users([current_user])
return chat
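    # Illustrative payload (names are hypothetical): from_json expects a dict
    # such as {'chat_name': 'round table', 'users': ['Lancelot', 'Percival']};
    # for a chat with a single recipient the 'chat_name' key may be omitted.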
@staticmethod
def search_chats_query(chat_name, user):
"""
Return a query of chats
where each chat either:
- contains the given chat_name in 'name' column;
- has only two users ('is_group_chat' is False and 'name' is None),
and the user with 'username' not equal to (the given user).username
contains the given chat_name in 'username'.
:param chat_name: string to search for
:param user: user whose 'username' is excluded from search,
                     e.g. if
                     chat.users == [User(username='bob'),
                                    User(username='arthur')]
                     and chat.name is None,
                     then search_chats_query('bob', User(username='bob'))
                     does not match this chat, while
                     search_chats_query('arthur', User(username='bob')) does
:returns: Chat model query
"""
subquery_current = (User
.query
.filter(User
.username == user.username)
.subquery())
subquery_pattern = (User
.query
.filter(User.username != user.username,
User
.username
.ilike('%' + chat_name + '%'))
.subquery())
subquery_current_chats = (database
.session
.query(UserChatTable.c.chat_id)
.join(subquery_current,
UserChatTable
.c
.user_id == subquery_current.c.id)
.subquery())
subquery_pattern_chats = (database
.session
.query(UserChatTable.c.chat_id)
.join(subquery_pattern,
UserChatTable
.c
.user_id == subquery_pattern.c.id)
.subquery())
chats = (database
.session
.query(Chat)
.join(subquery_current_chats,
Chat.id == subquery_current_chats.c.chat_id)
.join(subquery_pattern_chats,
subquery_current_chats
.c
.chat_id == subquery_pattern_chats.c.chat_id))
return (database
.session
.query(Chat)
.filter(Chat.users.contains(user),
Chat.name.ilike('%' + chat_name + '%'))
.union(chats))
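    # Illustrative use (fixture names from add_test_users above): for the user
    # arthur, search_chats_query('lance', arthur) matches arthur's chats whose
    # name contains 'lance' plus any chat arthur shares with a user whose
    # username contains 'lance' (e.g. Lancelot).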
@classmethod
def get_chat(cls, users):
"""
Return the chat of users in the given sequence.
:param user: sequence of User model instances
:returns: Chat model instance
"""
chat = cls.query
for user in users:
chat = chat.filter(cls.users.contains(user))
return chat.first()
class User(UserMixin, database.Model):
"""
User model.
Implements UserMixin,
used as a default authentication model by Flask-Login.
Methods defined here:
get_updated_chats(current_user, session)
get_chat_query(user_ids)
get_removed_query(chat_query=None)
get_removed_chats_query(user_ids)
mark_chats_as_removed(chats)
unmark_chats_as_removed(chats)
has_permission(permission)
verify_password(password)
generate_auth_token(expiration=3600)
generate_confirmation_token(expiration=3600)
confirm(token)
has_contact(user)
is_contacted_by(user)
add_contacts(users, contact_group=None)
delete_contacts(users)
get_other_users_query()
get_available_chats_query()
get_messages(chat)
get_unread_messages_query(chat)
search_users_query(username, users_query)
Static methods defined here:
verify_auth_token(token)
"""
__tablename__ = 'users'
id = database.Column(database.Integer, primary_key=True)
confirmed = database.Column(database.Boolean, default=False)
last_seen = database.Column(database.DateTime(timezone=True),
nullable=True)
role_id = database.Column(database.Integer,
database.ForeignKey('roles.id'))
_date_created = database.Column(database.DateTime(timezone=True),
nullable=False,
default=utc_now)
username = database.Column(database.String(64),
unique=True,
index=True,
nullable=False)
email = database.Column(database.String(64),
unique=True,
index=True,
nullable=False)
password_hash = database.Column(database.String(128), nullable=False)
contacts = database.relationship('Contact',
foreign_keys=[Contact.user_id],
backref=database.backref('user',
lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
contacted = database.relationship('Contact',
foreign_keys=[Contact.contact_id],
backref=database.backref('contact',
lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
messages_from = (database
.relationship('Message',
primaryjoin='User.id==Message.sender_id'))
messages_to = (database
.relationship('Message',
primaryjoin='User.id==Message.recipient_id'))
chats = database.relationship('Chat',
secondary=UserChatTable,
backref=database.backref('users',
lazy='dynamic'))
removed_chats = database.relationship('RemovedChat',
backref='user',
lazy='dynamic',
cascade='all, delete-orphan')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.role is None:
if self.email == current_app.config['ADMIN_MAIL']:
self.role = Role.query.filter_by(permissions = 0xff).first()
else:
self.role = Role.query.filter_by(is_default=True).first()
def __repr__(self):
return (f'User(id={self.id}, username={self.username}, '
+ f'date_created={self.date_created}, '
+ f'confirmed={self.confirmed})')
@hybrid_property
def date_created(self):
return self._date_created.astimezone(timezone.utc)
@date_created.expression
def date_created(self):
return self._date_created
@date_created.setter
def date_created(self, value):
self._date_created = value
@property
def is_admin(self):
"""
Check if current user has admin permissions.
:returns: True if current user has admin permission,
False otherwise
"""
return self.role and self.has_permission(Permission.ADMINISTRATION)
@property
def password(self):
raise AttributeError('Password is not a readable attribute')
@password.setter
def password(self, password):
"""
Assign the given password to current user.
:param password: string
"""
self.password_hash = generate_password_hash(password)
def get_updated_chats(self, current_user, session):
"""
Return information about the user's updated chats,
if there are any.
:param current_user: the user currently logged in
:param session: flask session
:returns: dictionary with the keys
'chats', 'current_chat_messages', 'current_username'
or None
"""
available_chats = self.get_available_chats_query().all()
chats = []
messages = []
for chat in available_chats:
unread_messages_query = self.get_unread_messages_query(chat)
count = unread_messages_query.count()
if count:
chats.append({'chat_id': str(chat.id),
'unread_messages_count': count,
'chat_name': chat.get_name(self)})
current_chat_id = session.get((current_user.id,
'current_chat_id'))
if current_chat_id == chat.id:
messages = (Message
.get_messages_list(unread_messages_query))
if chats:
data = {'chats': chats,
'current_chat_messages': messages,
'current_username': self.username}
return data
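    # Illustrative return shape (values made up): when unread messages exist,
    # get_updated_chats() returns something like
    #   {'chats': [{'chat_id': '3', 'unread_messages_count': 2,
    #               'chat_name': 'Lancelot'}],
    #    'current_chat_messages': [...],
    #    'current_username': 'Arthur'},
    # and None otherwise.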
def get_chat_query(self, user_ids):
"""
Return a query of current user's chats
with users identified by the given user_ids.
:param user_ids: sequence of integers
:returns: Chat model query
"""
return (Chat
.query
.filter(Chat.users.contains(self))
.join(UserChatTable,
and_(UserChatTable.c.chat_id == Chat.id,
UserChatTable.c.user_id.in_(user_ids)
)
)
)
def get_removed_query(self, chat_query=None):
"""
Return RemovedChat query for currrent user
(based on the chat query, if given).
:param chat_query: Chat model query
:returns: RemovedChat model query
"""
if chat_query:
return (RemovedChat
.query
.filter(RemovedChat.user == self,
RemovedChat
.chat_id
.in_([chat.id
for chat
in chat_query.all()]
)
)
)
else:
return (RemovedChat
.query
.filter(RemovedChat.user==self))
def get_removed_chats_query(self, user_ids):
"""
Return a query of chats
with users having
the given user_ids
which are marked as removed by current user.
:param user_ids: sequence of integers
:returns: Chat model query
"""
chat_query = self.get_chat_query(user_ids)
removed_chat_query = self.get_removed_query(chat_query)
result = (chat_query
.join(removed_chat_query.subquery(),
Chat
.id
.in_([removed.chat_id
for removed
in removed_chat_query]
)
)
)
return result
def mark_chats_as_removed(self, chats):
"""
Add RemovedChat record
for each chat in the given chats.
:param chats: sequence of Chat model instances
"""
for chat in chats:
removed_chat = RemovedChat()
removed_chat.user = self
removed_chat.chat = chat
database.session.add(removed_chat)
database.session.commit()
def unmark_chats_as_removed(self, chats):
"""
Delete RemovedChat record
for each chat in the given chats.
:param chats: sequence of Chat model instances
"""
chat_ids = [chat.id for chat in chats]
removed_chats_query = (RemovedChat
.query
.filter(RemovedChat.chat_id.in_(chat_ids),
RemovedChat.user_id == self.id))
removed_chats_query.delete(synchronize_session='fetch')
def has_permission(self, permission):
"""
Check if current user has the given permission.
:param permission: integer representing permission
:returns: True if current user has a role
and the role has permission,
False otherwise
"""
return (self.role is not None
and (self.role.permissions & permission == permission))
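    # Example of the bitmask check above: a role created with permissions=0xff
    # satisfies has_permission(Permission.ADMINISTRATION) since
    # 0xff & 0x01 == 0x01 (Permission is defined at the bottom of this module).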
def verify_password(self, password):
"""
Check if the given password matches current user's password.
:param password: string
:returns: True if password matches current user's password,
False otherwise
"""
return check_password_hash(self.password_hash, password)
def generate_auth_token(self, expiration=3600):
"""
Return an authentication token for current user.
:param expiration: Time in seconds after which token expires
:returns: TimedJSONWebSignature
"""
serializer = Serializer(current_app.config['SECRET_KEY'], expiration)
return serializer.dumps({'id': self.id})
def generate_confirmation_token(self, expiration=3600):
"""
Return a confirmation token for current user.
:param expiration: Time in seconds after which token expires
:returns: TimedJSONWebSignature
"""
serializer = Serializer(current_app.config['SECRET_KEY'], expiration)
return serializer.dumps({'confirm': self.id})
def confirm(self, token):
"""
Check that the given token belongs to current user
and set current user's 'confirmed' column to True
if it does.
:param token: TimedJSONWebSignature instance or string
:returns: True if the given token belongs to current user,
False otherwise
"""
serializer = Serializer(current_app.config['SECRET_KEY'])
try:
data = serializer.loads(token)
except (BadHeader, SignatureExpired):
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
database.session.add(self)
return True
def has_contact(self, user):
"""
Check if current user has the given user as a contact.
:param user: User model instance
:returns: True if current user has user as a contact,
False otherwise
"""
return bool(self.contacts.filter_by(contact_id=user.id).first())
def is_contacted_by(self, user):
"""
Check if the given user has current user as a contact.
:param user: User model instance
:returns: True if user has current user as a contact,
False otherwise
"""
return bool(self.contacted.filter_by(user_id=user.id).first())
def add_contacts(self, users, contact_group=None):
"""
Add the given users to current user's contacts.
:param users: list of User model instances
:param contact_group: name of contact group
"""
for user in users:
if not self.has_contact(user):
relation = Contact(user=self,
contact=user,
contact_group=contact_group)
database.session.add(relation)
def delete_contacts(self, users):
"""
Delete the given users from contacts of current user.
:param users: sequence of User model instances
"""
for user in users:
if self.has_contact(user):
relation = self.contacts.filter_by(contact_id=user.id).first()
database.session.delete(relation)
def get_other_users_query(self):
"""
Return a query of users not including current user
ordered by column 'username' in ascending order.
:returns: User model query
"""
return (User
.query
.filter(User.id != self.id)
.order_by(User.username))
def get_available_chats_query(self):
"""
Return a query of current user's chats
not marked as removed ordered by modification date
in descending order.
:returns: User model query
"""
removed_chats = self.get_removed_query()
return (Chat
.query
.filter(Chat.users.contains(self))
.filter(not_(Chat
.id
.in_(removed_chats
.with_entities(RemovedChat.chat_id))))
.order_by(Chat.date_modified.desc()))
def get_messages(self, chat):
"""
Return a list of dictionaries with keys
'text', 'date_created', 'sender_username', 'recipient_username'
sorted by creation date in ascending order.
:param chat: Chat model instance
:returns: list of dictionaries
"""
messages = chat.messages.order_by(Message.date_created).all()
message_dict_list = []
for message in messages:
sender = message.sender
recipient = message.recipient
sender_name = sender.username if sender else None
recipient_name = recipient.username if recipient else None
message_dict = {'text': message.text,
'date_created': message.date_created.isoformat(),
'sender_username': sender_name,
'recipient_username': recipient_name}
message_dict_list.append(message_dict)
return message_dict_list
def get_unread_messages_query(self, chat):
"""
Return a query of unread messages from the given chat.
:param chat: Chat model instance
:returns: Message model query
"""
return (chat
.messages
.filter(Message.sender != self,
not_(Message.was_read)))
def search_users_query(self, username, users_query):
"""
Return a query of users (except current user)
from the given users_query
containing the given username string in the 'username' column
in ascending lexicographical order by 'username'.
        :param username: string to search in the 'username' column
:param users_query: User model query to search
:returns: User model query
"""
query = (users_query
.filter(User.username.ilike('%' + username + '%')))
return query
@staticmethod
def verify_auth_token(token):
"""
        Check the validity of the given token.
        :param token: <PASSWORD>
        :returns: the User instance the token belongs to if it is valid,
                  None otherwise
        """
        serializer = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = serializer.loads(token)
        except Exception:
            return None
        return User.query.get(data['id'])
class Message(database.Model):
"""
Message model.
Methods defined here:
to_json(user)
Static methods defined here:
get_messages_list(message_query)
from_json(json_message)
flush_messages(message_query)
"""
__tablename__ = 'messages'
id = database.Column(database.Integer, primary_key = True)
was_read = database.Column(database.Boolean,
default=False)
text = database.Column(database.Text)
_date_created = database.Column(database.DateTime(timezone=True),
nullable=False,
default=utc_now)
sender_id = database.Column(database.Integer,
database.ForeignKey('users.id'),
nullable=False)
recipient_id = database.Column(database.Integer,
database.ForeignKey('users.id'),
nullable=False)
chat_id = database.Column(database.Integer,
database.ForeignKey('chats.id'),
nullable=False)
sender = database.relationship('User', foreign_keys=[sender_id])
recipient = database.relationship('User', foreign_keys=[recipient_id])
chat = database.relationship('Chat',
backref=database.backref('messages',
lazy='dynamic'),
foreign_keys=[chat_id])
def __repr__(self):
return (f'Message(id={self.id}, text={self.text}, '
+ f'sender={self.sender}, '
+ f'recipient={self.recipient}, '
+ f'was_read={self.was_read}, '
+ f'text={self.text}, '
+ f'chat={self.chat}, '
+ f'date_created={self.date_created})'
)
@hybrid_property
def date_created(self):
return self._date_created.astimezone(timezone.utc)
@date_created.expression
def date_created(self):
return self._date_created
@date_created.setter
def date_created(self, value):
self._date_created = value
def to_json(self, user):
"""
Return a dictionary representation
of current message.
:param user: current user (needed to get the chat name)
:returns: Message model instance turned into a dictionary
"""
if not self.recipient:
recipient_username = ''
else:
recipient_username = self.recipient.username
message = {'id': self.id,
'chat_id': self.chat_id,
'was_read': self.was_read,
'date_created': self.date_created,
'text': self.text,
'sender_username': self.sender.username,
'recipient_username': recipient_username,
'chat_name': self.chat.get_name(user)}
return message
@staticmethod
def get_messages_list(message_query):
"""
Return a list of dictionaries with keys
'text', 'sender_username', 'date_created'
for the messages from the given message_query
sorted by modification date in ascending order.
:param message_query: Message model query
:returns: list of dictionaries
"""
message_dict_list = []
for message in message_query.order_by(Message.date_created).all():
sender = message.sender
sender_username = sender.username if sender else None
date_created = message.date_created
message_dict = {'text': message.text,
'sender_username': sender_username,
'date_created': date_created.isoformat()}
message_dict_list.append(message_dict)
return message_dict_list
@staticmethod
def from_json(json_message):
"""
Return a Message model instance
created from the give json_message dictionary.
:param json_message: dictionary
:returns: Message model instance
"""
        try:
            text = json_message.get('text')
            # Guard against a missing 'text' key turning into the string "None".
            text = str(text).rstrip() if text is not None else ''
            if text:
message = Message()
message.text = text[:current_app.config['MAX_STRING_LENGTH']]
return message
except (LookupError, ValueError):
pass
@staticmethod
def flush_messages(message_query):
"""
Set 'was_read' column to True for all messages
from the given message_query.
:param message_query: Message model query
"""
message_query.update({'was_read': True}, synchronize_session=False)
database.session.commit()
class AnonymousUser(AnonymousUserMixin):
def has_permission(self, permission):
return False
@property
def is_admin(self):
return False
class Permission:
ADMINISTRATION = 1
login_manager.anonymous_user = AnonymousUser
| StarcoderdataPython |
3309450 | <reponame>th2-net/th2-common-py<gh_stars>0
# Copyright 2020-2021 Exactpro (Exactpro Systems Limited)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from th2_common.schema.configuration.abstract_configuration import AbstractConfiguration
from typing import List
from th2_common.schema.message.configuration.message_configuration import FieldFilterConfiguration, \
RouterFilterConfiguration
class GrpcRawRobinStrategy:
def __init__(self, endpoints, name) -> None:
self.endpoints = endpoints
self.name = name
class GrpcServiceConfiguration(AbstractConfiguration):
pass
class GrpcServerConfiguration(AbstractConfiguration):
def __init__(self, attributes, host, port, workers, **kwargs) -> None:
self.attributes = attributes
self.host = host
self.port = port
self.workers = workers
self.check_unexpected_args(kwargs)
class GrpcEndpointConfiguration(AbstractConfiguration):
def __init__(self, host, port, attributes, **kwargs) -> None:
self.host = host
self.port = port
self.attributes = attributes
self.check_unexpected_args(kwargs)
class GrpcConfiguration(AbstractConfiguration):
def __init__(self, services, server=None, **kwargs) -> None:
self.services = services
if server is not None:
self.serverConfiguration = GrpcServerConfiguration(**server)
self.check_unexpected_args(kwargs)
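    # Illustrative payload (field values are assumptions, not from a real
    # schema): this class is typically built from a parsed JSON block, e.g.
    #   GrpcConfiguration(
    #       services={'service-name': {...}},
    #       server={'attributes': [], 'host': 'localhost', 'port': 8080,
    #               'workers': 5})
    # where the 'server' dict is wrapped in a GrpcServerConfiguration.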
class GrpcRouterFilterConfiguration(RouterFilterConfiguration):
def __init__(self, endpoint: str, metadata, message, **kwargs) -> None:
self.metadata = metadata
self.message = message
self.endpoint = endpoint
self.check_unexpected_args(kwargs)
def get_metadata(self) -> List[FieldFilterConfiguration]:
return self.metadata
def get_message(self) -> List[FieldFilterConfiguration]:
return self.message
class GrpcRawFilterStrategy:
def __init__(self, filters) -> None:
self.filters = [GrpcRouterFilterConfiguration(**filter_configuration) for filter_configuration in filters]
class GrpcRouterConfiguration(AbstractConfiguration):
def __init__(self, workers=5):
self.workers = int(workers)
| StarcoderdataPython |
Subsets and Splits