id (stringlengths 2–8) | text (stringlengths 16–264k) | dataset_id (stringclasses 1 value)
---|---|---|
4919109
|
<gh_stars>0
import torch
import torch.nn.functional as F
import torch.nn as nn
class SiamusicLoss(nn.Module):
def __init__(self,dim=1):
super().__init__()
self.dim = dim
def neg_cos_sim(self,p,z):
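# SimSiam-style negative cosine similarity: stop-gradient on the target z,
# L2-normalize both vectors, then average the negative dot product over the batch.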
z = z.detach()
p = F.normalize(p,dim=self.dim) # default : L2 norm
z = F.normalize(z,dim=self.dim)
return -torch.mean(torch.sum(p*z,dim=self.dim))
def forward(self,p1,z2,p2,z1):
L = self.neg_cos_sim(p1,z2)/2 + self.neg_cos_sim(p2,z1)/2
return L
if __name__ == '__main__':
p1 = torch.randn((16,2048))
p2 = torch.randn((16,2048))
z1 = torch.randn((16,2048))
z2 = torch.randn((16,2048))
criterion = SiamusicLoss()
loss = criterion(p1,z2,p2,z1)
print(loss.item())
|
StarcoderdataPython
|
1683812
|
<filename>src/mask.py
import math
import numpy as np
import torch
from PIL import Image, ImageDraw
def generate_random_mask(height: int = 256,
width: int = 256,
min_lines: int = 1,
max_lines: int = 4,
min_vertex: int = 5,
max_vertex: int = 13,
mean_angle: float = 2/5 * math.pi,
angle_range: float = 2/15 * math.pi,
min_width: float = 6,
max_width: float = 20):
"""
Generate random mask for GAN. Each pixel of mask
if 1 or 0, 1 means pixel is masked.
Parameters
----------
height : int
Height of mask.
width : int
Width of mask.
min_lines : int
Miniumal count of lines to draw on mask.
max_lines : int
Maximal count of lines to draw on mask.
min_vertex : int
Minimal count of vertexes to draw.
max_vertex : int
Maximum count of vertexes to draw.
mean_angle : float
Mean value of angle between edges.
angle_range : float
Maximum absoulte deviation of angle from mean value.
min_width : int
Minimal width of edge to draw.
max_width : int
Maximum width of edge to draw.
"""
# init mask and drawing tool
mask = Image.new('1', (width, height), 0)
draw = ImageDraw.Draw(mask)
# calculate mean radius to draw lines and count of lines
num_lines = np.random.randint(min_lines, max_lines)
average_radius = math.sqrt(height * height + width * width) / 8
# drawing lines
for _ in range(num_lines):
angle_min = mean_angle - np.random.uniform(0, angle_range)
angle_max = mean_angle + np.random.uniform(0, angle_range)
num_vertex = np.random.randint(min_vertex, max_vertex)
# line parameters
angles = []
vertex = []
# generating line angles
for i in range(num_vertex - 1):
random_angle = np.random.uniform(angle_min, angle_max)
if i % 2 == 0:
random_angle = 2 * np.pi - random_angle
angles.append(random_angle)
# start point
start_x = np.random.randint(0, width)
start_y = np.random.randint(0, height)
vertex.append((start_x, start_y))
# generating next points
for i in range(num_vertex - 1):
radius = np.random.normal(loc=average_radius, scale=average_radius / 2)
radius = np.clip(radius, 0, 2 * average_radius)
new_x = np.clip(vertex[-1][0] + radius * math.cos(angles[i]), 0, width)
new_y = np.clip(vertex[-1][1] + radius * math.sin(angles[i]), 0, height)
vertex.append((int(new_x), int(new_y)))
# drawing line
line_width = np.random.uniform(min_width, max_width)
line_width = int(line_width)
draw.line(vertex, fill=1, width=line_width)
# smoothing angles
for node in vertex:
x_ul = node[0] - line_width // 2
x_br = node[0] + line_width // 2
y_ul = node[1] - line_width // 2
y_br = node[1] + line_width // 2
draw.ellipse((x_ul, y_ul, x_br, y_br), fill=1)
# random horizontal flip (Image.transpose returns a new image, so reassign)
if np.random.normal() > 0:
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
# random vertical flip
if np.random.normal() > 0:
mask = mask.transpose(Image.FLIP_TOP_BOTTOM)
mask = np.asarray(mask, np.float32)
return torch.from_numpy(mask)
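# Usage sketch (not part of the original module): draw one mask and report
# its shape and the fraction of masked pixels.
if __name__ == '__main__':
    m = generate_random_mask(height=256, width=256)
    print(m.shape)          # torch.Size([256, 256])
    print(float(m.mean()))  # fraction of pixels set to 1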
|
StarcoderdataPython
|
294941
|
import pytest
from coalescenceml.artifacts import DataArtifact
from coalescenceml.producers.exceptions import ProducerInterfaceError
from coalescenceml.producers.base_producer import BaseProducer
from coalescenceml.producers.producer_registry import register_producer_class
class TestProducer(BaseProducer):
__test__ = False
TYPES = (int,)
def test_producer_raises_an_exception_if_associated_types_are_no_classes():
"""Tests that a producer can only define classes as associated types."""
with pytest.raises(ProducerInterfaceError):
@register_producer_class
class InvalidProducer(BaseProducer):
TYPES = ("not_a_class",)
def test_producer_raises_an_exception_if_associated_artifact_types_are_no_artifacts():
"""Tests that a producer can only define `BaseArtifact` subclasses as
associated artifact types."""
with pytest.raises(ProducerInterfaceError):
@register_producer_class
class InvalidProducer(BaseProducer):
TYPES = (int,)
ARTIFACT_TYPES = (DataArtifact, int, "not_a_class")
def test_producer_raises_an_exception_when_asked_to_read_unfamiliar_type():
"""Tests that a producer fails if it's asked to read the artifact to a
non-associated type."""
producer = TestProducer(artifact=DataArtifact())
with pytest.raises(TypeError):
producer.handle_input(data_type=str)
def test_producer_raises_an_exception_when_asked_to_write_unfamiliar_type():
"""Tests that a producer fails if it's asked to write data of a
non-associated type."""
producer = TestProducer(artifact=DataArtifact())
with pytest.raises(TypeError):
producer.handle_return(data="some_string")
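# For contrast, a sketch of a producer that should register cleanly: both
# TYPES and ARTIFACT_TYPES hold real classes (inferred from the failure
# cases exercised above, not from the library's documentation).
@register_producer_class
class ValidProducer(BaseProducer):
    TYPES = (int,)
    ARTIFACT_TYPES = (DataArtifact,)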
|
StarcoderdataPython
|
6664114
|
<gh_stars>0
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
app = Flask(__name__)
app.config['SECRET_KEY'] = '<KEY>'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
login_manager = LoginManager(app)
from seriously_portfolio import routes
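# Usage sketch (assumed project layout): this package is typically started
# from a top-level run.py, e.g.:
#     from seriously_portfolio import app
#     app.run(debug=True)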
|
StarcoderdataPython
|
36936
|
import speedtest
def perform_test():
s = speedtest.Speedtest()
best_server = s.get_best_server()
print('Best server: ')
print(best_server['name'])
print('Performing upload test:')
result = s.upload()
print('Done: ' + str(result / 1024 / 1024) + ' MBit/s')
print('Performing download test:')
result = s.download()
print('Done: ' + str(result / 1024 / 1024) + ' MBit/s')
print(s.results)
return s.results
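# Usage sketch: run the test directly. speedtest-cli returns raw bits per
# second; the prints above convert with binary (1024-based) divisors.
if __name__ == '__main__':
    perform_test()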
|
StarcoderdataPython
|
5034817
|
<filename>metadeploy/api/migrations/0113_builtin_jsonfield.py
# Generated by Django 3.1.12 on 2021-07-29 19:54
import django.core.serializers.json
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0112_merge_20201130_1757"),
]
operations = [
migrations.AlterField(
model_name="job",
name="results",
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name="plan",
name="preflight_checks",
field=models.JSONField(blank=True, default=list),
),
migrations.AlterField(
model_name="preflightresult",
name="results",
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name="scratchorg",
name="config",
field=models.JSONField(
blank=True,
encoder=django.core.serializers.json.DjangoJSONEncoder,
null=True,
),
),
migrations.AlterField(
model_name="step",
name="source",
field=models.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name="step",
name="task_config",
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name="user",
name="first_name",
field=models.CharField(
blank=True, max_length=150, verbose_name="<NAME>"
),
),
]
|
StarcoderdataPython
|
9707937
|
<reponame>oseme-techguy/python-pdf-annotation-api-demo
"""Utilities
"""
from re import match
from urllib import parse
import datetime
import calendar
import time
import json
import requests
import uuid
from app.helpers.json_converter import JSONConverter
class Utilities:
"""Utilities
"""
@staticmethod
def generate_id():
"""Generates a uuid - suitable for DB primary column
"""
return uuid.uuid4().hex
@staticmethod
def convert_unserializable_fields(json_object):
"""Converts unserializable fields in json
"""
return json.loads(json.dumps(json_object, cls=JSONConverter))
@staticmethod
def decode_string(text):
"""Decode a URL encoded strinf
Arguments:
text {str} -- URL encoded string to decode
"""
return parse.unquote(text)
@staticmethod
def url_generator(url, params):
"""A method to generate a url from the params input to this method
Arguments:
url {string} -- URL of request
params {object} -- the dict containing the query parameters
Returns:
str -- the generated url string
"""
params = parse.urlencode(params)
if parse.urlparse(url)[4]:
return url + '&' + params
return url + '?' + params
@staticmethod
def get_request(url, headers=None, query=None, timeout=5, is_json=True):
"""API GET request
Arguments:
url {string} -- URL of request
Keyword Arguments:
headers {dict} -- Headers for the HTTP request (default: {None})
query {dict} -- HTTP query parameters (default: {None})
timeout {int} -- Request timeout in seconds (default: {5})
is_json {bool} -- If request is JSON (default: {True})
Returns:
object -- the response from the request
"""
request_headers = {'content-type':'text/plain'} if not headers else headers
req = requests.get(url, headers=request_headers, params=query, timeout=timeout)
if is_json:
return req.json()
return req
@staticmethod
def post_request(url, payload, headers=None, timeout=5, is_json=True):
"""API POST request
Arguments:
url {string} -- URL of request
payload {dict} -- POST body
Keyword Arguments:
headers {dict} -- Headers for the HTTP request (default: {None})
timeout {int} -- Request timeout in seconds (default: {5})
is_json {bool} -- If request is JSON (default: {True})
Returns:
object -- the response from the request
"""
request_headers = {'content-type':'application/json'} if not headers else headers
# serialize the payload to a JSON string (like JSON.stringify)
if not isinstance(payload, str):
payload_str = json.dumps(payload)
else:
payload_str = payload
req = requests.post(url, data=payload_str, headers=request_headers, timeout=timeout)
if is_json:
return req.json()
return req
@staticmethod
def get_day_time(month='', day='', year=''):
"""Get the day time stamp for the day, month and year
Keyword Arguments:
month {str} -- the month int (default: {''})
day {str} -- the day int (default: {''})
year {str} -- the year int (default: {''})
Returns:
{int} -- the int of the day timestamp
"""
day = time.strftime("%d") if day == '' else day
month = time.strftime("%m") if month == '' else month
year = time.strftime("%Y") if year == '' else year
year = int(year)
month = int(month)
day = int(day)
date_time = datetime.datetime(year=year, month=month, day=day)
return int(time.mktime(date_time.timetuple()))
@staticmethod
def get_month_time(month='', year=''):
"""Get the month time stamp from the month and year
Keyword Arguments:
month {str} -- the month int (default: {''})
year {str} -- the year int (default: {''})
Returns:
{int} -- the int of the month timestamp
"""
month = time.strftime("%m") if month == '' else month
year = time.strftime("%Y") if year == '' else year
month = int(month)
month -= 1
year = int(year)
if month == 0:
month = 12
year -= 1
month = max(1, month)
month_range = calendar.monthrange(year, month)
day = month_range[1]
date_time = datetime.datetime(year=year, month=month, day=day)
return int(time.mktime(date_time.timetuple()))
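# Usage sketch (not part of the original module): exercises the pure helpers.
# url_generator appends with '?' or '&' depending on whether the URL already
# carries a query string; get_day_time returns a Unix timestamp for midnight
# (local time) of the given date.
if __name__ == '__main__':
    print(Utilities.url_generator('http://example.com/api', {'a': 1}))
    # -> http://example.com/api?a=1
    print(Utilities.url_generator('http://example.com/api?x=0', {'a': 1}))
    # -> http://example.com/api?x=0&a=1
    print(Utilities.get_day_time(month='1', day='1', year='2020'))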
|
StarcoderdataPython
|
3212601
|
# Packages
import numpy as np
from scipy.misc import imresize
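# NOTE: scipy.misc.imresize was deprecated in SciPy 1.0 and removed in 1.3;
# this module assumes an older SciPy is installed.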
import keras.backend as K
from keras import losses,metrics
from keras.models import load_model
def rmse(y_true,y_pred):
'''Accepts true labels and predictions. Returns Root mean squared error'''
return K.sqrt(K.mean(K.square(y_pred-y_true)))
def load_galaxy_model():
'''Loads stored keras model with rmse loss and metric'''
#Before prediction
K.clear_session()
# Uses custom loss/metrics
losses.rmse = rmse
metrics.rmse = rmse
return load_model(f'./app/galaxy_morphology_predictor.h5')
def process_image(image):
'''Accepts an image of any shape and reshapes for use in model predictions'''
# Resize and Reshape Image
image = imresize(image,(169,169))
image = image.reshape((1,)+image.shape)
# Scale Image
means = np.load(f'./app/means.npy')
stds = np.load(f'./app/stds.npy')
return (image - means)/stds
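# Usage sketch (hypothetical input): load the stored model and score a single
# image. `raw_image` is a placeholder HxWx3 uint8 array standing in for real
# galaxy data; ./app/means.npy and ./app/stds.npy must exist as assumed above.
if __name__ == '__main__':
    model = load_galaxy_model()
    raw_image = np.zeros((424, 424, 3), dtype=np.uint8)
    batch = process_image(raw_image)
    print(model.predict(batch).shape)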
|
StarcoderdataPython
|
4990720
|
<gh_stars>0
#! /usr/bin/env python3
"""
Thermal decomposition in the n-,s-C5H11 system
(two-well, two-channel, 1D ME as a function of E)
Steady-state decomposition of equilibrated mixture of n- and s-C5H11 and eigenvalues
sample output (c5h11_2b_me1d_E_eig.dat):
T[K] p[bar] w1-k2(dis) w2-k2(dis) ktot[s-1] x(w1) x(w2) ev1 ev2 ev3 ev4 ev5
1000.0 1.000e+02 1.6080e+06 8.0638e+06 9.6718e+06 2.3558e-01 7.6442e-01 -9.6718e+06 -1.3293e+07 -8.9940e+09 -9.5226e+09 -1.5845e+10
1000.0 1.000e+01 1.0101e+06 6.4007e+06 7.4107e+06 1.8928e-01 8.1072e-01 -7.4107e+06 -1.0432e+07 -1.0473e+09 -1.1134e+09 -2.0894e+09
1000.0 1.000e+00 3.7006e+05 3.2826e+06 3.6527e+06 1.3847e-01 8.6153e-01 -3.6527e+06 -5.5958e+06 -1.4219e+08 -1.5109e+08 -3.0697e+08
1000.0 1.000e-01 8.6884e+04 1.0617e+06 1.1486e+06 1.0787e-01 8.9213e-01 -1.1486e+06 -2.0244e+06 -2.0391e+07 -2.3291e+07 -4.5239e+07
"""
from me2d import ME1DMW
maxE = 60000 # cm^-1
# list of (name, rrkm_filename, relative_energy)
well_list = [("w1", "c5h11_1_rrkmE_nc5h11_dE10.dat", 948.38),
("w2", "c5h11_1_rrkmE_sc5h11_dE10.dat", 0.)]
# list of ((name, ch), (name, ch))
connections = [(("w1", 1), ("w2", 1))]
outfn = "c5h11_2b_me1d_E_eig.dat"
solver = "EigIter,cg" # ARPACK (invert mode) with conjugate gradient
reactant = None # no reactant specification for EigIter
neig = 5 # find 5 least-negative eigenvalues
bandpcrit = 1e-9 # truncation threshold for banded matrix
nthreads = 2
maxmemGB = 1
verbose = True
T = 1000. # K
pl = [100., 10., 1., 0.1] # bar
y = 0.5
alpha_w1 = 54.8 # cm^-1
Z_w1 = 1.36e-09 # cm^3 molecule^-1 s^-1
alpha_w2 = 54.4 # cm^-1
Z_w2 = 1.43e-09 # cm^3 molecule^-1 s^-1
memw = ME1DMW.read_from(well_list, connections, maxE=maxE)
memw["w1"].set_params(Z_w1, y, alpha_w1)
memw["w2"].set_params(Z_w2, y, alpha_w2)
outfp = open(outfn, "w")
kstrl, xstrl = memw.get_channel_strings()
if neig > 1: addstrl = ["ev%s" % (i+1) for i in range(neig)]
else: addstrl = []
outfp.write(" T[K] p[bar] %s\n" %
(" ".join("%12s" % x for x in kstrl+["ktot[s-1]"]+xstrl+addstrl)))
ga = None
for p in pl:
ktot, kchl, ga, popl, vals, vec = \
memw.solve(T, p, gguess=ga, solver=solver, reactant=reactant,
neig=neig, verbose=verbose, bandpcrit=bandpcrit,
nthreads=nthreads, maxmemGB=maxmemGB)
if neig > 1: addl = list(vals)
else: addl = []
outfp.write("%8.1f %.3e %s\n" % (T, p, " ".join("%12.4e" % x for x in kchl+[ktot]+popl+addl)))
outfp.flush()
|
StarcoderdataPython
|
3376571
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Discord.main_discord import PhaazebotDiscord
from Platforms.Web.main_web import PhaazebotWeb
import discord
import html
from aiohttp.web import Response
from Utils.Classes.extendedrequest import ExtendedRequest
from Utils.Classes.htmlformatter import HTMLFormatter
from Platforms.Web.index import PhaazeWebIndex
from Platforms.Web.utils import getNavbar
@PhaazeWebIndex.get("/discord/quotes/{guild_id:\d+}")
async def discordQuotes(cls:"PhaazebotWeb", WebRequest:ExtendedRequest) -> Response:
"""
Default url: /discord/quotes/{guild_id:\d+}
"""
PhaazeDiscord:"PhaazebotDiscord" = cls.BASE.Discord
if not PhaazeDiscord:
return await cls.Tree.errors.notAllowed(cls, WebRequest, msg="Discord module is not active")
guild_id:str = WebRequest.match_info.get("guild_id", "")
Guild:discord.Guild = discord.utils.get(PhaazeDiscord.guilds, id=int(guild_id if guild_id.isdigit() else 0))
if not Guild:
return await cls.Tree.Discord.discordinvite.discordInvite(cls, WebRequest, msg=f"Phaaze is not on this Server", guild_id=guild_id)
DiscordQuote:HTMLFormatter = HTMLFormatter("Platforms/Web/Content/Html/Discord/quotes.html")
DiscordQuote.replace(
guild_name=html.escape(Guild.name),
guild_id=str(Guild.id),
)
site:str = cls.HTMLRoot.replace(
replace_empty=True,
title=f"Phaaze | Discord - Quotes: {Guild.name}",
header=getNavbar(active="discord"),
main=DiscordQuote
)
return cls.response(
body=site,
status=200,
content_type='text/html'
)
|
StarcoderdataPython
|
3521721
|
<reponame>1029127253/Product-Title-Classification
from __future__ import print_function, unicode_literals
import pandas as pd
from collections import Counter
import re
def process(our_data):
our_data=our_data.lower()
return list(our_data)
def is_right(uchar):
if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
return True
elif uchar >= u'\u0061' and uchar <= u'\u007a':
return True
else:
return False
def is_eng(word):
flag=True
for uchar in word:
if uchar < u'\u0061' or uchar > u'\u007a':
flag=False
return flag
def format_str(content):
content_str = ''
for i in content:
if is_right(i):
content_str = content_str +i
return content_str
import jieba
import jieba.posseg as pseg
#jieba.load_userdict("vocab-correct.txt")
'''
fin=open('plus-vocabs.txt')
for line in fin.readlines():
word=line.strip().split()[0]
jieba.add_word(word,100)
fin.close()
'''
#jieba.set_dictionary('vocab-correct.txt')
#dict_path='dict.txt'
#jieba.load_userdict(dict_path)
def fenci(datas):
#cut_words=nlp.tag(datas)
#return cut_words[0]['word']
cut_words=jieba.cut(datas,cut_all=False)
return cut_words
#os.chdir('data')
fcontent=open('test-content.txt','w')
fin=open('test-ubuntu.tsv')
readlines=fin.readlines()[1:]
for i in range(len(readlines)):
line=readlines[i]
if i%10000==0:
print (i)
content=line.strip()
result=[]
for part in re.split(r'[:-]',content):
for word in part.split():
result.extend(fenci(format_str(process(word))))
if len(result)==0:
print (i)
fcontent.write(' '.join(result)+'\n')
fcontent.close()
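# Pipeline note (hypothetical example): process() lowercases, format_str()
# keeps only CJK ideographs and lowercase ASCII letters, and fenci() segments
# the remainder with jieba, e.g.:
#     ' '.join(fenci(format_str(process('Apple手机 128GB'))))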
|
StarcoderdataPython
|
223128
|
import datetime
import dateutil.parser
from sqlalchemy import func
import ckan.model as ckan_model
import ckan.plugins.toolkit as tk
import ckanext.requestdata.model as requestdata_model
import ckanext.ytp.request.model as membership_model
get_action = tk.get_action
class MembershipRequestsDao(object):
def __init__(self, model, userobj, is_sysadmin):
'''
:param model:
:type model: ckan_model
:param userobj:
:type userobj: ckan_model.User
:param is_sysadmin:
:type is_sysadmin: bool
'''
self.is_sysadmin = is_sysadmin
self.userobj = userobj
self.model = model if model else ckan_model
def fetch_membership_requests_with_org_info(self):
model = self.model
query = model.Session.query(model.Group.name, model.Group.title,
func.count(model.Member.id).label('count'),
func.max(membership_model.MemberExtra.value).label('last_date_str')
) \
.filter(model.Member.group_id == model.Group.id) \
.filter(model.Member.id == membership_model.MemberExtra.member_id) \
.filter(membership_model.MemberExtra.key == 'created') \
.filter(model.Group.state == 'active') \
.filter(model.Member.table_name == "user") \
.filter(model.Member.state == 'pending')
if not self.is_sysadmin:
admin_in_groups = model.Session.query(model.Member) \
.filter(model.Member.state == "active") \
.filter(model.Member.table_name == "user") \
.filter(model.Member.capacity == 'admin') \
.filter(model.Member.table_id == self.userobj.id)
if admin_in_groups.count() <= 0:
return []
query = query.filter(model.Member.group_id.in_(admin_in_groups.values(model.Member.group_id)))
query = query.group_by(model.Group.name, model.Group.title)
return query.all()
def fetch_org_names_where_user_is_admin(self):
model = self.model
admin_in_groups = model.Session.query(model.Group.name) \
.filter(model.Member.group_id == model.Group.id) \
.filter(model.Group.state == 'active') \
.filter(model.Group.type == 'organization') \
.filter(model.Member.state == 'active') \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.capacity == 'admin') \
.filter(model.Member.table_id == self.userobj.id)
return admin_in_groups.all()
class RequestDataDao(object):
def __init__(self, model, userobj, is_sysadmin):
self.is_sysadmin = is_sysadmin
self.userobj = userobj
self.model = model if model else ckan_model
def fetch_requestdata_requests(self):
model = self.model
query = model.Session.query(func.max(requestdata_model.ckanextRequestdata.modified_at).label('last_date'),
func.count(requestdata_model.ckanextRequestdata.id).label('count')) \
.filter(requestdata_model.ckanextRequestdata.package_id == model.Package.id) \
.filter(requestdata_model.ckanextRequestdata.state == 'new') \
.filter(model.Package.state == 'active') \
.filter(model.Package.private == False) \
.filter(model.Package.maintainer == self.userobj.id)
return query.all()
def fetch_requestdata_requests_for_sysadmins(self):
model = self.model
if self.is_sysadmin:
query = model.Session.query(model.Group.name, model.Group.title,
func.max(requestdata_model.ckanextRequestdata.modified_at).label('last_date'),
func.count(requestdata_model.ckanextRequestdata.id).label('count')) \
.filter(model.Package.owner_org == model.Group.id) \
.filter(model.Group.state == 'active') \
.filter(requestdata_model.ckanextRequestdata.package_id == model.Package.id) \
.filter(requestdata_model.ckanextRequestdata.state == 'new') \
.filter(model.Package.state == 'active') \
.filter(model.Package.private == False) \
.group_by(model.Group.name, model.Group.title)
return query.all()
return []
class ExpiredDatasetsDao(object):
def __init__(self, userobj, is_sysadmin):
self.is_sysadmin = is_sysadmin
self.userobj = userobj
def fetch_expired_datasets(self):
now = datetime.datetime.utcnow().replace(minute=0, second=0, microsecond=0)
now_date_for_solr = now.isoformat() + 'Z'
query_string = '(maintainer:{} AND {}:[* TO {}])'.format(self.userobj.id, 'due_date', now_date_for_solr)
query_params = {
'include_private': True,
'start': 0,
'rows': 10,
'sort': 'due_date desc',
'fq': query_string
}
search_result = get_action('package_search')({}, query_params)
num_of_datasets = search_result.get('count', 0)
last_date = None
if num_of_datasets > 0:
last_date_str = search_result['results'][0].get('due_date')
last_date = dateutil.parser.parse(last_date_str[0:-1])
return num_of_datasets, last_date
class QuarantinedDatasetsDao(object):
def __init__(self, model, userobj, is_sysadmin):
self.model = model
self.is_sysadmin = is_sysadmin
self.userobj = userobj
self.org_names = None
def __populate_org_names_for_user(self):
if not self.org_names:
model = self.model
org_names_query = model.Session.query(model.Group.name) \
.filter(model.Member.group_id == model.Group.id) \
.filter(model.Group.state == 'active') \
.filter(model.Group.type == 'organization') \
.filter(model.Member.state == 'active') \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.capacity.in_(['admin', 'editor'])) \
.filter(model.Member.table_id == self.userobj.id)
self.org_names = org_names_query.all()
def __perform_solr_query(self, query_string):
query_params = {
'start': 0,
'rows': 100,
'sort': 'last_modified desc',
'fq': query_string
}
search_result = get_action('package_search')({}, query_params)
num_of_datasets = search_result.get('count', 0)
results = search_result.get('results')
for dataset in results:
dataset['last_date'] = dateutil.parser.parse(dataset.get('metadata_modified')[0:-1])
return search_result['results'], num_of_datasets
def fetch_quarantined_datasets_for_user(self):
'''
:return: list of datasets (max, 100) and total number of available datasets (maybe not all fetched)
:rtype: (list, integer)
'''
query_string = 'res_extras_in_quarantine:"true"'
self.__populate_org_names_for_user()
if self.org_names:
org_names_string = " OR ".join((o[0] for o in self.org_names))
query_string += ' AND organization:({})'.format(org_names_string)
return self.__perform_solr_query(query_string)
return None, 0
def fetch_all_quarantined_datasets(self):
'''
:return: list of datasets (max, 100) and total number of available datasets (maybe not all fetched)
:rtype: (list, integer)
'''
query_string = 'res_extras_in_quarantine:"true"'
self.__populate_org_names_for_user()
if self.org_names:
org_names_string = " OR ".join((o[0] for o in self.org_names))
query_string += ' -organization:({})'.format(org_names_string)
return self.__perform_solr_query(query_string)
|
StarcoderdataPython
|
5110302
|
<filename>github-100-exercises/day9.py
"""
Define a function which can compute the sum of two numbers.
def sum_num(a,b):
print(f'{a} + {b} = {a+b}')
if __name__ == '__main__':
num1=int(input('please enter first number: '))
num2=int(input('please enter second number: '))
sum_num(num1,num2)
"""
"""
Define a function that can convert an integer into a string and print it in console.
def sum_num(a,b):
print(f'{type(a)} = {a} , {type(str(a))} = {str(a)}')
print(f'{type(b)} = {b} , {type(str(b))} = {str(b)}')
if __name__ == '__main__':
num1=int(input('please enter first number: '))
num2=int(input('please enter second number: '))
sum_num(num1,num2)
"""
"""
Define a function that can receive two integer numbers in string form and compute their sum
and then print it in console.
def sum_num(a,b):
print(f'{a} + {b} = {a+b}')
if __name__ == '__main__':
num1=int(input('please enter first number: '))
num2=int(input('please enter second number: '))
sum_num(num1,num2)
"""
"""
Define a function that can accept two strings as input and concatenate them and then print it in console.
def sum_num(a,b):
print(f'{a} + {b} = {a+b}')
if __name__ == '__main__':
num1=input('please enter first string: ')
num2=input('please enter second string: ')
sum_num(num1,num2)
"""
"""
Define a function that can accept two strings as input and print the string with maximum length in console. If two strings have the same length, then the function should print all strings line by line.
def big_string(a,b):
if len(a)>len(b):
print(a)
elif len(a)<len(b):
print(b)
else:
print(f'{a}\n{b}')
if __name__ == '__main__':
str1=input('please enter first string: ')
str2=input('please enter second string: ')
big_string(str1,str2)
"""
|
StarcoderdataPython
|
6447565
|
<filename>api/schedules/stats_farm.py
#
# Performs an hourly insert of latest stats for the farm summary
#
import datetime
import sqlite3
import traceback
from flask import g
from common.config import globals
from common.utils import converters
from api import app
from api.commands import chia_cli
DATABASE = '/root/.chia/machinaris/dbs/stats.db'
def get_db():
db = getattr(g, '_stats_database', None)
if db is None:
db = g._stats_database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_stats_database', None)
if db is not None:
db.close()
# TODO: Currently only Chia stats are saved to local DB. This needs to flow via REST API from fullnodes to controller.
def collect():
if not globals.farming_enabled():
app.logger.debug(
"Skipping farm summary stats collection as not farming on this Machinaris instance.")
return
with app.app_context():
app.logger.debug("Collecting stats about farms.")
current_datetime = datetime.datetime.now().strftime("%Y%m%d%H%M")
for blockchain in globals.enabled_blockchains():
farm_summary = chia_cli.load_farm_summary(blockchain)
db = get_db()
cur = db.cursor()
try:
cur.execute("INSERT INTO stat_plot_count (value, created_at) VALUES (?,?)",
(farm_summary.plot_count,current_datetime,))
except Exception:
app.logger.info(traceback.format_exc())
try:
cur.execute("INSERT INTO stat_plots_size (value, created_at) VALUES (?,?)",
(converters.str_to_gibs(farm_summary.plots_size),current_datetime,))
except Exception:
app.logger.info(traceback.format_exc())
if farm_summary.status == "Farming": # Only collect if fully synced
try:
cur.execute("INSERT INTO stat_total_chia (blockchain, value, created_at) VALUES ('chia',?,?)",
(farm_summary.total_coins,current_datetime,))
except Exception:
app.logger.info(traceback.format_exc())
try:
cur.execute("INSERT INTO stat_netspace_size (blockchain, value, created_at) VALUES ('chia',?,?)",
(converters.str_to_gibs(farm_summary.netspace_size),current_datetime,))
except Exception:
app.logger.info(traceback.format_exc())
try:
cur.execute("INSERT INTO stat_time_to_win (blockchain, value, created_at) VALUES ('chia',?,?)",
(converters.etw_to_minutes(farm_summary.time_to_win),current_datetime,))
except Exception:
app.logger.info(traceback.format_exc())
db.commit()
|
StarcoderdataPython
|
9683817
|
<reponame>xloem/kivy-launcher<filename>main.py
# -*- coding: utf-8 -*-
def run_entrypoint(entrypoint):
import runpy
import sys
import os
entrypoint_path = os.path.dirname(entrypoint)
sys.path.append(os.path.realpath(entrypoint_path))
runpy.run_path(
entrypoint,
run_name="__main__")
def run_launcher(tb=None):
from launcher.app import Launcher
Launcher().run()
def dispatch():
import os
# desktop launch
print("dispathc!")
entrypoint = os.environ.get("KIVYLAUNCHER_ENTRYPOINT")
if entrypoint is not None:
return run_entrypoint(entrypoint)
# try android
try:
from jnius import autoclass
activity = autoclass("org.kivy.android.PythonActivity").mActivity
intent = activity.getIntent()
entrypoint = intent.getStringExtra("entrypoint")
orientation = intent.getStringExtra("orientation")
if orientation == "portrait":
# SCREEN_ORIENTATION_PORTRAIT
activity.setRequestedOrientation(0x1)
elif orientation == "landscape":
# SCREEN_ORIENTATION_LANDSCAPE
activity.setRequestedOrientation(0x0)
elif orientation == "sensor":
# SCREEN_ORIENTATION_SENSOR
activity.setRequestedOrientation(0x4)
if entrypoint is not None:
try:
return run_entrypoint(entrypoint)
except Exception:
import traceback
traceback.print_exc()
return
except Exception:
import traceback
traceback.print_exc()
run_launcher()
if __name__ == "__main__":
dispatch()
|
StarcoderdataPython
|
9681500
|
<gh_stars>10-100
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
from stestr.commands import load
from stestr.tests import base
class TestLoadCommand(base.TestCase):
def test_empty_with_pretty_out(self):
stream = io.BytesIO()
output = io.BytesIO()
res = load.load(in_streams=[('subunit', stream)], pretty_out=True,
stdout=output)
self.assertEqual(1, res)
|
StarcoderdataPython
|
1819230
|
<filename>grove/grove_gpio.py<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Grove Base Hat for the Raspberry Pi, used to connect grove sensors.
# Copyright (C) 2018 Seeed Technology Co.,Ltd.
'''
'''
import time
from grove.gpio import GPIO
__all__ = ["GroveGpio"]
class GroveGpio(GPIO):
'''
Class for Grove - Relay
Args:
pin(int): number of digital pin the relay connected.
'''
def __init__(self, pin):
super(GroveGpio, self).__init__(pin, GPIO.OUT)
def on(self):
'''
enable/on the relay
'''
self.write(1)
def off(self):
'''
disable/off the relay
'''
self.write(0)
Grove = GroveGpio
def main():
from grove.helper import SlotHelper
sh = SlotHelper(SlotHelper.GPIO)
pin = sh.argv2pin()
io = GroveGpio(pin)
while True:
try:
io.on()
time.sleep(1)
io.off()
time.sleep(1)
except KeyboardInterrupt:
io.off()
print("exit")
exit(1)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1858582
|
# -*- coding: UTF-8 -*-
import logging
from typing import List
from echoscope.config import config
from echoscope.util import str_util, log_util, clickhouse_util
from echoscope.model import ds_model, config_model
from echoscope.source import source
class ClickhouseSource(source.Source):
def __init__(self):
self.excludesDb = ['default', 'system']
def export_model(self, conf: config_model.DataSourceConfig) -> ds_model.DataSourceModel:
clickhouseUtil = clickhouse_util.get_clickhouse_util(
host=conf.host, port=conf.port, user=conf.user, passwd=conf.passwd, db=conf.db, charset=conf.charset)
ver = self.get_db_version(clickhouseUtil)
if ver == '':
logging.error(' clickhouse conn fail. ')
return
dsm = ds_model.DataSourceModel(
name='%s:%d' % (conf.host, conf.port), dbType=config.DsClickHouse, version=ver)
dsm.dbs = self.get_export_dbs(clickhouseUtil, conf.includes, conf.excludes)
dsm = self.fill_table_fields(clickhouseUtil, dsm)
return dsm
def get_db_version(self, conn: clickhouse_util.ClickhouseUtil) -> str:
"""获取mysql版本
Args:
cursor (clickhouse cursor): [description]
Returns:
str: [description]
"""
sql = 'select version() as ver;'
cols = ['ver']
ver = conn.find_one(sql, None, cols)
return '' if ver is None else str_util.format_bytes_to_str(ver.get('ver', ''))
def get_export_dbs(self, conn: clickhouse_util.ClickhouseUtil, includes: List[str] = [], excludes: List[str] = []) -> List[ds_model.DbModel]:
"""获取需要导出结构的数据库列表
Args:
conn (clickhouse_util.ClickhouseUtil): 数据库连接
includes (List[str], optional): 需要包含的数据库列表. Defaults to [].
excludes (List[str], optional): 需要排除的数据库列表. Defaults to [].
Returns:
List[ds_model.DbModel]: 需要导出的数据库列表
"""
sql = 'select name, engine from `system`.databases '
cols = ['name', 'engine']
data = conn.find_all(sql, None, cols)
dbs = []
for d in data:
db_name = str_util.format_bytes_to_str(d['name'])
if db_name in self.excludesDb or db_name in excludes:
# excluded database, skip
continue
if len(includes) > 0 and db_name not in includes:
# not in the include list, skip
continue
charset = ''
collation_name = ''
dbModel = ds_model.DbModel(
name=db_name, charset=charset, collation_name=collation_name)
dbs.append(dbModel)
return dbs
def fill_table_fields(self, conn: clickhouse_util.ClickhouseUtil, dsModel: ds_model.DataSourceModel) -> ds_model.DataSourceModel:
"""获取数据库中的表信息
Args:
conn (clickhouse_util.ClickhouseUtil): 数据库连接
dsModel (ds_model.DataSourceModel): 数据源,包含数据库列表
Returns:
ds_model.DataSourceModel: 数据源
"""
sql = ''' select database , name , engine , create_table_query , engine_full , partition_key , sorting_key , primary_key , total_rows , total_bytes , comment from `system`.tables WHERE database = %(dbName)s '''
cols = ['database', 'name', 'engine', 'create_table_query', 'engine_full',
'partition_key', 'sorting_key', 'primary_key', 'total_rows', 'total_bytes', 'comment']
for db in dsModel.dbs:
data = conn.find_all(sql, {'dbName': db.name}, cols)
tables: List[ds_model.TableModel] = []
for d in data:
tableName = str_util.format_bytes_to_str(d['name'])
comment = str_util.format_bytes_to_str(d['comment'])
collation_name = ''
engine = str_util.format_bytes_to_str(d['engine'])
create_script = self.get_create_script(conn, db.name, tableName)
table = ds_model.TableModel(
name=tableName, comment=comment, collation_name=collation_name, engine=engine, create_script=create_script)
logging.info('load table:%s fields.' % tableName)
table.fields = self.get_fields(conn, db.name, tableName)
tables.append(table)
db.tables = tables
return dsModel
def get_create_script(self, conn: clickhouse_util.ClickhouseUtil, dbName: str, tableName: str) -> str:
"""获取表的创建脚本
Args:
conn (clickhouse_util.ClickhouseUtil): 数据库连接
dbName (str): 数据库名称
tableName (str): 表名称
Returns:
str: 创建脚本
"""
sql = ''' SHOW CREATE TABLE %(dbName)s.%(tableName)s ''' % {
'dbName': dbName, 'tableName': tableName}
cols = ['statement']
data = conn.find_one(sql, None, cols)
return '' if data is None else str_util.format_bytes_to_str(data.get('statement', ''))
def get_fields(self, conn: clickhouse_util.ClickhouseUtil, dbName: str, tableName: str) -> List[ds_model.FieldModel]:
"""获取数据表中列信息
Args:
conn (clickhouse_util.ClickhouseUtil): 数据库连接
dbName (str): 数据库名
tableName (str): 表名
Returns:
List[ds_model.FieldModel]: 列列表
"""
sql = ''' select database , `table` , name , `type` , `position`, default_expression, comment , is_in_partition_key , is_in_sorting_key , is_in_primary_key , is_in_sampling_key from `system`.columns c where database = %(dbName)s and `table` = %(tableName)s ORDER BY `position` ASC '''
cols = ['database', 'table', 'name', 'type', 'position', 'default_expression', 'comment',
'is_in_partition_key', 'is_in_sorting_key', 'is_in_primary_key', 'is_in_sampling_key']
data = conn.find_all(sql, {'dbName': dbName, 'tableName': tableName}, cols)
fields = []
for d in data:
fname = str_util.format_bytes_to_str(d['name'])
ftype = str_util.format_bytes_to_str(d['type'])
length = None
scale = None
# on update CURRENT_TIMESTAMP
default = str_util.format_bytes_to_str(d['default_expression'])
nullFlag = False
comment = str_util.format_bytes_to_str(d['comment'])
charset = ''
collation_name = ''
indexFlag = 0
is_in_sorting_key = int(str_util.format_bytes_to_str(d['is_in_sorting_key']))
if is_in_sorting_key == 1:
indexFlag = 1
is_in_partition_key = int(str_util.format_bytes_to_str(d['is_in_partition_key']))
is_in_primary_key = int(str_util.format_bytes_to_str(d['is_in_primary_key']))
is_in_sampling_key = int(str_util.format_bytes_to_str(d['is_in_sampling_key']))
indexName = ''
autoInc = False
field = ds_model.FieldModel(name=fname, ftype=ftype, length=length, scale=scale, default=default, nullFlag=nullFlag,
comment=comment, charset=charset, collation_name=collation_name, indexFlag=indexFlag, indexName=indexName, autoInc=autoInc, in_partition_key_flag=is_in_partition_key, in_sorting_key_flag=is_in_sorting_key, in_primary_key_flag=is_in_primary_key, in_sampling_key_flag=is_in_sampling_key)
fields.append(field)
return fields
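# Usage sketch (assumed DataSourceConfig fields, inferred from the attribute
# reads in export_model above rather than from the package docs):
if __name__ == '__main__':
    conf = config_model.DataSourceConfig(
        host='127.0.0.1', port=9000, user='default', passwd='',
        db='default', charset='utf8', includes=[], excludes=[])
    dsm = ClickhouseSource().export_model(conf)
    if dsm:
        print(dsm.name, [db.name for db in dsm.dbs])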
|
StarcoderdataPython
|
3353748
|
#python 3 compatibility
from __future__ import print_function
from .gridbase import Grid
from .dataset import DataSetException
from .grid2d import Grid2D
import abc
from collections import OrderedDict
class MultiGrid(Grid):
def __init__(self,layers,descriptions=None):
"""
Construct a semi-abstract MultiGrid object, which can contain many 2D layers of gridded data, all at the
same resolution and with the same extent.
:param layers:
OrderedDict of Grid2D objects.
:param descriptions:
list of layer descriptions, or None
:raises DataSetException:
When:
- length of descriptions (when not None) does not match length of layers
- input layers is not an OrderedDict
"""
if not isinstance(layers,OrderedDict):
raise DataSetException('Input layers must be of type OrderedDict.')
if descriptions is None:
descriptions = ['' for l in layers.keys()]
if len(descriptions) != len(layers):
raise DataSetException('List of descriptions does not match length of layers.')
lnames = list(layers.keys())
self._layers = OrderedDict()
self._descriptions = OrderedDict()
for i in range(0,len(lnames)):
layername = lnames[i]
layer = layers[layername]
desc = descriptions[i]
geodict = layer.getGeoDict()
self._layers[layername] = layer
self._descriptions[layername] = desc
self._geodict = geodict
@abc.abstractmethod
def save(self,filename):
"""
Save layers of data to a file
:param filename:
File to save data to.
"""
raise NotImplementedError("save method not implemented in MultiGrid")
#subclassed implementation should be a @classmethod
@abc.abstractmethod
def load(self,filename):
"""
Load layers of data from a file.
:param filename:
File to load data from.
"""
raise NotImplementedError("save method not implemented in MultiGrid")
def setLayer(self,name,data,desc=None):
"""
Add a 2D layer of data to a MultiGrid object.
:param name:
String which will be used to retrieve the data.
:param data:
2D numpy array of data
:param desc:
Optional text description of layer
:raises DataSetException:
If the data layer dimensions don't match the geodict.
"""
nr,nc = data.shape
if nr != self._geodict.ny or nc != self._geodict.nx:
raise DataSetException("Data layer dimensions don't match those already in the grid")
self._layers[name] = Grid2D(data,self._geodict.copy())
self._descriptions[name] = desc
def getLayer(self,name):
"""
Retrieve the Grid2D object associated with a layer name.
:param name:
Name of data layer.
:returns:
Grid2D object.
:raises DataSetException:
When name is not found in list of layer names.
"""
if name not in self._layers.keys():
raise DataSetException('Layer "%s" not in list of layers.' % name)
return self._layers[name]
def getData(self):
"""
Return the OrderedDict of data layers contained in MultiGrid.
:returns:
OrderedDict of Grid2D objects.
"""
return self._layers
def setData(self,layers,descriptions=None):
"""
Set the OrderedDict of data layers contained in MultiGrid.
:param layers:
OrderedDict of Grid2D objects.
"""
self._layers = layers
layernames = list(layers.keys())
self._geodict = layers[layernames[0]].getGeoDict().copy()
def getGeoDict(self):
"""
Return the geodict object which defines the extent and resolution of all the grids.
:returns:
geodict dictionary (see constructor)
"""
return self._geodict
def getBounds(self):
"""
Return the lat/lon range of the data.
:returns:
Tuple of (lonmin,lonmax,latmin,latmax)
"""
return (self._geodict.xmin,self._geodict.xmax,self._geodict.ymin,self._geodict.ymax)
def trim(self,geodict,resample=False,method='linear'):
"""
Trim all data layers to a smaller set of bounds, resampling if requested. If not resampling,
data will be trimmed to smallest grid boundary possible.
:param geodict:
GeoDict used to specify subset bounds and resolution (if resample is selected)
:param resample:
Boolean indicating whether the data should be resampled to *exactly* match input bounds.
:param method:
If resampling, method used, one of ('linear','nearest','cubic','quintic')
"""
for (layername,layer) in self._layers.items():
layer.trim(geodict,resample=resample,method=method)
self._geodict = layer.getGeoDict().copy()
def getLayerNames(self):
"""
Return the list of layer names contained in the MultiGrid.
:returns:
List of layer names.
"""
return self._layers.keys()
def getValue(self,lat,lon,layername,method='nearest',default=None):
"""Return numpy array at given latitude and longitude (using nearest neighbor).
:param lat:
Latitude (in decimal degrees) of desired data value.
:param lon:
Longitude (in decimal degrees) of desired data value.
:param layername:
Name of layer from which to retrieve data.
:param method:
Interpolation method, one of ('nearest','linear','cubic','quintic')
:param default:
Default value to return when lat/lon is outside of grid bounds.
:return:
Value at input latitude,longitude position.
"""
return self._layers[layername].getValue(lat,lon,method=method,default=default)
def getLatLon(self,row,col):
"""Return geographic coordinates (lat/lon decimal degrees) for given data row and column.
:param row:
Row dimension index into internal data array.
:param col:
Column dimension index into internal data array.
:returns:
Tuple of latitude and longitude.
"""
layernames = list(self._layers.keys())
return self._layers[layernames[0]].getLatLon(row,col)
def getRowCol(self,lat,lon,returnFloat=False):
"""Return data row and column from given geographic coordinates (lat/lon decimal degrees).
:param lat:
Input latitude.
:param lon:
Input longitude.
:param returnFloat:
Boolean indicating whether floating point row/col coordinates should be returned.
:returns:
Tuple of row and column.
"""
layernames = list(self._layers.keys())
return self._layers[layernames[0]].getRowCol(lat,lon,returnFloat=returnFloat)
def subdivide(self,finerdict,cellFill='max'):
"""Subdivide the cells of the host grid into finer-resolution cells.
:param finerdict:
GeoDict object defining a grid with a finer resolution than the host grid.
:param cellFill:
String defining how to fill cells that span more than one host grid cell.
Choices are:
'max': Choose maximum value of host grid cells.
'min': Choose minimum value of host grid cells.
'mean': Choose mean value of host grid cells.
:returns:
MultiGrid instance with host grid values subdivided onto finer grid.
:raises DataSetException:
When finerdict is a) not finer resolution, b) does not intersect the host grid, or c) cellFill is not valid.
"""
layers = OrderedDict()
for (layername,layer) in self._layers.items():
layers[layername] = layer.subdivide(finerdict,cellFill=cellFill)
return MultiGrid(layers)
def interpolateToGrid(self,geodict,method='linear'):
"""
Given a geodict specifying another grid extent and resolution, resample all grids to match.
:param geodict:
geodict dictionary from another grid whose extents are inside the extent of this grid.
:param method:
Optional interpolation method - ['linear', 'cubic','quintic','nearest']
:raises DataSetException:
If the Grid object upon which this function is being called is not completely contained by the
grid to which this Grid is being resampled.
:raises DataSetException:
If the resulting interpolated grid shape does not match input geodict.
This function modifies the internal griddata and geodict object variables.
"""
layers = OrderedDict()
for (layername,layer) in self._layers.items():
#layer.interpolateToGrid(geodict,method=method)
layers[layername] = layer.interpolateToGrid(geodict,method=method)
#self._geodict = layer.getGeoDict().copy()
return MultiGrid(layers)
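# Usage sketch (kept as a comment since MultiGrid is semi-abstract and the
# Grid2D/GeoDict constructors live in sibling modules):
#     layers = OrderedDict([('a', Grid2D(data_a, gd)), ('b', Grid2D(data_b, gd))])
#     mg = MultiGrid(layers, descriptions=['layer a', 'layer b'])
#     print(list(mg.getLayerNames()))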
|
StarcoderdataPython
|
1897567
|
#!/usr/bin/env python3
import argparse
import asyncio
from aiohttp import ClientSession, BasicAuth, ClientTimeout
import os
import sys
import logging
import aiohttp_github_helpers as h
GITHUB_USER = os.environ['GITHUB_USER']
GITHUB_PASS = os.environ['GITHUB_PASS']
TIMEOUT = ClientTimeout(total=20)
AUTH = BasicAuth(GITHUB_USER, GITHUB_PASS)
ORG = "metwork-framework"
async def valid_status(owner, repo, sha):
async with ClientSession(auth=AUTH, timeout=TIMEOUT) as session:
r = await h.github_create_status(session, owner, repo, sha, "success",
"http://metwork-framework.org:9000/"
"github_webhook_ready_to_merge",
"pr ready to merge logic",
"pr ready to merge")
if not r:
logging.critical("Impossible to create status for %s/%s@%s" %
(owner, repo, sha))
sys.exit(1)
print("status for %s/%s@%s: ok" % (owner, repo, sha))
parser = argparse.ArgumentParser(description='valid merge logic status')
parser.add_argument('REPO', type=str, help='repo name (without owner)')
parser.add_argument('SHA', type=str, help='sha to valid')
args = parser.parse_args()
loop = asyncio.get_event_loop()
loop.run_until_complete(valid_status(ORG, args.REPO, args.SHA))
loop.close()
|
StarcoderdataPython
|
4947450
|
from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer
# Define Amazon MSK Brokers
brokers='<YOUR_MSK_BROKER_1>,<YOUR_MSK_BROKER_2>'
# Define Schema Registry
schema_registry='<YOUR_SCHEMA_REGISTRY>'
# Define Kafka topic to be produced to
kafka_topic='<YOUR_KAFKA_TOPIC>'
# Value Schema
value_schema_str = """
{
"type":"record",
"name":"value_schema",
"fields":[
{
"name":"id",
"type":[
"null",
"int"
],
"default":null
}
]
}
"""
# Key Schema
key_schema_str = """
{
"type":"record",
"name":"key_schema",
"fields":[
{
"name":"id",
"type":"int"
}
]
}
"""
def delivery_report(err, msg):
"""
Called once for each message produced to indicate delivery result.
Triggered by poll() or flush().
"""
if err is not None:
print('Message delivery failed: {}'.format(err))
else:
print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
value_schema = avro.loads(value_schema_str)
key_schema = avro.loads(key_schema_str)
key = {"id": 1}
avroProducer = AvroProducer({
'bootstrap.servers': brokers,
'on_delivery': delivery_report,
'schema.registry.url': schema_registry
}, default_key_schema=key_schema, default_value_schema=value_schema)
avroProducer.produce(topic=kafka_topic, key=key)
avroProducer.flush()
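# Note: the produce() call above sends only a key. With the value schema
# defined, a record payload can be attached too -- a sketch reusing the same
# producer instance:
#     value = {"id": 1}
#     avroProducer.produce(topic=kafka_topic, key=key, value=value)
#     avroProducer.flush()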
|
StarcoderdataPython
|
5030557
|
<reponame>ryu57/pyHalo
from pyHalo.Halos.halo_base import Halo
import numpy as np
class NFWFieldHalo(Halo):
"""
The main class for an NFW field halo profile without truncation
See the base class in Halos/halo_base.py for the required routines for any instance of a Halo class
"""
def __init__(self, mass, x, y, r3d, mdef, z,
sub_flag, lens_cosmo_instance, args, unique_tag):
"""
See documentation in base class (Halos/halo_base.py)
"""
self._lens_cosmo = lens_cosmo_instance
super(NFWFieldHalo, self).__init__(mass, x, y, r3d, mdef, z, sub_flag,
lens_cosmo_instance, args, unique_tag)
@property
def lenstronomy_ID(self):
"""
See documentation in base class (Halos/halo_base.py)
"""
return ['NFW']
@property
def lenstronomy_params(self):
"""
See documentation in base class (Halos/halo_base.py)
"""
(concentration) = self.profile_args
Rs_angle, theta_Rs = self._lens_cosmo.nfw_physical2angle(self.mass, concentration, self.z)
x, y = np.round(self.x, 4), np.round(self.y, 4)
Rs_angle = np.round(Rs_angle, 10)
theta_Rs = np.round(theta_Rs, 10)
kwargs = [{'alpha_Rs': self._rescale_norm * theta_Rs, 'Rs': Rs_angle,
'center_x': x, 'center_y': y}]
return kwargs, None
@property
def profile_args(self):
"""
See documentation in base class (Halos/halo_base.py)
"""
if not hasattr(self, '_profile_args'):
concentration = self._lens_cosmo.NFW_concentration(self.mass,
self.z,
self._args['mc_model'],
self._args['mc_mdef'],
self._args['log_mc'],
self._args['c_scatter'],
self._args['c_scatter_dex'],
self._args['kwargs_suppression'],
self._args['suppression_model'])
self._profile_args = (concentration)
return self._profile_args
class NFWSubhhalo(NFWFieldHalo):
"""
The main class for an NFW subhalo profile without truncation
See the base class in Halos/halo_base.py for the required routines for any instance of a Halo class
"""
@property
def profile_args(self):
"""
See documentation in base class (Halos/halo_base.py)
"""
if not hasattr(self, '_profile_args'):
if self._args['evaluate_mc_at_zlens']:
z_eval = self.z
else:
z_eval = self.z_infall
concentration = self._lens_cosmo.NFW_concentration(self.mass,
z_eval,
self._args['mc_model'],
self._args['mc_mdef'],
self._args['log_mc'],
self._args['c_scatter'],
self._args['c_scatter_dex'],
self._args['kwargs_suppression'],
self._args['suppression_model'])
self._profile_args = (concentration)
return self._profile_args
|
StarcoderdataPython
|
6573751
|
<filename>Game(Stone,paper,scissor).py<gh_stars>1-10
import random
li=['stone','paper','scissor']
n=1
while(n==1):
user1=random.choice(li)
user2=input("Enter Your Choice: ")
print("Computer Choice is: ",user1)
print("Your choice is: ",user2)
if(user1=='stone'):
if(user2=='scissor'):
print("You lose")
elif(user1==user2):
print("Tie")
else:
print("You Win")
elif(user1=='paper'):
if(user1==user2):
print("Tie")
elif(user2== "scissor"):
print("You Win")
else:
print("You Loss")
else:
if(user1==user2):
print("Tie")
elif(user2=='paper'):
print("You loss")
else:
print("You Win")
print("Do You want To Continue")
n=int(input("if yes press 1 otherwise press 0 :"))
|
StarcoderdataPython
|
8171782
|
# -*- coding: utf-8 -*-
from NLU.consult.dinning_nlu import dinning_nlu_rule
from NLG.consult.dinning_nlg import nlg_confirm_conditions, nlg_recommend_restaurant, nlg_confirm_each_slot, dinning_reply
def judge_confirm_each_slot(state_tracker_obj, last_slot_state, current_slot, yes_no):
if last_slot_state in current_slot and yes_no:
if yes_no == "positive":
if last_slot_state in state_tracker_obj.get_state():
state_tracker_obj.update_confidence(last_slot_state, 1)
state_tracker_obj.get_current_slot(current_slot)
response_utterance, last_slot_state = dinning_reply(state_tracker_obj, current_slot)
if last_slot_state != -1:
state_tracker_obj.update_last_slot_state(last_slot_state)
return response_utterance
if yes_no == "negative":
if last_slot_state in state_tracker_obj.get_state():
state_tracker_obj.update_slot_value(last_slot_state, "no")
state_tracker_obj.update_confidence(last_slot_state, 1)
state_tracker_obj.get_current_slot(current_slot)
else:
state_tracker_obj.add_one_state(last_slot_state, "no", 1)
state_tracker_obj.get_current_slot(current_slot)
response_utterance, last_slot_state = dinning_reply(state_tracker_obj, current_slot)
if last_slot_state:
state_tracker_obj.update_last_slot_state(last_slot_state)
return response_utterance
def final_confirm(ie_values_dict, current_slot, state_tracker_obj, last_slot_state, yes_no, db_obj, collection_name):
if yes_no == "positive" and last_slot_state == "done":
result_ls = db_obj.search_db(collection_name, current_slot)
state_tracker_obj.update_last_slot_state("done")
return nlg_recommend_restaurant(result_ls, last_slot_state)
if yes_no == "negative" and last_slot_state == "done":
if ie_values_dict:
response_utterance, last_slot_state = dinning_reply(state_tracker_obj, current_slot)
state_tracker_obj.update_last_slot_state(last_slot_state)
state_tracker_obj.update_all_state(ie_values_dict) # update dialogue state for all slots
state_tracker_obj.get_current_slot(current_slot) # fill and get current slots
return response_utterance
else:
state_tracker_obj.update_last_slot_state("change")
return "Do you have any other requirements?"
def change_slot(state_tracker_obj, current_slot, ie_values_dict, last_slot_state, yes_no, db_obj, collection_name):
if yes_no == "positive" and last_slot_state == "change":
if not ie_values_dict:
state_tracker_obj.update_last_slot_state(None)
return "your requirements?"
# response_utterance, last_slot_state = dinning_reply(current_slot)
# if last_slot_state:
# state_tracker_obj.update_last_slot_state(last_slot_state)
# return response_utterance
if yes_no == "negative" and last_slot_state == "change":
result_ls = db_obj.search_db(collection_name, current_slot)
return nlg_recommend_restaurant(result_ls, last_slot_state)
def confirm_each_slot(state_tracker_obj, last_slot_state):
print("last_slot_state", last_slot_state)
need_confirm_slot_ls = state_tracker_obj.get_need_to_confirm()
if need_confirm_slot_ls: # need to confirm for each slot
if last_slot_state not in need_confirm_slot_ls: # in start
state_tracker_obj.update_last_slot_state(need_confirm_slot_ls[0])
return nlg_confirm_each_slot(need_confirm_slot_ls[0], # TODO: change last state
state_tracker_obj.get_slot_value(need_confirm_slot_ls[0]))
else:
return nlg_confirm_each_slot(last_slot_state, state_tracker_obj.get_slot_value(last_slot_state))
def dinning_handle(current_slot, customer_utterance, state_tracker_obj, db_obj, collection_name):
last_slot_state = state_tracker_obj.get_last_slot_state()
print("top last_slot_state", last_slot_state)
ie_values_dict, yes_no = dinning_nlu_rule(customer_utterance)
response_utterance = judge_confirm_each_slot(state_tracker_obj, last_slot_state, current_slot, yes_no)
if response_utterance:
return response_utterance
response_utterance = final_confirm(ie_values_dict, current_slot, state_tracker_obj, last_slot_state, yes_no, db_obj, collection_name)
if response_utterance:
return response_utterance
response_utterance = change_slot(state_tracker_obj, current_slot, ie_values_dict, last_slot_state, yes_no, db_obj, collection_name)
if response_utterance:
return response_utterance
# print(state_tracker_obj.get_state())
state_tracker_obj.update_all_state(ie_values_dict) # update dialogue state for all slots
state_tracker_obj.get_current_slot(current_slot) # fill and get current slots
# print(state_tracker_obj.get_state())
# print(current_slot)
response_utterance = confirm_each_slot(state_tracker_obj, last_slot_state)
if response_utterance:
return response_utterance
state = state_tracker_obj.judge_dialogue_state() # judge whether the filling slots process is done
if state is True: # all slots are done
condition_confirm_utterance = nlg_confirm_conditions(current_slot)
state_tracker_obj.update_last_slot_state("done")
return condition_confirm_utterance
else: # not done
response_utterance, last_slot_state = dinning_reply(state_tracker_obj, current_slot)
state_tracker_obj.update_last_slot_state(last_slot_state)
return response_utterance
|
StarcoderdataPython
|
3360317
|
<reponame>markbeep/Lecturfier
import aiohttp
import discord
from discord.ext import commands, tasks
import random
import asyncio
import os
from helper import image2queue as im2q
from helper.sql import SQLFunctions
from PIL import Image, ImageDraw, ImageFont
import PIL
import io
from discord.ext.commands.cooldowns import BucketType
def rgb2hex(r, g, b):
return '#{:02x}{:02x}{:02x}'.format(r, g, b)
def loading_bar_draw(a, b):
prog = int(10 * a / b)
return "<:green_box:764901465948684289>" * prog + (10 - prog) * "<:grey_box:764901465592037388>"
def modifiers(img: im2q.PixPlace, mods: tuple) -> int:
drawn = 0
start = -1
end = -1
for i in range(len(mods)):
m = mods[i]
last = i == len(mods) - 1
if m.startswith("p"): # percent start
if not last:
if mods[i + 1].isnumeric():
start = int(mods[i + 1])
i += 1
if m.startswith("e"): # percent end
if not last:
if mods[i + 1].isnumeric():
end = int(mods[i + 1])
i += 1
elif m.startswith("f"): # flip
img.flip()
elif m.startswith("c"): # center
img.center_first()
elif m.startswith("r"): # low to high def
img.low_to_high_res()
elif m.startswith("l"): # left to right
img.left_to_right()
if start != -1 or end != -1:
print("Modfiers", start, end)
if start != -1 != end:
drawn = img.perc_to_perc(start, end)
elif start != -1:
drawn = img.resume_progress(start)
else:
img.end_at(end)
return drawn
async def create_buffer(ctx, x1, x2, y1, y2):
async with aiohttp.ClientSession() as cs:
async with cs.get(ctx.message.attachments[0].url) as r:
buffer = io.BytesIO(await r.read())
im = Image.open(buffer)
x1 = int(x1)
x2 = int(x2)
y1 = int(y1)
y2 = int(y2)
width, height = im.size
if x2 - x1 != width or y2 - y1 != height:
im = im.resize((x2 - x1, y2 - y1), PIL.Image.NEAREST)
buff = io.BytesIO()
im.save(buff, format="PNG")
buff.seek(0)
return buff
buffer.seek(0)
return buffer
def is_valid_msg(msg):
    filt = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ "
if len(msg) > 200 or len(msg) < 15:
return False
count = 0
for c in msg:
if c in filt:
count += 1
if count / len(msg) > 0.7:
return False
return True
class Draw(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.cancel_all = False
self.cancel_draws = []
self.pause_draws = False
self.progress = {}
self.image = None
self.queue = []
self.background_draw.start()
self.db_path = "./data/discord.db"
self.place_path = "./place/"
self.conn = SQLFunctions.connect()
self.LINE_HEIGHT = 62 # amount of lines which fit on the place canvas
self.CHAR_WIDTH = 166 # amount of chars which fit in a line on the place canvas
self.font = ImageFont.truetype("./data/nk57-monospace-cd-rg.ttf", 12)
self.userToCopyTextFrom = 155419933998579713
self.last_line = SQLFunctions.get_config("Draw_Last_Line", self.conn)
if len(self.last_line) == 0:
self.last_line = 0
else:
self.last_line = self.last_line[0]
self.last_char = SQLFunctions.get_config("Draw_Last_Char", self.conn)
if len(self.last_char) == 0:
self.last_char = 0
else:
self.last_char = self.last_char[0]
def get_task(self):
self.pause_draws = True
return self.background_draw
@tasks.loop(seconds=5)
async def background_draw(self):
await self.bot.wait_until_ready()
# opens and readies all the files
imgs = self.get_all_queues(self.place_path)
for im in imgs:
if im.fp not in self.progress:
start = SQLFunctions.get_config(f"Start_{im.fp}", self.conn)
if len(start) == 0:
start = 0
else:
start = start[0]
self.progress[im.fp] = {
"count": start,
"img": im,
"queue": im.get_queue()
}
self.queue.append({
"ID": im.fp,
"size": im.size,
"img": im,
"queue": im.get_queue()
})
channelID = SQLFunctions.get_config("PlaceChannel", self.conn)
if len(channelID) == 0:
channelID = 819966095070330950
else:
channelID = channelID[0]
channel = self.bot.get_channel(channelID)
if channel is None:
channel = self.bot.get_channel(402551175272202252) # fallback test channel
# keeps going through all lists
while len(self.queue) > 0 and not self.pause_draws:
drawing = self.queue[0]
start = SQLFunctions.get_config(f"Start_{drawing['ID']}", self.conn)
end = SQLFunctions.get_config(f"End_{drawing['ID']}", self.conn)
if len(start) == 0:
start = 0
else:
start = start[0]
if len(end) == 0:
end = drawing["img"].size
else:
end = end[0]
done = await self.draw_pixels(drawing["ID"], channel, start, end)
if done:
self.remove_drawing(drawing["ID"])
def remove_drawing(self, ID):
# removes the drawing from the sql table
SQLFunctions.delete_config(f"%{ID}", self.conn)
# removes it from the queue and progress bar
if ID in self.progress:
self.progress.pop(ID)
self.queue.pop(0)
while ID in self.cancel_draws:
self.cancel_draws.remove(ID)
os.remove(f"{self.place_path}{ID}.npy")
def get_all_queues(self, dir="./"):
q = []
for filename in os.listdir(dir):
if filename.endswith(".npy"):
img = im2q.PixPlace(filename.replace(".npy", ""), "q", setup=False)
img.load_array(os.path.join(dir, filename))
q.append(img)
return q
async def draw_pixels(self, ID, channel, start, end) -> bool:
pixels_queue = self.progress[ID]["queue"][start:end]
# draws the pixels
while len(pixels_queue) > 0:
if self.cancel_all or str(ID) in self.cancel_draws:
await channel.send(f"Canceled Project {ID}.")
return True
if self.pause_draws:
return False
pix = pixels_queue[0]
pX = pix[0]
pY = pix[1]
pHex = rgb2hex(pix[2], pix[3], pix[4])
try:
await channel.send(f".place setpixel {pX} {pY} {pHex} | PROJECT {ID}")
self.progress[ID]["count"] += 1
pixels_queue.pop(0)
if self.progress[ID]["count"] % 10 == 0:
SQLFunctions.insert_or_update_config(f"Start_{ID}", self.progress[ID]["count"], self.conn)
except Exception:
await asyncio.sleep(5)
return True
@commands.Cog.listener()
async def on_message(self, message):
# fights against people trying to ruin my images hehe ;)
if message.content.startswith(".place setpixel ") and self.image is not None:
cont = message.content.split(" ")
try:
x = int(cont[2])
y = int(cont[3])
except ValueError:
return
r, g, b, a = self.image.getpixel((x, y))
if a != 0:
color = rgb2hex(r, g, b)
if color != cont[4].lower():
channel = self.bot.get_channel(819966095070330950)
if channel is None:
channel = self.bot.get_channel(402563165247766528)
await channel.send(f".place setpixel {x} {y} {color} | COUNTERING {message.author.name}")
if message.author.id == self.userToCopyTextFrom and message.channel.id != 813430350965375046 and is_valid_msg(message.content):
pil_img, self.last_line, self.last_char = self.draw_text(message.content, self.last_line, self.last_char)
SQLFunctions.insert_or_update_config("Draw_Last_Line", self.last_line, self.conn)
SQLFunctions.insert_or_update_config("Draw_Last_Char", self.last_char, self.conn)
# id to stop specific draw
ID = str(random.randint(1000, 10000))
img = im2q.PixPlace(ID, ID, False, pil_img=pil_img)
img.left_to_right()
self.handle_image(img, 0, ID)
def draw_desc(self, ID):
if ID not in self.progress:
return "Project has no info"
prog = self.progress[ID]
topX, topY = prog["img"].top_left_corner
botX, botY = prog["img"].bot_right_corner
pix_drawn = prog["count"]
pix_total = prog["img"].size
return f"ID: {ID}\n" \
f"X: {topX} | Y: {topY}\n" \
f"Width: {botX - topX} | Height: {botY - topY}\n" \
f"Pixel Total: {pix_total}\n" \
f"Pixels to draw: {pix_total - pix_drawn}\n" \
f"Pixels drawn: {pix_drawn}\n" \
f"{loading_bar_draw(pix_drawn, pix_total)} {round(100 * pix_drawn / pix_total, 2)}%\n" \
f"Time Remaining: {round((pix_total - pix_drawn) * len(self.progress) / 60, 2)} mins\n" \
f"`.place zoom {topX} {topY} {min(max(botX - topX, botY - topY), 250)}` to see the progress.\n"
@commands.is_owner()
@commands.group(aliases=["d"], usage="draw <command> <x1> <x2> <y1> <y2> <step> <color/channel> [delete_messages: y/n]",
invoke_without_command=True)
async def draw(self, ctx, command=None, x1=None):
"""
Draws a picture using Battle's place command.
Commands:
- `image <x1> <x2> <y1> <y2> [step] [updates channel]`
- `multi <x1> <x2> <y1> <y2> [step] [updates channel]`
- `save [clear]`
- `cancel`: Cancels all currently going on drawings.
- `pause`: Pauses all drawings
Permissions: Owner
"""
if ctx.invoked_subcommand is None:
if command is None:
await ctx.send("No command given")
raise discord.ext.commands.errors.BadArgument
elif command == "pause":
self.pause_draws = not self.pause_draws
await ctx.send(f"Pause draws: {self.pause_draws}")
elif command == "cancel":
if x1 is None:
self.cancel_all = True
for d in self.queue:
self.remove_drawing(d["ID"])
else:
self.cancel_draws.append(x1)
self.remove_drawing(x1)
else:
                await ctx.send("Command not found. Right now only `pause`, `cancel`, `image`, `text` and `multi` exist.")
def handle_image(self, img: im2q, drawn: int, ID: str):
self.progress[ID] = {
"count": drawn,
"img": img,
"queue": img.get_queue()
}
self.queue.append({
"ID": ID,
"size": img.size,
"img": img,
"queue": img.get_queue()
})
SQLFunctions.insert_or_update_config(f"Start_{ID}", 0, self.conn)
SQLFunctions.insert_or_update_config(f"End_{ID}", img.size, self.conn)
# saves the img as a numpy file so it can easily be reload when the bot restarts
img.save_array(f"{self.place_path}{ID}")
@commands.is_owner()
@draw.command(aliases=["i"], usage="image <x1> <x2> <y1> <y2> {mods}")
async def image(self, ctx, x1=None, x2=None, y1=None, y2=None, *mods):
"""
`x1`: x to start
`x2`: x to stop
`y1`: y to start
`y2`: y to stop
**Modifiers:**
`p <int>`: Percentage to start at
`e <int>`: Percentage to stop image at
`f`: Flip queue order
`c`: Center to out draw order
        `r`: Low to high resolution draw order
`l`: Left to right draw order
Permissions: Owner
"""
if len(ctx.message.attachments) == 0:
await ctx.send("No image given")
raise discord.ext.commands.errors.BadArgument
try:
buffer = await create_buffer(ctx, x1, x2, y1, y2)
except ValueError:
await ctx.send("Not all coordinates given.")
raise discord.ext.commands.errors.BadArgument
self.cancel_all = False
# id to stop specific draw
ID = str(random.randint(1000, 10000))
img = im2q.PixPlace(buffer, ID)
drawn = modifiers(img, mods)
self.handle_image(img, drawn, ID)
embed = discord.Embed(title="Started Drawing", description=self.draw_desc(ID))
await ctx.send(embed=embed)
@commands.is_owner()
@draw.command(aliases=["t"], usage="text")
async def text(self, ctx):
"""
Reads in pixels from a setpixel txt file
"""
if len(ctx.message.attachments) == 0:
await ctx.send("No text file given")
raise discord.ext.commands.errors.BadArgument
async with aiohttp.ClientSession() as cs:
async with cs.get(ctx.message.attachments[0].url) as r:
setpixels_file = await r.text()
self.cancel_all = False
# id to stop specific draw
ID = str(random.randint(1000, 10000))
img = im2q.PixPlace(ID, ID, setup=False, setpixels=setpixels_file)
self.handle_image(img, 0, ID)
embed = discord.Embed(title="Started Drawing", description=self.draw_desc(ID))
await ctx.send(embed=embed)
@commands.is_owner()
@commands.cooldown(1, 30, BucketType.guild)
@draw.command(aliases=["m"], usage="multi <x1> <x2> <y1> <y2> {mods}")
async def multi(self, ctx, x1=None, x2=None, y1=None, y2=None, *mods):
"""
Creates a txt file for setmultiplepixels and sends it via DMs.
`x1`: x to start
`x2`: x to stop
`y1`: y to start
`y2`: y to stop
**Modifiers:**
`p <int>`: Percentage to start at
`e <int>`: Percentage to stop image at
`f`: Flip queue order
`c`: Center to out draw order
        `l`: Left to right draw order
"""
if len(ctx.message.attachments) == 0:
await ctx.send("No image given")
raise discord.ext.commands.errors.BadArgument
try:
buffer = await create_buffer(ctx, x1, x2, y1, y2)
        except ValueError:
            await ctx.send("Not all coordinates given.")
            raise discord.ext.commands.errors.BadArgument
img = im2q.PixPlace(buffer, "multi")
modifiers(img, mods)
pixels_queue = img.get_queue()
# makes txt files instead
file_count = 0
files = []
while len(pixels_queue) > 0:
file_count += 1
content = ""
pixels_added = 0
for i in range(80000):
if len(pixels_queue) == 0:
break
pix = pixels_queue.pop(0)
pX = pix[0]
pY = pix[1]
pHex = rgb2hex(pix[2], pix[3], pix[4])
content += f"{pX} {pY} {pHex}"
if len(pixels_queue) != 0:
content += "|"
pixels_added += 1
filename = f"{file_count}-{pixels_added}.txt"
files.append(filename)
with open(filename, "a") as f:
f.write(content)
for f in files:
file = discord.File(f)
await ctx.author.send(f, file=file)
os.remove(f)
await ctx.author.send("Done")
return
@draw.command(usage="progress <ID>", aliases=["prog"])
async def progress(self, ctx, ID=""):
if "comp" not in ID and (ID == "" or ID not in self.progress):
keys = ""
rank = 1
total_pix = 0
for k in self.queue:
time_to_start = total_pix // 60
current_amount = self.progress[k["ID"]]["count"]
img_total = k["img"].size
percentage = round(current_amount * 100 / img_total, 2)
total_pix += img_total - current_amount
keys += f"`{rank}:` ID: {k['ID']}\n" \
f"---**Starting in:** {time_to_start}mins\n" \
f"---**Progress:** {percentage}%\n" \
f"---**Duration:** {img_total - current_amount} pixels | {(img_total - current_amount) // 60}mins\n" \
f"---**Finished in:** {total_pix // 60}mins\n"
rank += 1
if len(keys) > 2000:
await ctx.send("Too many projects currently in work. Use `$d prog compact` to get a compact view.")
return
await ctx.send(f"Project IDs | Count:{len(self.progress)} | Paused: {self.pause_draws}\n{keys}")
return
if "comp" in ID.lower(): # compact view to send a lot of projects easier
total_pix = 0
id_list = []
for k in self.queue:
total_pix += k["img"].size
id_list.append(k["ID"])
id_msg = ", ".join(id_list)
if len(id_msg) > 2000:
id_msg = id_msg[:2000] + "**[...]**"
embed = discord.Embed(title="Compact Projects", description=id_msg)
embed.add_field(name="Time", value=f"All projects finished in `{total_pix // 60}` mins")
await ctx.send(embed=embed)
return
embed = discord.Embed(
title=f"Drawing Progress | Project {ID}",
description=self.draw_desc(ID)
)
await ctx.send(embed=embed)
@commands.is_owner()
@draw.command(usage="preview <ID>")
async def preview(self, ctx, ID=None):
if ID is None or ID not in self.progress:
await ctx.send("Unknown ID given")
else:
async with ctx.typing():
img = self.progress[ID]["img"]
gif = await img.create_gif()
file = discord.File(fp=gif, filename="prev.gif")
await ctx.send(file=file)
@commands.is_owner()
@draw.command()
async def save(self, ctx, on="n"):
# saves the new image if needed
msg = ""
if len(ctx.message.attachments) != 0:
async with aiohttp.ClientSession() as cs:
async with cs.get(ctx.message.attachments[0].url) as r:
buffer = io.BytesIO(await r.read())
self.image = im = Image.open(buffer)
im.save("place.png", "PNG")
msg = "Successfully updated place.png"
if on.startswith("c") or on.startswith("n"):
self.image = None
await ctx.send(f"{msg}\nTurned `OFF` image protection.")
else:
self.image = Image.open("place.png")
await ctx.send(f"{msg}\nTurned `ON` image protection.")
@commands.is_owner()
@draw.command(aliases=["mismatches", "mis"])
async def mismatch(self, ctx, color_to_check=""):
if len(ctx.message.attachments) == 0:
await ctx.send("No image given")
raise discord.ext.commands.errors.BadArgument
fp = "place.png"
if not os.path.isfile(fp):
fp = "placeOFF.png"
if not os.path.isfile(fp):
await ctx.send("No image to compare to")
raise discord.ext.commands.errors.BadArgument
save_pixels = Image.open(fp).convert("RGBA").load()
async with aiohttp.ClientSession() as cs:
async with cs.get(ctx.message.attachments[0].url) as r:
buffer = io.BytesIO(await r.read())
place_pixels = Image.open(buffer).convert("RGBA").load()
im, count = self.find_mismatches(save_pixels, place_pixels, color_to_check)
im.save("mismatches.png", "PNG")
file = discord.File("mismatches.png")
await ctx.send(f"Found {count} mismatches:", file=file)
def find_mismatches(self, save_pixels, place_pixels, color_to_check=""):
im = Image.new(mode="RGBA", size=(1000, 1000), color=(0, 0, 0, 0))
pixels = im.load()
count = 0
for x in range(1000):
for y in range(1000):
r, g, b, a = save_pixels[x, y]
if a != 0:
rp, gp, bp, ap = place_pixels[x, y]
if color_to_check.replace("#", "") == rgb2hex(rp, gp, bp).replace("#", "") or color_to_check == "" and (r, g, b, a) != (
rp, gp, bp, ap):
count += 1
pixels[x, y] = (r, g, b, a)
return im, count
def draw_text(self, text, last_line, last_char) -> tuple[Image.Image, int, int]:
img = Image.new("RGBA", (1000, 1000), (0, 0, 0, 0))
d = ImageDraw.Draw(img)
text = " | " + text.replace("\n", " ")
# splits the text into lines
lines, last_char = self.write_lines(text, last_char)
# draws the lines
while len(lines) > 0:
empty_lines = ["" for _ in range(last_line)]
if len(lines) > self.LINE_HEIGHT - last_line:
li = lines[:self.LINE_HEIGHT - last_line]
lines = lines[self.LINE_HEIGHT - last_line:]
else:
li = lines
lines = []
last_line = (last_line + len(li)) % self.LINE_HEIGHT
text = "\n".join(empty_lines + li)
d.text((0, 0), text, fill=(34, 189, 67, 255), font=self.font)
if last_char > 0:
last_line -= 1
return img, last_line, last_char
def write_lines(self, text: str, last_char: int) -> tuple[list[str], int]:
"""Last char is the last character of the last line
Args:
text (str): The string which should be split into lines
last_char (int): The last character of the last line
Returns:
(list[str], int): A list with all the lines and an int where the last character was placed.
"""
lines = []
while len(text) > 0:
t = " " * last_char # we add spaces to move the text to the right by enough
chars_added = 0 # amount of characters added to the line
# if the text is too long to add to the line, we cut it so it fits perfectly
if len(text) > self.CHAR_WIDTH - last_char:
t += text[:self.CHAR_WIDTH - last_char]
chars_added = len(text[:self.CHAR_WIDTH - last_char])
text = text[self.CHAR_WIDTH - last_char:]
else:
t += text
chars_added = len(text)
text = ""
last_char = (last_char + chars_added) % self.CHAR_WIDTH
lines.append(t)
return lines, last_char
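    # Worked sketch (assuming CHAR_WIDTH is 166): starting at last_char=160,
    # the first line is padded with 160 spaces, fits 6 characters of the text,
    # and the remainder wraps onto fresh 166-character lines, with last_char
    # ending at the final write position modulo CHAR_WIDTH.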
def setup(bot):
bot.add_cog(Draw(bot))
|
StarcoderdataPython
|
1711427
|
import datetime
import logging
from pathlib import Path
import boto3
import requests
from .metadata import Netkan, CkanGroup
from .common import sqs_batch_entries
class NetkanScheduler:
def __init__(self, path, ckan_meta_path, queue, base='NetKAN/', nonhooks_group=False, webhooks_group=False):
self.path = Path(path, base)
self.nonhooks_group = nonhooks_group
self.webhooks_group = webhooks_group
self.ckan_meta_path = ckan_meta_path
# TODO: This isn't super neat, do something better.
self.queue_url = 'test_url'
if queue != 'TestyMcTestFace':
self.client = boto3.client('sqs')
sqs = boto3.resource('sqs')
self.queue = sqs.get_queue_by_name(QueueName=queue)
self.queue_url = self.queue.url
def netkans(self):
# This can easily be recursive with '**/*.netkan', however
# implementing like for like initially.
return (Netkan(f) for f in sorted(self.path.glob('*.netkan'),
key=lambda p: p.stem.casefold()))
def sqs_batch_attrs(self, batch):
return {
'QueueUrl': self.queue_url,
'Entries': batch
}
def _in_group(self, netkan):
if netkan.hook_only():
return self.webhooks_group
else:
return self.nonhooks_group
def schedule_all_netkans(self):
messages = (nk.sqs_message(CkanGroup(self.ckan_meta_path, nk.identifier))
for nk in self.netkans() if self._in_group(nk))
for batch in sqs_batch_entries(messages):
self.client.send_message_batch(**self.sqs_batch_attrs(batch))
def can_schedule(self, max_queued, dev=False, min_credits=200):
if not dev:
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=10)
response = requests.get(
'http://169.254.169.254/latest/meta-data/instance-id'
)
instance_id = response.text
cloudwatch = boto3.client('cloudwatch')
stats = cloudwatch.get_metric_statistics(
Dimensions=[{'Name': 'InstanceId', 'Value': instance_id}],
MetricName='CPUCreditBalance',
Namespace='AWS/EC2',
StartTime=start.strftime("%Y-%m-%dT%H:%MZ"),
EndTime=end.strftime("%Y-%m-%dT%H:%MZ"),
Period=10,
Statistics=['Average'],
)
# A pass consumes around 40 credits, with an accrue rate of 24/hr.
# So running every 2 hours should see using just a touch less than
# we gain in that time period.
creds = 0
try:
creds = stats['Datapoints'][0]['Average']
except IndexError:
logging.error("Couldn't acquire CPU Credit Stats")
if int(creds) < min_credits:
logging.info(
"Run skipped, below credit target (Current Avg: %s)", creds
)
return False
message_count = int(
self.queue.attributes.get(
'ApproximateNumberOfMessages', 0)
)
if message_count > max_queued:
logging.info(
"Run skipped, too many NetKANs to process (%s left)",
message_count
)
return False
return True
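# Minimal usage sketch (paths and queue name below are made-up examples):
#
#   sched = NetkanScheduler('/data/NetKAN', '/data/CKAN-meta',
#                           'NetKANQueue', nonhooks_group=True)
#   if sched.can_schedule(max_queued=100, dev=True):  # dev skips the EC2 credit check
#       sched.schedule_all_netkans()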
|
StarcoderdataPython
|
11222596
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import os
import six
import random
from PIL import Image
from torch.utils import data
import warnings
import matplotlib.pyplot as plt
import torch
import pandas as pd
# -------------------- Extract the full dataset --------------------
# file_path='/home/cxd/python/pycharm/data/BaiduCar/'
#
#
# label_dirname = os.path.join(file_path, 'Label_road02/Label/')
# labels = list()
# for dire in os.listdir(label_dirname):
# for dir in os.listdir(os.path.join(label_dirname, dire, 'Camera 5')):
# labels.append(os.path.join(label_dirname, dire, 'Camera 5/', dir))
# for dir in os.listdir(os.path.join(label_dirname, dire, 'Camera 6')):
# labels.append(os.path.join(label_dirname, dire, 'Camera 6/', dir))
# label_dirname = os.path.join(file_path, 'Label_road03/Label/')
# for dire in os.listdir(label_dirname):
# for dir in os.listdir(os.path.join(label_dirname, dire, 'Camera 5')):
# labels.append(os.path.join(label_dirname, dire, 'Camera 5/', dir))
# for dir in os.listdir(os.path.join(label_dirname, dire, 'Camera 6')):
# labels.append(os.path.join(label_dirname, dire, 'Camera 6/', dir))
# label_dirname = os.path.join(file_path, 'Label_road04/Label/')
# for dire in os.listdir(label_dirname):
# for dir in os.listdir(os.path.join(label_dirname, dire, 'Camera 5')):
# labels.append(os.path.join(label_dirname, dire, 'Camera 5/', dir))
# for dir in os.listdir(os.path.join(label_dirname, dire, 'Camera 6')):
# labels.append(os.path.join(label_dirname, dire, 'Camera 6/', dir))
#
# images = list()
# for ln in labels:
# if 'Label_road02' in ln:
# img_name = ln.replace('Label_road02/Label/', 'ColorImage_road02/ColorImage/')
# img_name = img_name.replace('_bin.png', '.jpg')
# elif 'Label_road03' in ln:
# img_name = ln.replace('Label_road03/Label/', 'ColorImage_road03/ColorImage/')
# img_name = img_name.replace('_bin.png', '.jpg')
# elif 'Label_road04' in ln:
# img_name = ln.replace('Label_road04/Label/', 'ColorImage_road04/ColorImage/')
# img_name = img_name.replace('_bin.png', '.jpg')
# images.append(img_name)
#
# print(len(labels))
# print(len(images))
# c = {'image': images, 'label': labels}  # merge into a new dict c
# data = pd.DataFrame(c)
# print(data.head())
# data.to_csv('./train.csv')
# -------------------- Create training / validation / test splits --------------------
# data=pd.read_csv('./train.csv')
# images=data['image']
# labels=data['label']
#
#
# from sklearn.model_selection import train_test_split
# X_train,X_test,y_train,y_test = train_test_split(images,labels,test_size=0.4)
# X_val,X_test,y_val,y_test = train_test_split(X_test,y_test,test_size=0.25)
#
#
# c = {'image': X_train, 'label': y_train}  # merge into a new dict c
# datax = pd.DataFrame(c)
# datax.to_csv('./train.csv')
#
# c = {'image': X_val, 'label': y_val}  # merge into a new dict c
# datay = pd.DataFrame(c)
# datay.to_csv('./val.csv')
#
# c = {'image': X_test, 'label': y_test}  # merge into a new dict c
# dataz = pd.DataFrame(c)
# dataz.to_csv('./test.csv')
data = pd.read_csv('./train.csv')
images = data['image']
print(len(images))
data = pd.read_csv('./val.csv')
images = data['image']
print(len(images))
data = pd.read_csv('./test.csv')
images = data['image']
print(len(images))
|
StarcoderdataPython
|
3444461
|
# Generated by Django 2.2.10 on 2020-06-03 19:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0007_auto_20200603_1337'),
]
operations = [
migrations.AddField(
model_name='profile',
name='lat',
field=models.FloatField(default=0, max_length=255, verbose_name='Latitude'),
preserve_default=False,
),
migrations.AddField(
model_name='profile',
name='lng',
field=models.FloatField(default=0, max_length=255, verbose_name='Longitude'),
preserve_default=False,
),
]
|
StarcoderdataPython
|
1807167
|
<filename>hooks/charmhelpers/contrib/openstack/ip.py
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
network_get_primary_address,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
ADDRESS_MAP = {
PUBLIC: {
'binding': 'public',
'config': 'os-public-network',
'fallback': 'public-address',
'override': 'os-public-hostname',
},
INTERNAL: {
'binding': 'internal',
'config': 'os-internal-network',
'fallback': 'private-address',
'override': 'os-internal-hostname',
},
ADMIN: {
'binding': 'admin',
'config': 'os-admin-network',
'fallback': 'private-address',
'override': 'os-admin-hostname',
}
}
def canonical_url(configs, endpoint_type=PUBLIC):
"""Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param endpoint_type: str endpoint type to resolve.
:param returns: str base URL for services on the current service unit.
"""
scheme = _get_scheme(configs)
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def _get_scheme(configs):
"""Returns the scheme to use for the url (either http or https)
depending upon whether https is in the configs value.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:returns: either 'http' or 'https' depending on whether https is
configured within the configs context.
"""
scheme = 'http'
if configs and 'https' in configs.complete_contexts():
scheme = 'https'
return scheme
def _get_address_override(endpoint_type=PUBLIC):
"""Returns any address overrides that the user has defined based on the
endpoint type.
Note: this function allows for the service name to be inserted into the
address if the user specifies {service_name}.somehost.org.
:param endpoint_type: the type of endpoint to retrieve the override
value for.
:returns: any endpoint address or hostname that the user has overridden
or None if an override is not present.
"""
override_key = ADDRESS_MAP[endpoint_type]['override']
addr_override = config(override_key)
if not addr_override:
return None
else:
return addr_override.format(service_name=service_name())
def resolve_address(endpoint_type=PUBLIC):
"""Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured, or a Juju 2.0 extra-binding has been used.
    :param endpoint_type: Network endpoint type
"""
resolved_address = _get_address_override(endpoint_type)
if resolved_address:
return resolved_address
vips = config('vip')
if vips:
vips = vips.split()
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
if clustered and vips:
if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
# NOTE: endeavour to check vips against network space
# bindings
try:
bound_cidr = resolve_network_cidr(
network_get_primary_address(binding)
)
for vip in vips:
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except NotImplementedError:
# If no net-splits configured and no support for extra
# bindings/network spaces so we expect a single vip
resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
if net_addr:
resolved_address = get_address_in_network(net_addr, fallback_addr)
else:
# NOTE: only try to use extra bindings if legacy network
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except NotImplementedError:
resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "
"charm state and configuration. (net_type=%s, "
"clustered=%s)" % (net_type, clustered))
return resolved_address
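# Illustrative sketch (config values below are assumptions): with the
# 'os-public-hostname' charm option set to '{service_name}.example.com',
# the override short-circuits the rest of the resolution logic:
#
#   resolve_address(PUBLIC)  # -> 'myservice.example.com'
#   canonical_url(configs)   # -> 'https://myservice.example.com' once the
#                            #    https context is complete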
|
StarcoderdataPython
|
12816414
|
<gh_stars>0
from django.core.management.base import BaseCommand, CommandError
from accounts.models import Plan, ThumbSize
class Command(BaseCommand):
help = 'Create base plans and thumbnail resolutions'
def handle(self, *args, **options):
        # height 0 marks the original (unresized) file, referenced via foreign key
thumb_original = ThumbSize.objects.create(
height = 0,
)
thumb_200 = ThumbSize.objects.create(
height = 200,
)
thumb_400 = ThumbSize.objects.create(
height = 400,
)
plan_basic = Plan.objects.create(
title = 'Basic',
)
plan_basic.thumb_sizes.add(thumb_200)
plan_premium = Plan.objects.create(
title = 'Premium',
enable_original_image = True,
)
plan_premium.thumb_sizes.add(thumb_200)
plan_premium.thumb_sizes.add(thumb_400)
plan_enterprise = Plan.objects.create(
title = 'Enterprise',
enable_original_image = True,
enable_expiring_link = True
)
plan_enterprise.thumb_sizes.add(thumb_200)
plan_enterprise.thumb_sizes.add(thumb_400)
self.stdout.write(self.style.SUCCESS('Successfully created base plans'))
|
StarcoderdataPython
|
9703540
|
import discord
from .utils.u_mongo import Mongo
from discord.ext import commands
from discord.ext.commands import has_permissions
class Admin(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
@has_permissions(administrator=True)
async def set_prefix(self, ctx, *, prefix='='):
"""
Sets the prefix for the server
-set_prefix <=, -, !>
"""
record = await Mongo.get_record('server_settings', 'id', ctx.message.guild.id)
upg = {
"prefix": prefix
}
await Mongo.update_record('server_settings', record, upg)
await ctx.send(f"Prefix is now: {prefix}")
@commands.command(pass_context=True)
@has_permissions(administrator=True)
async def set_descriptor(self, ctx, descriptor='en'):
"""
Set descriptor for translate other languages
-set_descriptor <en, ru>
"""
record = await Mongo.get_record('server_settings', 'id', ctx.message.guild.id)
upg = {
"translator": descriptor
}
await Mongo.update_record('server_settings', record, upg)
await ctx.send(f"Descriptor is now: {descriptor}")
@commands.command(pass_context=True)
@has_permissions(administrator=True)
async def set_emoji(self, ctx, emoji):
"""
Set emoji for money server
-set_emoji <discord.Emoji>
"""
server_id = ctx.message.guild.id
record = await Mongo.get_record('server_settings', 'id', server_id)
upg_emoji = {
"emoji_name":emoji
}
await Mongo.update_record('server_settings', record, upg_emoji)
await ctx.send(f"Money Emoji is now: {emoji}")
def setup(bot):
bot.add_cog(Admin(bot))
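# Example invocations from chat, assuming the server prefix is currently '=':
#   =set_prefix !
#   =set_descriptor ru
#   =set_emoji <discord.Emoji>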
|
StarcoderdataPython
|
9668527
|
w = 9       # default number representation, base 10 (decimal)
x = 0b1010  # binary number, base 2
y = 0o1247  # octal number, base 8
z = 0xa43d  # hexadecimal number, base 16
print(w)
print(type(w))
print(x)
print(type(x))
print(y)
print(type(y))
print(z)
print(type(z))
# decimal to hexadecimal conversion
print(hex(w))
# decimal to octal conversion
print(oct(w))
# decimal to binary conversion
print(bin(w))
# floating point number
a = 10.25
print(a)
print(type(a))
# floats carry roughly 15-16 significant decimal digits of precision
# complex numbers
p = 2 + 3j
print(p.real)  # print the real part
print(p.imag)  # print the imaginary part
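# int() parses strings back into integers when given an explicit base
print(int("1010", 2))   # 10
print(int("1247", 8))   # 679
print(int("a43d", 16))  # 42045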
|
StarcoderdataPython
|
5188971
|
<gh_stars>1-10
import pandas as pd
from goatools.associations import read_gaf
from goatools.base import dnld_gaf
from goatools.base import download_go_basic_obo
from goatools.obo_parser import GODag
from tqdm import tqdm
from linker.constants import *
def download_ontologies():
"""
Download ontologies, a dictionary that maps GO IDs to GO terms. In most cases, we should use the basic OBO file.
:return: a dictionary where key is the gene ontology id ('GO:0000001') and value is the GOTerm class
"""
obo_fname = download_go_basic_obo(prt=None, loading_bar=False)
ontologies = GODag(obo_fname)
return ontologies
def gaf_names_to_id(gaf_filename):
df = pd.read_csv(gaf_filename, comment='!', sep='\t', header=None, dtype=str)
# temp has 2 columns. First is the gene id, next is the gene symbol
# example:
# 'ZDB-MIRNAG-081210-6', 'mir26b'
temp = df.iloc[:, 1:3].values
names_to_id = {symbol: my_id for my_id, symbol in temp}
return names_to_id
def to_id(names, names_to_id_dict):
ids = []
for x in names:
try:
my_id = names_to_id_dict[x.lower()]
ids.append(my_id)
except KeyError as e:
# logger.debug(e)
pass
return ids
def download_associations():
species_associations = {}
gaf_name_to_id = {}
for species, gaf_prefix in tqdm(SPECIES_TO_GAF_PREFIX.items()):
gaf_filename = dnld_gaf(gaf_prefix, prt=None, loading_bar=False)
gaf_name_to_id[species] = gaf_names_to_id(gaf_filename)
assocs = {}
for namespace in GO_NAMESPACES:
associations = read_gaf(gaf_filename, namespace=namespace, go2geneids=False, prt=None)
assocs[namespace] = associations
species_associations[species] = assocs
return species_associations, gaf_name_to_id
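# Usage sketch (SPECIES_TO_GAF_PREFIX and GO_NAMESPACES come from
# linker.constants, so the species key below is an assumption):
#
#   ontologies = download_ontologies()
#   associations, name_to_id = download_associations()
#   gene_ids = to_id(['mir26b'], name_to_id[some_species])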
|
StarcoderdataPython
|
105679
|
<filename>rnn.py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
def rnn_model():
inputs = layers.Input(shape=(None, 28))
x = layers.Bidirectional(layers.LSTM(512, return_sequences=True, activation='relu'))(inputs)
x = layers.LSTM(512, activation='relu')(x)
outputs = layers.Dense(10)(x)
model = keras.Model(inputs, outputs)
return model
# Model
model = rnn_model()
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(learning_rate=0.001),
metrics=['accuracy']
)
# Train
model.fit(x_train, y_train, epochs=5, verbose=2, batch_size=32)
# Evaluate
model.evaluate(x_test, y_test, verbose=2, batch_size=32)
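# Inference example (added): class logits for one test digit; argmax picks the label
logits = model.predict(x_test[:1])
print(logits.argmax(axis=-1))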
|
StarcoderdataPython
|
8043704
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import unicodedata
from typing import TYPE_CHECKING, List, Literal, Optional, Tuple, Type, Union
from .asset import Asset, AssetMixin
from .enums import StickerFormatType, StickerType, try_enum
from .errors import InvalidData
from .mixins import Hashable
from .utils import MISSING, cached_slot_property, find, get, snowflake_time
__all__ = (
"StickerPack",
"StickerItem",
"Sticker",
"StandardSticker",
"GuildSticker",
)
if TYPE_CHECKING:
import datetime
from .guild import Guild
from .state import ConnectionState
from .types.sticker import (
EditGuildSticker,
GuildSticker as GuildStickerPayload,
ListPremiumStickerPacks as ListPremiumStickerPacksPayload,
StandardSticker as StandardStickerPayload,
Sticker as StickerPayload,
StickerItem as StickerItemPayload,
StickerPack as StickerPackPayload,
)
from .user import User
class StickerPack(Hashable):
"""Represents a sticker pack.
.. versionadded:: 2.0
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker pack.
.. describe:: x == y
Checks if the sticker pack is equal to another sticker pack.
.. describe:: x != y
Checks if the sticker pack is not equal to another sticker pack.
Attributes
----------
name: :class:`str`
The name of the sticker pack.
description: :class:`str`
The description of the sticker pack.
id: :class:`int`
The id of the sticker pack.
stickers: List[:class:`StandardSticker`]
The stickers of this sticker pack.
sku_id: :class:`int`
The SKU ID of the sticker pack.
cover_sticker_id: :class:`int`
The ID of the sticker used for the cover of the sticker pack.
cover_sticker: :class:`StandardSticker`
The sticker used for the cover of the sticker pack.
"""
__slots__ = (
"_state",
"id",
"stickers",
"name",
"sku_id",
"cover_sticker_id",
"cover_sticker",
"description",
"_banner",
)
def __init__(self, *, state: ConnectionState, data: StickerPackPayload) -> None:
self._state: ConnectionState = state
self._from_data(data)
def _from_data(self, data: StickerPackPayload) -> None:
self.id: int = int(data["id"])
stickers = data["stickers"]
self.stickers: List[StandardSticker] = [
StandardSticker(state=self._state, data=sticker) for sticker in stickers
]
self.name: str = data["name"]
self.sku_id: int = int(data["sku_id"])
self.cover_sticker_id: int = int(data["cover_sticker_id"])
self.cover_sticker: StandardSticker = get(self.stickers, id=self.cover_sticker_id) # type: ignore
self.description: str = data["description"]
self._banner: int = int(data["banner_asset_id"])
@property
def banner(self) -> Asset:
""":class:`Asset`: The banner asset of the sticker pack."""
return Asset._from_sticker_banner(self._state, self._banner)
def __repr__(self) -> str:
return f"<StickerPack id={self.id} name={self.name!r} description={self.description!r}>"
def __str__(self) -> str:
return self.name
class _StickerTag(Hashable, AssetMixin):
__slots__ = ()
id: int
format: StickerFormatType
async def read(self) -> bytes:
"""|coro|
Retrieves the content of this sticker as a :class:`bytes` object.
.. note::
Stickers that use the :attr:`StickerFormatType.lottie` format cannot be read.
Raises
------
HTTPException
Downloading the asset failed.
NotFound
The asset was deleted.
TypeError
The sticker is a lottie type.
Returns
-------
:class:`bytes`
The content of the asset.
"""
if self.format is StickerFormatType.lottie:
raise TypeError('Cannot read stickers of format "lottie".')
return await super().read()
class StickerItem(_StickerTag):
"""Represents a sticker item.
.. versionadded:: 2.0
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker item.
.. describe:: x == y
Checks if the sticker item is equal to another sticker item.
.. describe:: x != y
Checks if the sticker item is not equal to another sticker item.
Attributes
----------
name: :class:`str`
The sticker's name.
id: :class:`int`
The ID of the sticker.
format: :class:`StickerFormatType`
The format for the sticker's image.
url: :class:`str`
The URL for the sticker's image.
"""
__slots__ = ("_state", "name", "id", "format", "url")
def __init__(self, *, state: ConnectionState, data: StickerItemPayload):
self._state: ConnectionState = state
self.name: str = data["name"]
self.id: int = int(data["id"])
self.format: StickerFormatType = try_enum(StickerFormatType, data["format_type"])
self.url: str = f"{Asset.BASE}/stickers/{self.id}.{self.format.file_extension}"
def __repr__(self) -> str:
return f"<StickerItem id={self.id} name={self.name!r} format={self.format}>"
def __str__(self) -> str:
return self.name
async def fetch(self) -> Union[Sticker, StandardSticker, GuildSticker]:
"""|coro|
Attempts to retrieve the full sticker data of the sticker item.
Raises
------
HTTPException
Retrieving the sticker failed.
Returns
-------
Union[:class:`StandardSticker`, :class:`GuildSticker`]
The retrieved sticker.
"""
data: StickerPayload = await self._state.http.get_sticker(self.id)
cls, _ = _sticker_factory(data["type"]) # type: ignore
return cls(state=self._state, data=data)
class Sticker(_StickerTag):
"""Represents a sticker.
.. versionadded:: 1.6
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker.
.. describe:: x == y
Checks if the sticker is equal to another sticker.
.. describe:: x != y
Checks if the sticker is not equal to another sticker.
Attributes
----------
name: :class:`str`
The sticker's name.
id: :class:`int`
The ID of the sticker.
description: :class:`str`
The description of the sticker.
pack_id: :class:`int`
The ID of the sticker's pack.
format: :class:`StickerFormatType`
The format for the sticker's image.
url: :class:`str`
The URL for the sticker's image.
"""
__slots__ = ("_state", "id", "name", "description", "format", "url")
def __init__(self, *, state: ConnectionState, data: StickerPayload) -> None:
self._state: ConnectionState = state
self._from_data(data)
def _from_data(self, data: StickerPayload) -> None:
self.id: int = int(data["id"])
self.name: str = data["name"]
self.description: str = data["description"]
self.format: StickerFormatType = try_enum(StickerFormatType, data["format_type"])
self.url: str = f"{Asset.BASE}/stickers/{self.id}.{self.format.file_extension}"
def __repr__(self) -> str:
return f"<Sticker id={self.id} name={self.name!r}>"
def __str__(self) -> str:
return self.name
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the sticker's creation time in UTC."""
return snowflake_time(self.id)
class StandardSticker(Sticker):
"""Represents a sticker that is found in a standard sticker pack.
.. versionadded:: 2.0
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker.
.. describe:: x == y
Checks if the sticker is equal to another sticker.
.. describe:: x != y
Checks if the sticker is not equal to another sticker.
Attributes
----------
name: :class:`str`
The sticker's name.
id: :class:`int`
The ID of the sticker.
description: :class:`str`
The description of the sticker.
pack_id: :class:`int`
The ID of the sticker's pack.
format: :class:`StickerFormatType`
The format for the sticker's image.
tags: List[:class:`str`]
A list of tags for the sticker.
sort_value: :class:`int`
The sticker's sort order within its pack.
"""
__slots__ = ("sort_value", "pack_id", "type", "tags")
def _from_data(self, data: StandardStickerPayload) -> None:
super()._from_data(data)
self.sort_value: int = data["sort_value"]
self.pack_id: int = int(data["pack_id"])
self.type: StickerType = StickerType.standard
try:
self.tags: List[str] = [tag.strip() for tag in data["tags"].split(",")]
except KeyError:
self.tags = []
def __repr__(self) -> str:
return f"<StandardSticker id={self.id} name={self.name!r} pack_id={self.pack_id}>"
async def pack(self) -> StickerPack:
"""|coro|
Retrieves the sticker pack that this sticker belongs to.
Raises
------
InvalidData
The corresponding sticker pack was not found.
HTTPException
Retrieving the sticker pack failed.
Returns
-------
:class:`StickerPack`
The retrieved sticker pack.
"""
data: ListPremiumStickerPacksPayload = await self._state.http.list_premium_sticker_packs()
packs = data["sticker_packs"]
pack = find(lambda d: int(d["id"]) == self.pack_id, packs)
if pack:
return StickerPack(state=self._state, data=pack)
raise InvalidData(f"Could not find corresponding sticker pack for {self!r}")
class GuildSticker(Sticker):
"""Represents a sticker that belongs to a guild.
.. versionadded:: 2.0
.. container:: operations
.. describe:: str(x)
Returns the name of the sticker.
.. describe:: x == y
Checks if the sticker is equal to another sticker.
.. describe:: x != y
Checks if the sticker is not equal to another sticker.
Attributes
----------
name: :class:`str`
The sticker's name.
id: :class:`int`
The ID of the sticker.
description: :class:`str`
The description of the sticker.
format: :class:`StickerFormatType`
The format for the sticker's image.
available: :class:`bool`
Whether this sticker is available for use.
guild_id: :class:`int`
The ID of the guild that this sticker is from.
user: Optional[:class:`User`]
The user that created this sticker. This can only be retrieved using :meth:`Guild.fetch_sticker` and
having the :attr:`~Permissions.manage_emojis_and_stickers` permission.
emoji: :class:`str`
The name of a unicode emoji that represents this sticker.
"""
__slots__ = ("available", "guild_id", "user", "emoji", "type", "_cs_guild")
def _from_data(self, data: GuildStickerPayload) -> None:
super()._from_data(data)
self.available: bool = data["available"]
self.guild_id: int = int(data["guild_id"])
user = data.get("user")
self.user: Optional[User] = self._state.store_user(user) if user else None
self.emoji: str = data["tags"]
self.type: StickerType = StickerType.guild
def __repr__(self) -> str:
return f"<GuildSticker name={self.name!r} id={self.id} guild_id={self.guild_id} user={self.user!r}>"
@cached_slot_property("_cs_guild")
def guild(self) -> Optional[Guild]:
"""Optional[:class:`Guild`]: The guild that this sticker is from.
Could be ``None`` if the bot is not in the guild.
.. versionadded:: 2.0
"""
return self._state._get_guild(self.guild_id)
async def edit(
self,
*,
name: str = MISSING,
description: str = MISSING,
emoji: str = MISSING,
reason: Optional[str] = None,
) -> GuildSticker:
"""|coro|
Edits a :class:`GuildSticker` for the guild.
Parameters
----------
name: :class:`str`
The sticker's new name. Must be at least 2 characters.
description: Optional[:class:`str`]
The sticker's new description. Can be ``None``.
emoji: :class:`str`
The name of a unicode emoji that represents the sticker's expression.
reason: :class:`str`
The reason for editing this sticker. Shows up on the audit log.
Raises
------
Forbidden
You are not allowed to edit stickers.
HTTPException
An error occurred editing the sticker.
Returns
-------
:class:`GuildSticker`
The newly modified sticker.
"""
payload: EditGuildSticker = {}
if name is not MISSING:
payload["name"] = name
if description is not MISSING:
payload["description"] = description
if emoji is not MISSING:
try:
emoji = unicodedata.name(emoji)
except TypeError:
pass
else:
emoji = emoji.replace(" ", "_")
payload["tags"] = emoji
data: GuildStickerPayload = await self._state.http.modify_guild_sticker(
self.guild_id, self.id, payload, reason=reason
)
return GuildSticker(state=self._state, data=data)
async def delete(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the custom :class:`Sticker` from the guild.
You must have :attr:`~Permissions.manage_emojis_and_stickers` permission to
do this.
Parameters
----------
reason: Optional[:class:`str`]
The reason for deleting this sticker. Shows up on the audit log.
Raises
------
Forbidden
You are not allowed to delete stickers.
HTTPException
An error occurred deleting the sticker.
"""
await self._state.http.delete_guild_sticker(self.guild_id, self.id, reason=reason)
def _sticker_factory(
sticker_type: Literal[1, 2]
) -> Tuple[Type[Union[StandardSticker, GuildSticker, Sticker]], StickerType]:
value = try_enum(StickerType, sticker_type)
if value == StickerType.standard:
return StandardSticker, value
elif value == StickerType.guild:
return GuildSticker, value
else:
return Sticker, value
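# Internal usage sketch: raw payloads are turned into the right class via the
# factory (``state`` and ``data`` come from the HTTP/gateway layer), matching
# how StickerItem.fetch() uses it above:
#
#   cls, sticker_type = _sticker_factory(data["type"])
#   sticker = cls(state=state, data=data)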
|
StarcoderdataPython
|
3203977
|
"""Contract test package.
Modules:
test_ping
"""
|
StarcoderdataPython
|
11340590
|
# xpyBuild - eXtensible Python-based Build System
#
# This class is responsible for working out what tasks need to run, and for
# scheduling them
#
# Copyright (c) 2013 - 2017 Software AG, Darmstadt, Germany and/or its licensors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id: stacktrace.py 301527 2017-02-06 15:31:43Z matj $
#
import traceback, signal, sys, threading
from buildcommon import isWindows
def print_stack_trace(sig, frame):
"""Dump a python stack trace to stderr on a signal"""
sys.stderr.write('\nTraceback:\n')
sys.stderr.flush()
id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
code = []
for threadId, stack in sys._current_frames().items():
sys.stderr.write("\n# Thread: %s(%d)\n" % (id2name.get(threadId, ""), threadId))
for filename, lineno, name, line in traceback.extract_stack(stack):
sys.stderr.write('\tFile: "%s", line %d, in %s\n' % (filename, lineno, name))
if line:
sys.stderr.write('\t\t%s\n' % line.strip())
sys.stderr.flush()
def listen_for_stack_signal():
if not isWindows():
signal.signal(signal.SIGUSR1, print_stack_trace)
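# Usage sketch: call listen_for_stack_signal() once at startup; afterwards
# `kill -USR1 <pid>` (non-Windows only) dumps every thread's stack to stderr.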
|
StarcoderdataPython
|
8105834
|
<gh_stars>0
from typing import Callable
from inspect import signature
from enum import Enum
import functools
from .exceptions import InvalidParameters, NoPermissionError
from .context import MessageContext
class Mode(Enum):
POSITIONAL = 0
NON_POSITIONAL = 1
OWNER_ONLY = 2
FREE = 3
class Parameter(Enum):
CONTEXT = 0
POSITIONAL = 1
NON_POSITIONAL = 2
class Command:
def __init__(self, fn: Callable, prefix: str):
self.signature = signature(fn)
self.function = fn
self.uses_non_positional = False
self.parse_params()
self.prefix = prefix
self.mode = Mode.FREE
functools.update_wrapper(self, fn)
def parse_params(self):
args = str(self.signature)[1:-1:].replace(" ", "").split(",")
mode = Mode.POSITIONAL
self.length = len(args)
if args == ['']:
raise InvalidParameters()
args[0] = Parameter.CONTEXT
for index, arg in enumerate(args[1:], 1):
if arg == "*":
mode = Mode.NON_POSITIONAL
self.length -= 1
continue
if mode == Mode.POSITIONAL:
args[index] = Parameter.POSITIONAL
if mode == Mode.NON_POSITIONAL:
args[index] = Parameter.NON_POSITIONAL
if mode == Mode.NON_POSITIONAL:
break
self.params = args
async def __call__(self, message: MessageContext, user: str = None):
message._content = message.content[len(
self.prefix + self.function.__name__ + " "):
] # Removes the invoker
if self.mode == Mode.FREE:
return await self.function(message)
elif self.mode == Mode.OWNER_ONLY and not user:
return await self.function(message)
elif self.mode == Mode.OWNER_ONLY and user:
return await self.function(message)
else:
raise NoPermissionError
# In progress # noqa
|
StarcoderdataPython
|
11307388
|
<filename>resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QGraphicsAnchor.py<gh_stars>1-10
# encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QGraphicsAnchor(__PySide_QtCore.QObject):
# no doc
def setSizePolicy(self, *args, **kwargs): # real signature unknown
pass
def setSpacing(self, *args, **kwargs): # real signature unknown
pass
def sizePolicy(self, *args, **kwargs): # real signature unknown
pass
def spacing(self, *args, **kwargs): # real signature unknown
pass
def unsetSpacing(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
staticMetaObject = None # (!) real value is '<PySide.QtCore.QMetaObject object at 0x0000000003F3CA88>'
|
StarcoderdataPython
|
6699637
|
<reponame>Appnet1337/OSINT-SAN
from settings import gmap_api, ipstack_api
import requests
import gmplot
# from plugins.api import ipstack
import webbrowser
import re
# from plugins.api import gmap
from ipaddress import *
from plugins.webosint.who.whois import *
if not ipstack_api:
    print("Add the ipstack API key in settings.py")
if not gmap_api:
    print("Add the Google Maps heatmap API key in settings.py")
def IPHeatmap():
print('''
1) Trace single IP
2) Trace multiple IPs''')
choice = input("OPTIONS >> ")
if choice == '1':
IP = input("Enter the IP : ")
read_single_IP(IP)
elif choice == '2':
IP_file = input("Enter the IP File Location : ")
read_multiple_IP(IP_file)
else:
print("\nError: Please choose an appropriate option")
def read_single_IP(IP):
    print('[ + ]' + " Gathering information: %s ..." % IP + '\n')
    if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", IP):
        print("Invalid IP Address")
        IPHeatmap()
        return
lats = []
lons = []
r = requests.get(f"http://api.IPstack.com/{IP}", params={"access_key": ipstack_api})
response = r.json()
print()
    print(f"[ + ] IP address: {response['ip']}")
    print(f"[ + ] Region: {response['region_name']}")
    print(f"[ + ] Country: {response['country_name']}")
    print(f"[ + ] Latitude: {response.get('latitude')}")
    print(f"[ + ] Longitude: {response.get('longitude')}")
    if input("[ + ] Show even more information (Y/N): ").upper() == "Y":
whois_more(IP)
if response.get('latitude') and response.get('longitude'):
lats = response['latitude']
lons = response['longitude']
query = f"{lats},+{lons}"
maps_url = f"https://maps.google.com/maps?q={query}"
print()
        openWeb = input(" [ + ] Open the GPS location in a web browser? (Y/N) ")
if openWeb.upper() == 'Y':
webbrowser.open(maps_url, new=2)
else:
pass
def read_multiple_IP(IP_file):
lats = []
lons = []
try:
f = open(IP_file, "r")
f1 = f.readlines()
        print('[ + ]' + " Loading information, go grab some tea and relax; more than a hundred IPs takes a while..." + '\n')
for line in f1:
IP=re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$",line)
IP=IP.group()
r = requests.get("http://api.IPstack.com/" + IP + "?access_key=" + ipstack_api)
response = r.json()
if response['latitude'] and response['longitude']:
lats.append(response['latitude'])
lons.append(response['longitude'])
heat_map(lats,lons)
except IOError:
        print("ERROR: File does not exist\n")
IPHeatmap()
def heat_map(lats, lons):
gmap3 = gmplot.GoogleMapPlotter(20.5937, 78.9629, 5)
gmap3.heatmap(lats, lons)
gmap3.scatter(lats, lons, '#FF0000', size=50, marker=False)
gmap3.plot(lats, lons, 'cornflowerblue', edge_width = 3.0)
    save_location = input(" [ + ] Enter a directory to save the file: ")
gmap3.apikey = gmap_api
location = save_location + "/heatmap.html"
gmap3.draw(location)
print("[ + ] Heatmap saved at " + location)
    openWeb = input(" [ + ] Open in a web browser? (Y/N): ")
if openWeb.upper() == 'Y':
webbrowser.open(url=("file:///"+location))
else:
pass
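# Entry point sketch: IPHeatmap() prompts for a single IP or a file of IPs;
# the multi-IP path renders a Google Maps heatmap, so gmap_api must be set.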
|
StarcoderdataPython
|
9773188
|
<reponame>kevinconway/venvctrl
"""Test suites for virtual environment features."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import subprocess
import uuid
import pytest
from venvctrl import api
@pytest.fixture(scope="function")
def random():
"""Get a random UUID."""
return str(uuid.uuid4())
@pytest.fixture(scope="function")
def venv(random, tmpdir):
"""Get an initialized venv."""
v = api.VirtualEnvironment(str(tmpdir.join(random)))
v.create()
return v
def test_create(random, tmpdir):
"""Test if new virtual environments can be created."""
path = str(tmpdir.join(random))
venv = api.VirtualEnvironment(path)
try:
venv.create()
except subprocess.CalledProcessError as exc:
assert False, exc.output
assert tmpdir.join(random).check()
def test_pip(venv):
"""Test the ability to manage packages with pip."""
venv.install_package("confpy")
assert venv.has_package("confpy")
venv.uninstall_package("confpy")
assert not venv.has_package("confpy")
path = os.path.join(venv.path, "..", "requirements.txt")
with open(path, "w") as req_file:
req_file.write("confpy{0}".format(os.linesep))
venv.install_requirements(path)
assert venv.has_package("confpy")
def test_relocate(venv):
"""Test the ability to relocate a venv."""
path = "/testpath"
pypy_shebang = "#!/usr/bin/env pypy"
f = open(venv.bin.abspath + "/pypy_shebang.py", "w")
f.write(pypy_shebang)
f.close()
venv.relocate(path)
for activate in venv.bin.activates:
assert activate.vpath == path
for script in venv.bin.files:
if script.shebang:
assert script.shebang == "#!{0}/bin/python".format(path)
def test_relocate_long_shebang(venv):
"""Test the ability to relocate a venv."""
path = "/testpath"
long_shebang = (
"#!/bin/sh{0}"
"'''exec' /tmp/rpmbuild/python \"$0\" \"$@\"{0}"
"' '''{0}".format(os.linesep)
)
f = open(venv.bin.abspath + "/long_shebang.py", "w")
f.write(long_shebang)
f.close()
venv.relocate(path)
for activate in venv.bin.activates:
assert activate.vpath == path
for script in venv.bin.files:
shebang = script.shebang
if shebang:
shebang = shebang.split(os.linesep)
if len(shebang) == 1:
assert shebang == ["#!{0}/bin/python".format(path)]
elif len(shebang) == 3:
assert shebang == [
"#!/bin/sh",
"'''exec' {0}/bin/python \"$0\" \"$@\"".format(path),
"' '''",
]
else:
assert False, "Invalid shebang length: {0}, {1}".format(
len(shebang), script.shebang
)
def test_relocate_no_original_path_pth(venv):
"""Test that the original path is not found in .pth files."""
path = "/testpath"
original_path = venv.abspath
f = open(venv.bin.abspath + "/something.pth", "w")
f.write(original_path)
f.close()
venv.relocate(path)
dirs = [venv]
while dirs:
current = dirs.pop()
dirs.extend(current.dirs)
for file_ in current.files:
if file_.abspath.endswith(".pth"):
with open(file_.abspath, "r") as source:
try:
lines = source.readlines()
except UnicodeDecodeError:
# Skip any non-text files. Binary files are out of
# scope for this test.
continue
for line in lines:
assert original_path not in line, file_.abspath
|
StarcoderdataPython
|
9705194
|
from abc import ABC
import numpy as np
from gym import spaces
import copy
from .environment import Environment
class Gridworldpy(object):
def __init__(self, size=5):
self.size = int(size)
self.x = int(0)
self.y = int(0)
self.count = 0
nums = self.size **2
self.nums = nums
self.numa = 4
self.observation_space = spaces.Box(low=np.zeros(nums), high=np.ones(nums), dtype=np.float32)
self.action_space = spaces.Discrete(self.numa)
self._P = None
self._R = None
def reset(self):
self.x = 0
self.y = 0
self.count = 0
return self.get_state()
def step(self, action):
a = int(action)
if a == 0:
self.y -= 1
elif a ==1:
self.y += 1
elif a ==2:
self.x -= 1
elif a==3:
self.x += 1
else:
            raise Exception("Action out of range! Must be in [0,3]: " + str(a))
self.x = int(np.clip(self.x, 0, self.size-1))
self.y = int(np.clip(self.y, 0, self.size-1))
self.count += 1
reward = -1.0
return self.get_state(), reward, self.is_terminal()
def get_state(self):
x = np.zeros(self.nums, dtype=np.float32)
x[self.x*self.size + self.y] = 1
return x
def is_terminal(self):
return (self.x == self.size-1 and self.y == self.size-1) or (self.count > 500)
@property
def P(self):
        if self._P is None:
self._P = np.zeros((self.nums, self.numa, self.nums))
for x in range(self.size):
for y in range(self.size):
s1 = x*self.size + y
for a in range(self.numa):
x2 = x + 0
y2 = y + 0
if a == 0:
y2 = y - 1
elif a == 1:
y2 = y + 1
elif a == 2:
x2 = x - 1
elif a == 3:
x2 = x + 1
x2 = int(np.clip(x2, 0, self.size - 1))
y2 = int(np.clip(y2, 0, self.size - 1))
s2 = x2*self.size + y2
self._P[s1, a, s2] = 1.0
self._P[-1, :, :] = 0
self._P[-1, :, -1] = 1
return self._P
@property
def R(self):
        if self._R is None:
self._R = np.ones((self.nums, self.numa)) * -1.
self._R[-1, :] = 0.
return self._R
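# policy_evaluationv solves the Bellman system in closed form: with
# P_pi[s, s'] = sum_a pi(a|s) * P[s, a, s'] and b[s] = sum_a pi(a|s) * R[s, a],
# the state values satisfy the linear system (I - gamma * P_pi) v = b.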
def policy_evaluationv(env, policy, gamma):
Psa = env.P
R = env.R
pi = np.zeros((env.nums, env.numa))
Ps = np.zeros((env.nums, env.nums))
for s in range(env.nums):
x = np.zeros(env.nums)
x[s] = 1
if np.ndim(x) ==1:
x = x[np.newaxis, :]
if policy.basis:
x = policy.basis.basify(x)
pi[s, :] = policy.np_get_p(x)
Ps[s, :] += np.sum([pi[s, a] * Psa[s,a] for a in range(env.numa)], axis=0)
b = np.sum(pi*R, axis=1)
# Ps2 = np.array([np.sum(pi[s].reshape(-1, 1) * Psa[s], axis=0) for s in range(env.nums)])
# assert Ps == Ps2, "Ps did not match Ps2"
I = np.eye(env.nums)
if gamma == 1:
gamma = 1.-1e-8
v = np.linalg.solve(I-gamma*Ps,b)
return v
def policy_evaluationq(env, policy, gamma):
Psas = env.P
pi = np.zeros((env.nums, env.numa))
v = policy_evaluationv(env, policy, gamma)
q = np.zeros((env.nums, env.numa))
for s in range(env.nums):
for a in range(env.numa):
q[s,a] = np.sum(Psas[s,a, :] * v)
# x = np.zeros(env.nums)
# x[s] = 1
# if np.ndim(x) ==1:
# x = x[np.newaxis, :]
# if policy.basis:
# x = policy.basis.basify(x)
# pi[s, :] = policy.np_get_p(x)
#
# for s in range(env.nums):
# for a in range(env.numa):
# for s2 in range(env.nums):
# Psa[s*env.numa+a, s2:(s2+env.numa)] += np.array([Psas[s,a,s2] * pi[s2, a2] for a2 in range(env.numa)])
# b = (pi*R).reshape(-1)
#
# I = np.eye(env.nums*env.numa)
# if gamma == 1:
# gamma = 1.-1e-6
# q = np.linalg.solve(I-gamma*Psa,b)
return q
def policy_iteration(mdp, gamma=1, iters=5, plot=True):
'''
Performs policy iteration on an mdp and returns the value function and policy
:param mdp: mdp class (GridWorld_MDP)
:param gam: discount parameter should be in (0, 1]
:param iters: number of iterations to run policy iteration for
:return: two numpy arrays of length |S|. U, pi where U is the value function and pi is the policy
'''
    pi = np.zeros(mdp.num_states, dtype=int)
b = R = np.array([mdp.R(s) for s in range(mdp.num_states)])
Ptensor = build_p_tensor(mdp)
I = np.eye(mdp.num_states)
U = np.zeros(mdp.num_states)
Ustart = []
for i in range(iters):
# policy evaluation - solve AU = b with (A^TA)^{-1}A^Tb
P = build_pi_p_matrix(mdp,pi)
Ainv = np.linalg.pinv(I - gamma*P)
U = np.dot(Ainv,b)
print(U)
Ustart += [U[mdp.loc2state[mdp.start]]]
# policy improvement
for s in range(mdp.num_states):
MEU = np.dot(Ptensor[s], U)
pi[s] = np.argmax(MEU)
if plot:
fig = plt.figure()
plt.title("Policy Iteration with $\gamma={0}$".format(gamma))
plt.xlabel("Iteration (k)")
plt.ylabel("Utility of Start")
plt.ylim(-2, 1)
plt.plot(range(1,len(Ustart)+1),Ustart)
pp = PdfPages('./plots/piplot.pdf')
pp.savefig(fig)
plt.close()
pp.close()
return U, pi
def build_p_tensor(mdp):
'''
Returns an s x a x s' tensor
'''
P = np.zeros((mdp.num_states, mdp.num_actions, mdp.num_states))
for s in range(mdp.num_states):
for a in range(mdp.num_actions):
for s2, p in mdp.P_snexts(s, a).items():
if not mdp.is_absorbing(s2):
P[s, a, s2] = p
return P
def build_pi_p_matrix(mdp,pi):
'''
Returns an s x s' matrix
'''
P = np.zeros((mdp.num_states, mdp.num_states))
for s in range(mdp.num_states):
a = pi[s]
for s2, p in mdp.P_snexts(s, a).items():
if not mdp.is_absorbing(s2):
P[s, s2] = p
return P
|
StarcoderdataPython
|
230780
|
#!/usr/bin/env python
import main
import unittest
class Tests(unittest.TestCase):
def test_area_of_triangle(self):
self.assertEqual(main.get_area_of_triangle(3, 4, 5), 6.0)
def test_negative_number_rejected(self):
self.assertRaises(main.InvalidTriangleException,
main.get_area_of_triangle, 1, 11, -4)
def test_invalid_triangle_dimensions_rejected(self):
self.assertRaises(main.InvalidTriangleException,
main.get_area_of_triangle, 1, 10, 12)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
8074339
|
<gh_stars>0
""" device_wrangler.py
Instantiate devices and perform initial configuration
"""
# from typing import Dict
from smbus2 import SMBus, i2c_msg
class device_wrangler(object):
def __init__(self, device_assignments):
self.bus = SMBus(1)
self.devices = {}
print('Initializing Devices')
for name, properties in device_assignments.items():
self.devices[name] = properties['device'](self.bus, properties)
print(" Initialized {:12} {:}".format(name, properties['comment']))
# Do Not Delete
if __name__ == "__main__":
print("Tried to execute device_wrangler class definition - EXITING")
|
StarcoderdataPython
|
6485988
|
import os
import requests
from flask_restful import Resource
from flask_restful import reqparse
class WeatherEndpoint(Resource):
def get(self):
parser = reqparse.RequestParser()
        # parser.add_argument('longitude', required=True, type=float, help='longitude is a required argument')
        # parser.add_argument('latitude', required=True, type=float, help='latitude is a required argument')
parser.add_argument('city_name', required=True, type=str)
parser.add_argument('country_name', required=True, type=str)
args = parser.parse_args()
params = {
# "lat" : args['latitude'],
# "lon" : args['longitude'],
"city": args['city_name'],
"country": args['country_name'],
"key": os.getenv("WEATHER_API_KEY"),
"days": 5
}
url = "https://api.weatherbit.io/v2.0/forecast/daily"
r = requests.get(url, params=params)
j = r.json()
final = {}
final['city_name'] = j['city_name']
final['lon'] = j['lon']
final['lat'] = j['lat']
final['country_code'] = j['country_code']
res = []
for day in j["data"]:
single_res = {}
single_res["date"] = day["datetime"]
single_res["rainfall_probability"] = day["pop"]
single_res["rainfall_amount"] = day["precip"]
single_res["max_temperature"] = day["max_temp"]
single_res["min_temperature"] = day["min_temp"]
single_res["snow"] = day["snow"]
single_res["weather"] = day["weather"]
res.append(single_res)
final['data'] = res
return final
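# Hypothetical usage (the route registration lives outside this file):
#   GET /weather?city_name=Berlin&country_name=DE
# would return city metadata plus a 5-day list of rainfall/temperature entries.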
|
StarcoderdataPython
|
9603761
|
<filename>qiita_pet/test/test_prep_template.py
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from os.path import join, exists
from os import remove
from json import loads
from qiita_pet.test.tornado_test_base import TestHandlerBase
from qiita_db.util import get_count, get_mountpoint
from qiita_db.metadata_template.prep_template import PrepTemplate
class TestPrepTemplateHandler(TestHandlerBase):
def setUp(self):
super(TestPrepTemplateHandler, self).setUp()
uploads_dp = get_mountpoint('uploads')[0][1]
self.new_prep = join(uploads_dp, '1', 'new_template.txt')
with open(self.new_prep, 'w') as f:
f.write("sample_name\tnew_col\nSKD6.640190\tnew_value\n")
self.broken_prep = join(uploads_dp, '1', 'broke_template.txt')
with open(self.broken_prep, 'w') as f:
f.write("sample_name\tbroke \col\nSKD6.640190\tnew_value\n")
def tearDown(self):
super(TestPrepTemplateHandler, self).tearDown()
if exists(self.new_prep):
remove(self.new_prep)
if exists(self.broken_prep):
remove(self.broken_prep)
def test_post(self):
new_prep_id = get_count('qiita.prep_template') + 1
arguments = {'study_id': '1',
'data-type': '16S',
'prep-file': 'new_template.txt'}
response = self.post('/prep_template/', arguments)
self.assertEqual(response.code, 200)
# Check that the new prep template has been created
self.assertTrue(PrepTemplate.exists(new_prep_id))
def test_post_broken_header(self):
arguments = {'study_id': '1',
'data-type': '16S',
'prep-file': 'broke_template.txt'}
response = self.post('/prep_template/', arguments)
self.assertEqual(response.code, 200)
self.assertIn('broke \\\\col', response.body)
def test_patch(self):
arguments = {'op': 'replace',
'path': '/1/investigation_type/',
'value': 'Cancer Genomics'}
response = self.patch('/prep_template/', data=arguments)
self.assertEqual(response.code, 200)
exp = {'status': 'success', 'message': ''}
self.assertEqual(loads(response.body), exp)
def test_delete(self):
# Create a new prep template so we can delete it
response = self.delete('/prep_template/', data={'prep-template-id': 1})
self.assertEqual(response.code, 200)
exp = {
"status": "error",
"message": "Cannot remove prep template 1 because it has an "
"artifact associated with it"}
self.assertEqual(loads(response.body), exp)
class TestPrepTemplateGraphHandler(TestHandlerBase):
def test_get(self):
response = self.get('/prep_template/1/graph/')
self.assertEqual(response.code, 200)
# job ids are generated by random so testing composition
obs = loads(response.body)
self.assertEqual(obs['message'], '')
self.assertEqual(obs['status'], 'success')
self.assertEqual(11, len(obs['nodes']))
self.assertIn(
['artifact', 'FASTQ', 1, 'Raw data 1\n(FASTQ)', 'artifact'],
obs['nodes'])
self.assertIn(
['artifact', 'Demultiplexed', 2,
'Demultiplexed 1\n(Demultiplexed)', 'artifact'],
obs['nodes'])
self.assertIn(
['artifact', 'Demultiplexed', 3,
'Demultiplexed 2\n(Demultiplexed)', 'artifact'],
obs['nodes'])
self.assertIn(['artifact', 'BIOM', 4, 'BIOM\n(BIOM)', 'artifact'],
obs['nodes'])
self.assertIn(['artifact', 'BIOM', 5, 'BIOM\n(BIOM)', 'artifact'],
obs['nodes'])
self.assertIn(['artifact', 'BIOM', 6, 'BIOM\n(BIOM)', 'artifact'],
obs['nodes'])
self.assertEqual(3, len([n for dt, _, _, n, _ in obs['nodes']
if n == 'Pick closed-reference OTUs' and
dt == 'job']))
self.assertEqual(2, len([n for dt, _, _, n, _ in obs['nodes']
if n == 'Split libraries FASTQ' and
dt == 'job']))
self.assertEqual(10, len(obs['edges']))
self.assertEqual(2, len([x for x, y in obs['edges'] if x == 1]))
self.assertEqual(3, len([x for x, y in obs['edges'] if x == 2]))
self.assertEqual(1, len([x for x, y in obs['edges'] if y == 2]))
self.assertEqual(1, len([x for x, y in obs['edges'] if y == 3]))
self.assertEqual(1, len([x for x, y in obs['edges'] if y == 4]))
self.assertEqual(1, len([x for x, y in obs['edges'] if y == 5]))
self.assertEqual(1, len([x for x, y in obs['edges'] if y == 6]))
self.assertIsNone(obs['workflow'])
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1674648
|
"""Public interface for btlewrap."""
import sys
# This check must be run first, so that it fails before loading the other modules.
# Otherwise we do not get a clean error message.
if sys.version_info <= (3, 4):
raise ValueError('this library requires at least Python 3.4. ' +
'You\'re running version {}.{} from {}.'.format(
sys.version_info.major,
sys.version_info.minor,
sys.executable))
|
StarcoderdataPython
|
6574135
|
"""
Wrapper around mypy which prevents the number of typecheck errors from increasing
but which does not force you to fix them all.
Developed against mypy 0.770
Verified to work with 0.790
"""
import json
import os
import re
import subprocess
import sys
from dataclasses import dataclass
from typing import List, Optional, Pattern
import mypy.version
from dataclasses_json import dataclass_json # type:ignore
ALLOWABLE_ERRORS_FILE_NAME = ".mypykaizen.json"
LINE_SANITIZATION_PATTERN: Pattern[str] = re.compile(r"note: .* defined here$")
WINDOWS_PATH_START: Pattern[str] = re.compile(r"^[A-Za-z]\:\\")
@dataclass_json
@dataclass
class AllowableErrors:
"""Class to keep track of the allowable errors"""
file_version: str = "v1"
mypy_version: Optional[str] = None
# TODO: Consider updating this to be aware of the -p parameter (what mypy is
# checking) and possibly other arguments as well
total_errors: Optional[int] = None
files_in_error: Optional[int] = None
last_full_output: Optional[List[str]] = None
@classmethod
def load(cls) -> "AllowableErrors":
if os.path.isfile(ALLOWABLE_ERRORS_FILE_NAME):
try:
with open(ALLOWABLE_ERRORS_FILE_NAME, "rt") as f:
return AllowableErrors.from_json(f.read())
except json.decoder.JSONDecodeError:
print("mypykaizen: Failed to decode errors file! Continuing any way")
return AllowableErrors()
def save(self) -> None:
with open(ALLOWABLE_ERRORS_FILE_NAME, "wt") as f:
f.write(self.to_json(indent=4))
def sanitize_output_lines(output_lines: List[str]) -> List[str]:
"""
Strips output lines from the file that aren't useful / lead to issues with
determinism
"""
def _sanitize_line(line: str) -> Optional[str]:
is_windows_prefixed = bool(WINDOWS_PATH_START.match(line))
if is_windows_prefixed:
splitline = line.split(":", maxsplit=2)
if len(splitline) < 3:
print("Unexpected input line", line)
return line
drive_prefix, path, trailing = splitline
path = f"{drive_prefix}:{path}"
else:
splitline = line.split(":", maxsplit=1)
if len(splitline) < 2:
print("Unexpected input line", line)
return line
path, trailing = splitline
# Normalize the path to a POSIX path standard, this _should_ provide compatibility
# with windows machines
if os.altsep is not None:
# This _should_ replace "\\" with "/" on windows systems
# For reference: https://docs.python.org/3/library/os.html#os.sep
path = path.replace(os.sep, os.altsep)
if (is_windows_prefixed or path.startswith("/")) and LINE_SANITIZATION_PATTERN.search(trailing):
return None
return f"{path}:{trailing}"
return list(filter(None, map(_sanitize_line, output_lines)))
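# Example of the intended effect: a note line such as
#   /repo/pkg/mod.py:10: note: "f" defined here
# is dropped entirely, while other lines are kept with os.sep normalized to "/".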
def main() -> None:
# Run mypy:
result = subprocess.run(
["mypy"] + sys.argv[1:],
text=True,
# Redirect input/out streams
stdin=sys.stdin,
stderr=sys.stderr,
stdout=subprocess.PIPE,
)
    # Note that stderr should be redirected so we don't have to worry about that - a cleaner
# approach would be to create a "capturing output stream" or something so sys.stdout
# is updated in real time
if result.stdout:
sys.stdout.write(result.stdout)
if result.returncode not in {0, 1} or not result.stdout:
# Ex: return code 2 seems to be used for when bad args are provided:
print("mypykaizen: Not active")
exit(result.returncode)
print()
allowable_errors = AllowableErrors.load()
needs_save = False
if mypy.version.__version__ != allowable_errors.mypy_version:
print()
print(f"mypykaizen: mypy version change - saved data from {allowable_errors.mypy_version}")
print(f" current version is {mypy.version.__version__}")
print()
allowable_errors.mypy_version = mypy.version.__version__
# Note that we are not forcing a save just when the version changes.
output_lines = result.stdout.splitlines()
last_line = output_lines[-1]
output_lines = output_lines[:-1] # Remove the last line which is just the summary
output_lines = sanitize_output_lines(output_lines)
output_lines.sort() # Sort them as it does not look like mypy is deterministic
if re.match(r"^Success: .*", last_line):
assert result.returncode == 0, result.returncode
print("mypykaizen: No errors!")
print("mypykaizen: SUCCESS")
# No longer allow any errors:
if allowable_errors.total_errors is None or allowable_errors.total_errors != 0:
allowable_errors.total_errors = 0
needs_save = True
if allowable_errors.files_in_error is None or allowable_errors.files_in_error != 0:
allowable_errors.files_in_error = 0
needs_save = True
if allowable_errors.last_full_output is None or allowable_errors.last_full_output != output_lines:
allowable_errors.last_full_output = output_lines
needs_save = True
if needs_save:
allowable_errors.save()
exit(result.returncode)
fail_match = re.match(r"^Found (?P<total>\d+) errors? in (?P<files>\d+) files? .*", last_line)
if not fail_match:
print("mypykaizen: Neither success nor failure for last line:")
print(last_line)
exit(10) # Arbitrary but not 0, 1, or 2 (the codes I have seen used by mypy)
total_errors = int(fail_match.group("total"))
files_in_error = int(fail_match.group("files"))
# Now check and do a comparison:
errors_increased = False
# Check total errors:
if allowable_errors.total_errors is None:
print(f"mypykaizen: Initializing total_errors to {total_errors}")
allowable_errors.total_errors = total_errors
needs_save = True
elif total_errors > allowable_errors.total_errors:
# Errors have increased
errors_increased = True
print(
f"mypykaizen: ERROR - Number of total errors has increased from\n"
f" {allowable_errors.total_errors} to {total_errors}"
)
elif total_errors < allowable_errors.total_errors:
print(
f"mypykaizen: YAY - Number of total errors has DECREASED from\n"
f" {allowable_errors.total_errors} to {total_errors}!!\n"
f" GOOD JOB - have a 🍪!"
)
allowable_errors.total_errors = total_errors
needs_save = True
else:
print(f"mypykaizen: total_errors unchanged at {total_errors}")
# Check files in error counts
if allowable_errors.files_in_error is None:
# init
print(f"mypykaizen: Initializing files_in_error to {files_in_error}")
allowable_errors.files_in_error = files_in_error
needs_save = True
elif files_in_error > allowable_errors.files_in_error:
# Errors have increased
errors_increased = True
print(
f"mypykaizen: ERROR - Number of total files_in_error has increased from\n"
f" {allowable_errors.files_in_error} to {files_in_error}"
)
elif files_in_error < allowable_errors.files_in_error:
# Decreased
print(
f"mypykaizen: YAY - Number of files_in_error errors has DECREASED from\n"
f" {allowable_errors.files_in_error} to {files_in_error}!!\n"
f" GOOD JOB - have a 🍪!"
)
allowable_errors.files_in_error = files_in_error
needs_save = True
else:
# No change
print(f"mypykaizen: files_in_error unchanged at {files_in_error}")
# Display a simplified diff for new type checking errors which have been introduced
if errors_increased and allowable_errors.last_full_output:
import difflib
print(f"mypykaizen: Differences")
for l in difflib.unified_diff(allowable_errors.last_full_output, output_lines, n=0):
if not l:
continue
# Only looking at the lines which start with + for new addition
if l.startswith("+++"):
continue
if not l.startswith("+"):
continue
print(" " * 3, l[1:]) # Print adds an extra space
# Note as coded, that this technically allows you to introduce new type errors if you
# fix an equal number. This is largely unintentional but does make it super easy to
# support refactoring usecases where a bunch of line numbers change
if not errors_increased and (
allowable_errors.last_full_output is None or allowable_errors.last_full_output != output_lines
):
allowable_errors.last_full_output = output_lines
needs_save = True
if needs_save:
allowable_errors.save()
if errors_increased:
exit(11) # Arbitrary but not 0, 1, or 2
print("mypykaizen: SUCCESS, but try and clean some of these problems up :)")
assert total_errors + files_in_error > 0 # Exit on success earlier
exit(0)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1825010
|
<gh_stars>0
def evaluate(s):
su = 0
last_op = '+'
i = 0
while i < len(s):
if s[i] == '+':
last_op = '+'
elif s[i] == '*':
last_op = '*'
elif s[i] == ')':
print("returning on )")
return su,i
elif s[i] == '(':
print("calling on (")
if last_op == '+':
new_s, new_i = evaluate(s[i+1:])
su += new_s
i += new_i + 1
elif last_op == '*':
new_s, new_i = evaluate(s[i+1:])
su *= new_s
i += new_i + 1
else:
print("last op not set!")
else:
if last_op == '+':
su += int(s[i])
elif last_op == '*':
su *= int(s[i])
else:
print("last op not set (normal)!")
i+=1
return su
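# Example: evaluate("1+2*(3+4)") scans left to right with no operator
# precedence, recursing on '(' and unwinding on ')', and returns 21.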
def main():
inp = open("etc/data.txt").read().splitlines()
ans = 0
for i in inp:
i = i.replace(' ', '')
tmp = evaluate(i)
ans += tmp
print(ans)
main()
|
StarcoderdataPython
|
11253174
|
<filename>app/api/images.py
import asyncio
from contextlib import suppress
from sanic import Blueprint, response
from sanic.log import logger
from sanic_openapi import doc
from .. import helpers, models, settings, utils
blueprint = Blueprint("images", url_prefix="/images")
@blueprint.get("/")
@doc.summary("List sample memes")
@doc.operation("images.list")
@doc.produces(
doc.List({"url": str, "template": str}),
description="Successfully returned a list of sample memes",
content_type="application/json",
)
async def index(request):
samples = await asyncio.to_thread(helpers.get_sample_images, request)
return response.json(
[{"url": url, "template": template} for url, template in samples]
)
@blueprint.post("/")
@doc.summary("Create a meme from a template")
@doc.operation("images.create")
@doc.consumes(
doc.JsonBody(
{"template_key": str, "text_lines": [str], "extension": str, "redirect": bool}
),
content_type="application/json",
location="body",
)
@doc.response(201, {"url": str}, description="Successfully created a meme")
@doc.response(
400, {"error": str}, description='Required "template_key" missing in request body'
)
async def create(request):
if request.form:
payload = dict(request.form)
with suppress(KeyError):
payload["template_key"] = payload.pop("template_key")[0]
with suppress(KeyError):
payload["text_lines"] = payload.pop("text_lines[]")
else:
payload = request.json
try:
template_key = payload["template_key"]
except KeyError:
return response.json({"error": '"template_key" is required'}, status=400)
template = models.Template.objects.get(template_key)
url = template.build_custom_url(
request.app,
payload.get("text_lines") or [],
extension=payload.get("extension"),
)
if payload.get("redirect", False):
return response.redirect(url)
return response.json({"url": url}, status=201)
@blueprint.get("/<template_key>.png")
@doc.summary("Display a template background")
@doc.produces(
doc.File(),
description="Successfully displayed a template background",
content_type="image/png",
)
@doc.response(404, doc.File(), description="Template not found")
@doc.response(415, doc.File(), description="Unable to download image URL")
@doc.response(
422,
doc.File(),
description="Invalid style for template or no image URL specified for custom template",
)
async def blank_png(request, template_key):
return await render_image(request, template_key, ext="png")
@blueprint.get("/<template_key>.jpg")
@doc.summary("Display a template background")
@doc.produces(
doc.File(),
description="Successfully displayed a template background",
content_type="image/jpeg",
)
@doc.response(404, doc.File(), description="Template not found")
@doc.response(415, doc.File(), description="Unable to download image URL")
@doc.response(
422,
doc.File(),
description="Invalid style for template or no image URL specified for custom template",
)
async def blank_jpg(request, template_key):
return await render_image(request, template_key, ext="jpg")
@blueprint.get("/<template_key>/<text_paths:[\s\S]+>.png")
@doc.summary("Display a custom meme")
@doc.produces(
doc.File(),
description="Successfully displayed a custom meme",
content_type="image/png",
)
@doc.response(404, doc.File(), description="Template not found")
@doc.response(414, doc.File(), description="Custom text too long (length >200)")
@doc.response(415, doc.File(), description="Unable to download image URL")
@doc.response(
422,
doc.File(),
description="Invalid style for template or no image URL specified for custom template",
)
async def text_png(request, template_key, text_paths):
slug, updated = utils.text.normalize(text_paths)
if updated:
url = request.app.url_for(
"images.text_png",
template_key=template_key,
text_paths=slug,
**request.args,
).replace("%3A%2F%2F", "://")
return response.redirect(url, status=301)
return await render_image(request, template_key, slug)
@blueprint.get("/<template_key>/<text_paths:[\s\S]+>.jpg")
@doc.summary("Display a custom meme")
@doc.produces(
doc.File(),
description="Successfully displayed a custom meme",
content_type="image/jpeg",
)
@doc.response(404, doc.File(), description="Template not found")
@doc.response(414, doc.File(), description="Custom text too long (length >200)")
@doc.response(415, doc.File(), description="Unable to download image URL")
@doc.response(
422,
doc.File(),
description="Invalid style for template or no image URL specified for custom template",
)
async def text_jpg(request, template_key, text_paths):
slug, updated = utils.text.normalize(text_paths)
if updated:
url = request.app.url_for(
"images.text_jpg",
template_key=template_key,
text_paths=slug,
**request.args,
).replace("%3A%2F%2F", "://")
return response.redirect(url, status=301)
return await render_image(request, template_key, slug, ext="jpg")
async def render_image(
request, key: str, slug: str = "", ext: str = settings.DEFAULT_EXT
):
status = 200
if len(slug.encode()) > 200:
logger.error(f"Slug too long: {slug}")
slug = slug[:50] + "..."
template = models.Template.objects.get("_error")
style = "default"
status = 414
elif key == "custom":
style = "default"
url = request.args.get("background") or request.args.get("alt")
if url:
template = await models.Template.create(url)
if not template.image.exists():
logger.error(f"Unable to download image URL: {url}")
template = models.Template.objects.get("_error")
status = 415
else:
logger.error("No image URL specified for custom template")
template = models.Template.objects.get("_error")
status = 422
else:
template = models.Template.objects.get_or_none(key)
if not template:
logger.error(f"No such template: {key}")
template = models.Template.objects.get("_error")
status = 404
style = request.args.get("style") or request.args.get("alt")
if style and style not in template.styles:
logger.error(f"Invalid style for template: {style}")
status = 422
lines = utils.text.decode(slug)
size = int(request.args.get("width", 0)), int(request.args.get("height", 0))
await helpers.track(request, lines)
path = await asyncio.to_thread(utils.images.save, template, lines, ext, style, size)
return await response.file(path, status)
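# Illustrative URL shapes handled above (template key and text are examples):
#   GET /images/fry/shut_up_and/take_my_money.png
# renders the "fry" template with two text lines; an unknown key falls back
# to the "_error" template with a 404 status.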
|
StarcoderdataPython
|
5155407
|
<reponame>Knowledge-Precipitation-Tribe/Neural-network<filename>code/NonLinearBinaryClassification/XorGateClassifier-keras.py
# -*- coding: utf-8 -*-#
'''
# Name: XorGateClassifier-keras
# Description:
# Author: super
# Date: 2020/5/25
'''
from XorGateClassifier import *
from keras.models import Sequential
from keras.layers import Dense
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def load_data():
dataReader = XOR_DataReader()
dataReader.ReadData()
x_train, y_train = dataReader.XTrain, dataReader.YTrain
return x_train, y_train
def build_model():
model = Sequential()
model.add(Dense(2, activation='sigmoid', input_shape=(2, )))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='Adam',
loss='binary_crossentropy',
metrics=['accuracy'])
return model
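# A 2-2-1 sigmoid network is the smallest dense architecture that can fit XOR:
# the hidden layer remaps the inputs so a single output unit can separate them.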
# Plot training and validation accuracy and loss over the course of training
def draw_train_history(history):
plt.figure(1)
# summarize history for accuracy
plt.subplot(211)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
if __name__ == '__main__':
x_train, y_train = load_data()
model = build_model()
history = model.fit(x_train, y_train, epochs=5000, batch_size=1, validation_data=(x_train, y_train))
draw_train_history(history)
loss, accuracy = model.evaluate(x_train, y_train)
print("test loss: {}, test accuracy: {}".format(loss, accuracy))
weights = model.get_weights()
print("weights: ", weights)
|
StarcoderdataPython
|
6430857
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
p = head
all_val = []
while p:
q = ListNode(0)
q.val = p.val
all_val.append(q)
p = p.next
all_val.remove(all_val[len(all_val) - n])
if len(all_val) == 0:
return None
for i in range(len(all_val) - 1):
all_val[i].next = all_val[i + 1]
return all_val[0]
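# Example: for the list 1 -> 2 with n = 1, the copied tail node is removed and
# the rebuilt list is the single node 1 (mirrors the harness below).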
a = Solution()
t1 = ListNode(1)
t2 = ListNode(2)
# t3 = ListNode(3)
# t4 = ListNode(4)
# t5 = ListNode(5)
t1.next = t2
# t2.next = t3
# t3.next = t4
# t4.next = t5
a.removeNthFromEnd(t1, 1)
|
StarcoderdataPython
|
96429
|
# Str!
API_TOKEN = 'YOUR TOKEN GOES HERE'
# Int!
ADMIN_ID = 'USER_ID OF PERSON(s) DESIGNATED AS ADMINS'
|
StarcoderdataPython
|
17043
|
from pprint import pprint
from enum import Enum
class Direction(Enum):
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
class Network:
def __init__(self, diagramRows):
self.diagram = self.setDiagram(diagramRows)
self.currentPosition = self.setCurrentPosition()
self.currentDirection = Direction.DOWN
self.route = []
self.steps = 1
def setDiagram(self, diagramRows):
splitRows = []
for diagramRow in diagramRows:
splitRows.append(list(diagramRow))
return splitRows
def setCurrentPosition(self):
startIndex = self.diagram[0].index('|')
return (startIndex, 0)
def getTraveledRoute(self):
return ''.join(self.route)
def getTraveledSteps(self):
return self.steps
def getForwardPosition(self):
if self.currentDirection == Direction.UP:
return (self.currentPosition[0], self.currentPosition[1] - 1)
elif self.currentDirection == Direction.DOWN:
return (self.currentPosition[0], self.currentPosition[1] + 1)
elif self.currentDirection == Direction.LEFT:
return (self.currentPosition[0] - 1, self.currentPosition[1])
elif self.currentDirection == Direction.RIGHT:
return (self.currentPosition[0] + 1, self.currentPosition[1])
def getLeftPosition(self):
if self.currentDirection == Direction.UP:
return (self.currentPosition[0] - 1, self.currentPosition[1])
elif self.currentDirection == Direction.DOWN:
return (self.currentPosition[0] + 1, self.currentPosition[1])
elif self.currentDirection == Direction.LEFT:
return (self.currentPosition[0], self.currentPosition[1] + 1)
elif self.currentDirection == Direction.RIGHT:
return (self.currentPosition[0], self.currentPosition[1] - 1)
def getRightPosition(self):
if self.currentDirection == Direction.UP:
return (self.currentPosition[0] + 1, self.currentPosition[1])
elif self.currentDirection == Direction.DOWN:
return (self.currentPosition[0] - 1, self.currentPosition[1])
elif self.currentDirection == Direction.LEFT:
return (self.currentPosition[0], self.currentPosition[1] - 1)
elif self.currentDirection == Direction.RIGHT:
return (self.currentPosition[0], self.currentPosition[1] + 1)
    def positionExists(self, position):
        return (position[1] >= 0
                and position[1] < len(self.diagram)
                and position[0] >= 0
                and position[0] < len(self.diagram[position[1]])
                and self.diagram[position[1]][position[0]] != ' ')
def changeDirectionLeft(self):
if self.currentDirection == Direction.UP:
self.currentDirection = Direction.LEFT
elif self.currentDirection == Direction.DOWN:
self.currentDirection = Direction.RIGHT
elif self.currentDirection == Direction.LEFT:
self.currentDirection = Direction.DOWN
elif self.currentDirection == Direction.RIGHT:
self.currentDirection = Direction.UP
def changeDirectionRight(self):
if self.currentDirection == Direction.UP:
self.currentDirection = Direction.RIGHT
elif self.currentDirection == Direction.DOWN:
self.currentDirection = Direction.LEFT
elif self.currentDirection == Direction.LEFT:
self.currentDirection = Direction.UP
elif self.currentDirection == Direction.RIGHT:
self.currentDirection = Direction.DOWN
def getNextPosition(self):
nextPosition = self.getForwardPosition()
if self.positionExists(nextPosition):
return nextPosition
nextPosition = self.getLeftPosition()
if self.positionExists(nextPosition):
self.changeDirectionLeft()
return nextPosition
nextPosition = self.getRightPosition()
if self.positionExists(nextPosition):
self.changeDirectionRight()
return nextPosition
return False
def run(self):
hasNextPosition = True
while hasNextPosition:
nextPosition = self.getNextPosition()
if not nextPosition:
hasNextPosition = False
else:
self.currentPosition = nextPosition
self.steps += 1
character = self.diagram[nextPosition[1]][nextPosition[0]]
if character.isalpha():
self.route.append(character)
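# Sketch of intended use (input format assumed: an ASCII routing diagram):
#   network = Network(open("diagram.txt").read().splitlines())
#   network.run()
#   print(network.getTraveledRoute(), network.getTraveledSteps())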
|
StarcoderdataPython
|
9631408
|
<gh_stars>0
def main(n):
    if n == 10:
        print("Blastoff!")
    elif n > 10:
        print("Number needs to be 10 or under.")
    else:
        print(n)
        main(n + 1)
main(1)
|
StarcoderdataPython
|
5048228
|
<gh_stars>1-10
"""
Name : Check_2d_fft.py
Author: <NAME>
e-mail: <EMAIL>
Date : 2021-07-14
DESC :
"""
from numpy import genfromtxt
import numpy as np
import matplotlib.pyplot as plt
import fluidplasma as fp
# Merge all the small CSV files into a single matrix
for i in range(99, 899, 100):
fname = "./data/FFT/PH"+str(i)+".csv"
print(fname)
ph1 = genfromtxt(fname, delimiter=' ')
ph1 = ph1.transpose()
if i==99:
ph2 = genfromtxt(fname, delimiter=' ')
ph2 = ph2.transpose()
if i > 99:
ph2 = np.vstack((ph2, ph1))
# Save combined data matrix in csv file
np.savetxt('./data/FFT.csv', ph2, delimiter=" ")
# Some constants
dx = 0.2 # Spatial grid size
dt=0.1 # Temporal grid size
z2, k2, w2 = fp.wk2d(ph2, dx, dt) # Compute 2d fft
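# wk2d's return convention is assumed from this call: the 2-D spectral power
# plus the wavenumber (k) and frequency (w) axes implied by dx and dt, which
# the omega-k plot below relies on.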
# Plotting the result
tpc = plt.imshow(z2, aspect='auto', interpolation='none',
extent=[k2.min(), k2.max(), w2.min(), w2.max()], origin='lower')
plt.colorbar(tpc)
plt.jet()
plt.ylim(0, 2)
plt.xlim(-3, 3)
plt.xlabel('$k$')
plt.ylabel("$\omega$")
plt.title("$\omega -$k plot")
plt.savefig("./figures/wkplot.png")
plt.show()
|
StarcoderdataPython
|
8145738
|
<filename>source/remediation_runbooks/scripts/MakeRDSSnapshotPrivate.py
#!/usr/bin/python
###############################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not #
# use this file except in compliance with the License. A copy of the License #
# is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0/ #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express #
# or implied. See the License for the specific language governing permis- #
# sions and limitations under the License. #
###############################################################################
import json
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
def connect_to_rds():
boto_config = Config(
retries ={
'mode': 'standard'
}
)
return boto3.client('rds', config=boto_config)
def make_snapshot_private(event, context):
rds_client = connect_to_rds()
snapshot_id = event['DBSnapshotId']
snapshot_type = event['DBSnapshotType']
try:
if (snapshot_type == 'snapshot'):
rds_client.modify_db_snapshot_attribute(
DBSnapshotIdentifier=snapshot_id,
AttributeName='restore',
ValuesToRemove=['all']
)
elif (snapshot_type == 'cluster-snapshot'):
rds_client.modify_db_cluster_snapshot_attribute(
DBClusterSnapshotIdentifier=snapshot_id,
AttributeName='restore',
ValuesToRemove=['all']
)
else:
exit(f'Unrecognized snapshot_type {snapshot_type}')
print(f'Remediation completed: {snapshot_id} public access removed.')
return {
"response": {
"message": f'Snapshot {snapshot_id} permissions set to private',
"status": "Success"
}
}
except Exception as e:
exit(f'Remediation failed for {snapshot_id}: {str(e)}')
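# Example event shape (inferred from the lookups above):
#   {"DBSnapshotId": "mydb-snapshot", "DBSnapshotType": "snapshot"}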
|
StarcoderdataPython
|
3297925
|
from pymongo import MongoClient
from sampledata import Sampledata
import pymongo
class Db:
env = 'prod'
client = MongoClient()
# client = MongoClient("mongodb://mongodb0.example.net:55888")
sampledata = Sampledata()
def __init__(self):
if self.env == 'prod':
self.db = self.client.prod
else:
self.db = self.client.test
def getMessages(self):
messages = []
cursor = self.db.messages.find().sort([('conv_id', 1), ('q_nr', 1)])
for document in cursor:
messages.append(document)
        print('nr of messages: ', len(messages))
return messages
def getConversations(self):
conversations = []
        cursor = self.db.conversations.find()
for document in cursor:
conversations.append(document)
return conversations
def _clearMessages(self):
cursor = self.db.messages.drop()
return True
    def _clearConversations(self):
        cursor = self.db.conversations.drop()
        return True
    def _clearDb(self):
        try:
            self._clearMessages()
            self._clearConversations()
            return True
        except Exception as e:
            print(e)
            return False
def insertTestData(self):
testmessages = self.sampledata.getMessages()
testconvs = self.sampledata.getConversations()
graphmsg = self.sampledata.getgraphmessages()
        print('inserting...')
self.db.messages.insert(testmessages)
self.db.conversations.insert(testconvs)
self.db.graphdata.insert(graphmsg)
def resetDBToTestState(self):
        print('resetting...')
self._clearDb()
self.insertTestData()
def clearTestIncomingMsg(self):
self.db.testincomingmsgs.drop()
# simulating test incoming msgs, mongodb stores strings as unicode
def insertTestIncomingMsg(self, msg):
self.db.testincomingmsgs.insert(msg)
def getMostRecentTestIncomingMsg(self):
result = list(self.db.testincomingmsgs.find().sort([('$natural', pymongo.DESCENDING)]).limit(1))
return result[0]
def storeIncomingMsg(self, messagesender, messagetext, timestamp):
self.db.incomingmessages.insert({'sender': messagesender, 'messagetext' : messagetext, 'timestamp' : timestamp})
def storeOutgoingMsg(self, messagesender, responsetext, timestamp):
self.db.outgoingmessages.insert({'sender': messagesender, 'responsetext' : responsetext, 'timestamp' : timestamp})
|
StarcoderdataPython
|
1969860
|
<reponame>keaparrot/secbootctl
# secbootctl - Secure Boot Helper
#
# @license https://github.com/keaparrot/secbootctl/blob/master/LICENSE.md
from __future__ import annotations
from pathlib import Path
class Env:
APP_NAME: str = 'secbootctl'
APP_VERSION: str = '0.2.0'
APP_TITLE: str = f'{APP_NAME} v{APP_VERSION} - Secure Boot Helper'
APP_CONFIG_FILE_PATH: Path = Path(f'/etc/{APP_NAME}/{APP_NAME}.conf')
APP_HOOK_PATH: Path = Path(f'/etc/{APP_NAME}/hooks')
BOOTLOADER_DEFAULT_BOOT_FILE_SUBPATH: str = 'EFI/BOOT/BOOTX64.EFI'
BOOTLOADER_SYSTEMD_BOOT_BOOT_FILE_SUBPATH: str = 'EFI/systemd/systemd-bootx64.efi'
BOOTLOADER_CONFIG_FILE_SUBPATH: str = 'loader/loader.conf'
BOOTLOADER_DEFAULT_ENTRY_FILE_NAME: str = f'{APP_NAME}-default-linux.conf'
BOOTLOADER_DEFAULT_ENTRY_FILE_SUBPATH: str = f'loader/entries/{BOOTLOADER_DEFAULT_ENTRY_FILE_NAME}'
BOOTLOADER_SYSTEMD_BOOT_STUB_FILE_PATH: Path = Path('/usr/lib/systemd/boot/efi/linuxx64.efi.stub')
EFI_BOOT_MODE_CHECK_PATH: Path = Path('/sys/firmware/efi')
KERNEL_CMDLINE_ETC_FILE_PATH: Path = Path('/etc/kernel/cmdline')
KERNEL_CMDLINE_PROC_FILE_PATH: Path = Path('/proc/cmdline')
MACHINE_ID: str = ''
OS_RELEASE_FILE_PATH: Path = Path('/etc/os-release')
SB_KEY_NAME_DB: str = 'db'
SUPPORTED_PACKAGE_MANAGERS: list = ['pacman', 'apt']
SUPPORTED_SECURITY_TOKENS: list = ['yubikey']
UNIFIED_IMAGE_SUBPATH: str = 'EFI/Linux'
@staticmethod
def load() -> None:
Env.MACHINE_ID = Path('/etc/machine-id').read_text().rstrip()
|
StarcoderdataPython
|
6513366
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 14, 2011
@author:<NAME>
Randomly sample features to initialize k-means. Each block is processed in a separate thread.
"""
import os
import dbrec3d_batch
import multiprocessing
import queue
import time
import random
import optparse
import sys
from numpy import log, ceil
from xml.etree.ElementTree import ElementTree
#time.sleep(30);
class dbvalue:
def __init__(self, index, type):
self.id = index # unsigned integer
self.type = type # string
#*******************The Main Algorithm ************************#
if __name__=="__main__":
dbrec3d_batch.register_processes();
dbrec3d_batch.register_datatypes();
#Parse inputs
parser = optparse.OptionParser(description='bof Statistics Pass 0');
parser.add_option('--bof_dir', action="store", dest="bof_dir");
parser.add_option('--init_k_means_dir', action="store", dest="init_k_means_dir");
parser.add_option('--num_means', action="store", dest="num_means", type="int", default =0)
options, args = parser.parse_args()
bof_dir = options.bof_dir;
num_means = options.num_means;
init_k_means_dir = options.init_k_means_dir;
if not os.path.isdir(bof_dir +"/"):
print "Invalid bof Dir"
sys.exit(-1);
if not os.path.isdir(init_k_means_dir +"/"):
os.mkdir(init_k_means_dir +"/");
dbrec3d_batch.init_process("bofRndMeansProcess");
dbrec3d_batch.set_input_string(0,bof_dir);
dbrec3d_batch.set_input_int(1,num_means);
dbrec3d_batch.set_input_string(2, init_k_means_dir + "/sp_means.txt");
dbrec3d_batch.run_process();
dbrec3d_batch.clear();
|
StarcoderdataPython
|
97224
|
<reponame>mcara/stsci.skypac
"""skymatch"""
import os
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
__version__ = 'UNKNOWN'
# from .version import version as __version__
__author__ = '<NAME>'
__docformat__ = 'restructuredtext en'
__taskname__ = 'skymatch'
from . import parseat # noqa: F401
from . import utils # noqa: F401
from . import pamutils # noqa: F401
from . import region # noqa: F401
from . import skystatistics # noqa: F401
from . import skyline # noqa: F401
from . import skymatch # noqa: F401
from stsci.tools import teal
teal.print_tasknames(__name__, os.path.dirname(__file__))
def help():
msg = """The SkyPac package contains the following tasks that allow users
    to perform sky level matching on user images.
skypac:
skymatch - primary task for performing sky level matching on
user images
"""
print(msg)
|
StarcoderdataPython
|
1968084
|
<gh_stars>0
#!/usr/bin/env python
"""Read a list of AWS ECR images from stdin and return a list of hashes that are
older than --num-to-keep in --branch."""
import json
import sys
from collections import OrderedDict
from datetime import timedelta, datetime
from optparse import OptionParser
def sortdict(d):
"""Sort a dictionary by element value."""
for key in sorted(d): yield d[key]
parser = OptionParser()
parser.add_option("-b", "--branch", dest="branch",
help="Image branch to target (dev, prod)")
parser.add_option("-n", "--num-to-keep", dest="num_to_keep",
help="The number of historical images to keep")
(options, args) = parser.parse_args()
if not options.branch:
parser.error('Image branch not specified')
if not options.num_to_keep:
parser.error('Number of images to keep not specified')
images = {}
for repo_image in json.load(sys.stdin)["imageIds"]:
image_age = 0
try:
tag = repo_image['imageTag']
if '-' in tag:
branch, date = tag.split('-')
if options.branch == branch:
dt = datetime.strptime(date,'%Y%m%d%H%M%S')
images[dt] = repo_image['imageDigest']
except KeyError:
pass
all_dated_images = OrderedDict(sorted(images.items(), key=lambda t: t[0]))
images_to_remove = list(all_dated_images.items())[:-(int(options.num_to_keep) + 1)]
for image_to_remove in images_to_remove:
image_date, image_sha = image_to_remove
    print(str(image_date) + '|' + image_sha)
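# Example: with --branch dev --num-to-keep 3 and five dev-<timestamp> tags,
# the newest four digests (current + 3 historical) are kept and the oldest
# one is printed as a "date|sha" line for deletion.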
|
StarcoderdataPython
|
3210483
|
<gh_stars>1-10
import matplotlib.pyplot as plt
# position visualization
def plot(new, ticker1, ticker2):
"""Visualise a position given a _signals_ object and two ticker names"""
fig = plt.figure(figsize=(10, 5))
bx = fig.add_subplot(111)
bx2 = bx.twinx()
# plot two different assets
(l1,) = bx.plot(new.index, new["asset1"], c="#4abdac")
(l2,) = bx2.plot(new.index, new["asset2"], c="#907163")
(u1,) = bx.plot(
new.loc[new["positions1"] == 1].index,
new["asset1"][new["positions1"] == 1],
lw=0,
marker="^",
markersize=8,
c="g",
alpha=0.7,
)
(d1,) = bx.plot(
new.loc[new["positions1"] == -1].index,
new["asset1"][new["positions1"] == -1],
lw=0,
marker="v",
markersize=8,
c="r",
alpha=0.7,
)
(u2,) = bx2.plot(
new.loc[new["positions2"] == 1].index,
new["asset2"][new["positions2"] == 1],
lw=0,
marker=2,
markersize=9,
c="g",
alpha=0.9,
markeredgewidth=3,
)
(d2,) = bx2.plot(
new.loc[new["positions2"] == -1].index,
new["asset2"][new["positions2"] == -1],
lw=0,
marker=3,
markersize=9,
c="r",
alpha=0.9,
markeredgewidth=3,
)
bx.set_ylabel(
ticker1,
)
bx2.set_ylabel(ticker2, rotation=270)
bx.yaxis.labelpad = 15
bx2.yaxis.labelpad = 15
bx.set_xlabel("Date")
bx.xaxis.labelpad = 15
plt.legend(
[l1, l2, u1, d1, u2, d2],
[
ticker1,
ticker2,
"LONG {}".format(ticker1),
"SHORT {}".format(ticker1),
"LONG {}".format(ticker2),
"SHORT {}".format(ticker2),
],
loc=8,
)
plt.title("Pair Trading")
plt.xlabel("Date")
plt.grid(True)
plt.show()
|
StarcoderdataPython
|
3399524
|
<reponame>Pathfinder-for-Pitch-Momentum-Bias/FlightSoftware<gh_stars>1-10
import subprocess
import pty
import json
import os
import serial
import unittest
class TestDownlinkParser(unittest.TestCase):
"""
Ensures that the downlink parser accumulates downlink packets and dumps the
data contained in a downlink frame once no more packets are available.
"""
filepath = os.path.dirname(os.path.abspath(__file__))
binary_dir = os.path.join(filepath, "../.pio/build/gsw_downlink_parser/program")
def setUp(self):
master_fd, slave_fd = pty.openpty()
self.downlink_parser = subprocess.Popen([self.binary_dir], stdin=master_fd,
stdout=master_fd)
self.console = serial.Serial(os.ttyname(slave_fd), 9600, timeout=1)
def getFilepath(self, filename):
# Get full path of file specified by the argument.
data_dir = os.path.join(self.filepath, "dat/DownlinkParser")
return os.path.join(data_dir, filename)
def testAddInvalidDownlink(self):
"""The parser produces an error if you feed it a nonexistent file. """
self.console.write((self.getFilepath("nonexistent_downlink") + "\n").encode())
response = self.console.readline().rstrip()
self.assertEqual(response.decode(), "Error: file not found.")
def testValidDownlink(self):
"""Test valid downlink reading."""
# Adding one downlink packet results in the parser returning data from
# the last collected downlink frame. There is no such frame, so the
# parser returns back an empty JSON string.
self.console.write((self.getFilepath("downlink1") + "\n").encode())
response = json.loads(self.console.readline().rstrip())
self.assertIsNone(response)
# Sending downlink packet 1 again tells the parser that a downlink frame
# has been completed, so it dumps the data contained in the previously
# sent downlink frame.
self.console.write((self.getFilepath("downlink1") + "\n").encode())
response = json.loads(self.console.readline().rstrip())
expectedResponse = json.load(open(self.getFilepath("expected_output.json")))
self.assertDictEqual(response, expectedResponse)
def tearDown(self):
self.downlink_parser.kill()
self.console.close()
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4946905
|
<gh_stars>0
import os
from pathlib import Path
import pytest
import toml
tests_location = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture(
params=Path(os.path.join(tests_location, "texts")).glob("*.toml")
)
def test_text(request):
yield toml.load(request.param)
|
StarcoderdataPython
|
1765563
|
import gzip
import os
import shutil
from pathlib import Path
from tempfile import NamedTemporaryFile
import pytest
import skhep_testdata
import pylhe
TEST_FILE = skhep_testdata.data_path("pylhe-testfile-pr29.lhe")
@pytest.fixture(scope="session")
def testdata_gzip_file():
test_data = skhep_testdata.data_path("pylhe-testfile-pr29.lhe")
tmp_path = Path(NamedTemporaryFile().name)
# create what is basically pylhe-testfile-pr29.lhe.gz
with open(test_data, "rb") as readfile:
with gzip.open(tmp_path, "wb") as writefile:
shutil.copyfileobj(readfile, writefile)
yield tmp_path
# teardown
os.remove(tmp_path)
def test_gzip_open(tmpdir, testdata_gzip_file):
assert pylhe._extract_fileobj(TEST_FILE)
assert pylhe._extract_fileobj(testdata_gzip_file)
# Needs path-like object, not a fileobj
with pytest.raises(TypeError):
with open(TEST_FILE, "rb") as fileobj:
pylhe._extract_fileobj(fileobj)
with open(TEST_FILE, "rb") as fileobj:
assert isinstance(pylhe._extract_fileobj(TEST_FILE), type(fileobj))
assert isinstance(pylhe._extract_fileobj(Path(TEST_FILE)), type(fileobj))
assert isinstance(pylhe._extract_fileobj(testdata_gzip_file), gzip.GzipFile)
assert isinstance(pylhe._extract_fileobj(Path(testdata_gzip_file)), gzip.GzipFile)
def test_read_num_events(testdata_gzip_file):
assert pylhe.read_num_events(TEST_FILE) == 791
assert pylhe.read_num_events(TEST_FILE) == pylhe.read_num_events(testdata_gzip_file)
def test_lhe_init(testdata_gzip_file):
assert pylhe.read_lhe_init(TEST_FILE) == pylhe.read_lhe_init(testdata_gzip_file)
init_data = pylhe.read_lhe_init(TEST_FILE)
init_info = init_data["initInfo"]
assert init_info["beamA"] == pytest.approx(1.0)
assert init_info["beamB"] == pytest.approx(2.0)
assert init_info["energyA"] == pytest.approx(1.234567)
assert init_info["energyB"] == pytest.approx(2.345678)
assert init_info["PDFgroupA"] == pytest.approx(3.0)
assert init_info["PDFgroupB"] == pytest.approx(4.0)
assert init_info["PDFsetA"] == pytest.approx(5.0)
assert init_info["PDFsetB"] == pytest.approx(6.0)
assert init_info["weightingStrategy"] == pytest.approx(7.0)
assert init_info["numProcesses"] == pytest.approx(8.0)
def test_read_lhe(testdata_gzip_file):
assert pylhe.read_lhe(TEST_FILE)
assert pylhe.read_lhe(testdata_gzip_file)
|
StarcoderdataPython
|
1662308
|
<filename>892SurfaceArea/Surface.py
"""
在 N * N 的网格上,我们放置一些 1 * 1 * 1 的立方体。
每个值 v = grid[i][j] 表示 v 个正方体叠放在对应单元格 (i, j) 上。
请你返回最终形体的表面积。
示例 1:
输入:[[2]]
输出:10
示例 2:
输入:[[1,2],[3,4]]
输出:34
示例 3:
输入:[[1,0],[0,2]]
输出:16
示例 4:
输入:[[1,1,1],[1,0,1],[1,1,1]]
输出:32
示例 5:
输入:[[2,2,2],[2,1,2],[2,2,2]]
输出:46
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/surface-area-of-3d-shapes
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution:
def surfaceArea(self, grid) -> int:
        '''
        For every cell holding x > 0 cubes, a lone tower exposes
        6x - (2x - 2) = 4x + 2 faces; then subtract min(x, y) for each
        adjacent cell holding y > 0 cubes, once per shared edge.
        '''
if not grid : return 0
w,h = len(grid[0]), len(grid)
S = []
for i in range(w):
for j in range(h):
# vertical
if grid[i][j] == 0: continue
S_temp = 4*grid[i][j] + 2
for a in (1,-1):
if i+a >= 0 and i+a <w :
if grid[i+a][j] != 0:
area_loss = grid[i+a][j] if grid[i+a][j]<grid[i][j] else grid[i][j]
S_temp -= area_loss
if j+a >=0 and j+a < h:
if grid[i][j+a] != 0:
area_loss = grid[i][j+a] if grid[i][j+a]<grid[i][j] else grid[i][j]
S_temp -= area_loss
S.append(S_temp)
return sum(S)
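# Worked example: for grid = [[1, 2], [3, 4]] each tower contributes 4*v + 2
# faces minus min(v, neighbor) per shared edge, which totals 34.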
if __name__ == "__main__":
s = Solution()
grid = [[1,2],[3,4]]
print(s.surfaceArea(grid))
|
StarcoderdataPython
|
110112
|
<gh_stars>0
from struct import Struct
# little-endian 0xfeedface
MH_MAGIC = b'\xce\xfa\xed\xfe'
# little-endian 0xfeedfacf
MH_MAGIC_64 = b'\xcf\xfa\xed\xfe'
LC_CODE_SIGNATURE = 0x1d
# def struct_factory(target, little_endian=True):
# if little_endian:
# base = LittleEndianStructure
# else:
# base = BigEndianStructure
# return type(target.__name__, (target, base), {})()
class DynStruct(Struct):
"""
struct.Struct with configurable endian
"""
_fields_ = []
def __init__(self, little_endian=True):
if not self._fields_:
raise NotImplementedError("Need to be subclassed")
self._keys, fmts = zip(*self._fields_)
self._fmt = ''.join(fmts)
pad = '<' if little_endian else '>'
self._fmt = ''.join(
(pad, self._fmt))
super().__init__(self._fmt)
return
    def unpack_to_dict(self, buffer):
        """Unpack data into a dict corresponding to the subclass definition"""
        data = dict(zip(self._keys, self.unpack(buffer)))
        self.__dict__.update(data)
        return data
def pack_from_dict(self, data=None):
"""Pack data into binary form using internal data mapping"""
binary = (getattr(self, key) for key in self._keys)
return self.pack(*binary)
class MachHeader(DynStruct):
"""
struct mach_header {
uint32_t magic; /* mach magic number identifier */
cpu_type_t cputype; /* cpu specifier */
cpu_subtype_t cpusubtype; /* machine specifier */
uint32_t filetype; /* type of file */
uint32_t ncmds; /* number of load commands */
uint32_t sizeofcmds; /* the size of all the load commands */
uint32_t flags; /* flags */
};
"""
_fields_ = [
("magic", "I"),
("cputype", "I"),
("cpusubtype", "I"),
("filetype", "I"),
("ncmds", "I"),
("sizeofcmds", "I"),
("flags", "I"),
]
class MachHeader64(DynStruct):
"""
struct mach_header_64 {
uint32_t magic; /* mach magic number identifier */
cpu_type_t cputype; /* cpu specifier */
cpu_subtype_t cpusubtype; /* machine specifier */
uint32_t filetype; /* type of file */
uint32_t ncmds; /* number of load commands */
uint32_t sizeofcmds; /* the size of all the load commands */
uint32_t flags; /* flags */
uint32_t reserved; /* reserved */
};
"""
_fields_ = [
("magic", "I"),
("cputype", "I"),
("cpusubtype", "I"),
("filetype", "I"),
("ncmds", "I"),
("sizeofcmds", "I"),
("flags", "I"),
("reserved", "I"),
]
class LoadCommand(DynStruct):
"""
struct load_command {
uint32_t cmd; /* type of load command */
uint32_t cmdsize; /* total size of command in bytes */
};
"""
_fields_ = [
("cmd", "I"),
("cmdsize", "I"),
]
class LinkeditDataCommand(DynStruct):
"""
struct linkedit_data_command {
uint32_t cmd; /* LC_CODE_SIGNATURE, LC_SEGMENT_SPLIT_INFO,
LC_FUNCTION_STARTS, LC_DATA_IN_CODE,
LC_DYLIB_CODE_SIGN_DRS or
LC_LINKER_OPTIMIZATION_HINT. */
uint32_t cmdsize; /* sizeof(struct linkedit_data_command) */
uint32_t dataoff; /* file offset of data in __LINKEDIT segment */
uint32_t datasize; /* file size of data in __LINKEDIT segment */
};
"""
_fields_ = [
("cmd", "I"),
("cmdsize", "I"),
("dataoff", "I"),
("datasize", "I"),
]
|
StarcoderdataPython
|
3305754
|
from screen import *
import pygame
import apple
head_direction = 'RIGHT'
apple_coordinate = apple.apple_coord
sprite = pygame.image.load('assets/matheus.nielsen_snake.png')
# sets snake direction
def set_direction(direction):
global head_direction
head_direction = direction
# exports snake's direction
def get_direction():
return head_direction
# controls snake movement
def movement():
global apple_coordinate
apple_coordinate = apple.get_coord()
global snake
# checks the snake's direction and moves it accordingly
if head_direction == 'LEFT':
snake[0] = (snake[0][0] - grid_square, snake[0][1])
elif head_direction == 'RIGHT':
snake[0] = (snake[0][0] + grid_square, snake[0][1])
elif head_direction == 'UP':
snake[0] = (snake[0][0], snake[0][1] - grid_square)
elif head_direction == 'DOWN':
snake[0] = (snake[0][0], snake[0][1] + grid_square)
# moves the rest of the snake's body
for i in range(len(snake) - 1, 0, -1):
snake[i] = (snake[i - 1][0], snake[i - 1][1])
for j in range(0, len(snake) - 4):
snake_hitbox[j] = (snake[j + 3][0], snake[j + 3][1])
# snake render
def blit():
for pos in snake:
screen.blit(sprite, pos)
# Checks if the snake eats the apple
def on_apple_collision():
if snake[0] == apple_coordinate:
print('score')
apple.score()
snake.append(screen_size)
snake_hitbox.append(screen_size)
print(snake_hitbox)
# high score file is opened on demand in on_player_collision below
# checks if the snake hit the wall
def on_wall_collision():
if not screen_size[0] - grid_square > snake[0][0] > 0 or not screen_size[1] - grid_square > snake[0][1] > hud_y:
print(apple.get_points())
game_over_sfx.play()
return True
# checks if the snake hit itself
def on_player_collision():
    if snake[0] in snake_hitbox or on_wall_collision():
        with open('high_scores.md', 'r') as high_score_read:
            high_score = int(high_score_read.read() or 0)
        if apple.get_points() > high_score:
            with open('high_scores.md', 'w') as high_score_write:
                high_score_write.write(str(apple.get_points()))
        game_over_sfx.play()
        return True
# player setup
snake = [(320, 320), (352, 320), (384, 320), (406, 320)]
snake_hitbox = [(406, 320)]
head_coord = (snake[0][0], snake[0][1])
|
StarcoderdataPython
|
92372
|
n = int(input())
nums = list(map(int, input().strip().split()))
print(min(nums) * max(nums))
|
StarcoderdataPython
|
168298
|
#========================================================================================================
# TOPIC: PYTHON - Modules
#========================================================================================================
# NOTES: * Any Python file is a module.
# * Module is a file with Python statements, file extension ".py"
# * Modules have classes, functions and variables
# * Using the ModuleName.FunctionName is the notation to refer to the
# module objects (classes, functions and variables)
# * Modules can import other modules.
# * The keyword "import <module-name>" is used to import a module
# * The keyword "from <module-name> import <function1>" enables the developer to import
# specific objects of a module.
# * Python comes with a library of standard modules
#
#
#========================================================================================================
#
# FILE-NAME : 013_module.py
# DEPENDENT-FILES : These are the files and libraries needed to run this program ;
# 013_module.py and 013_module_usage.py
#
# AUTHOR : learnpython.com / Hemaxi
# (c) 2013
#
# DESC : Python Modules , used to organize code.
#
#========================================================================================================
# Declare GLOBAL Variables
# Some variables
country_1 = "USA"
country_2 = "China"
country_3 = "India"
# GLOBAL list
list_world_nations = ["USA", "China", "India"]
# GLOBAL tuple
tuple_world_nations = ("USA", "China", "India")
# GLOBAL dictionary
dictionary_world_nations = {'Country_1': 'USA', 'Country_2': 'China', 'Country_3': 'India'}
# Module function WITH a return value
def module_function_add(in_number1, in_number2):
    "This function adds the two input numbers"
    return in_number1 + in_number2
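#========================================================================================================
# Example usage (a minimal sketch; the companion file 013_module_usage.py is named in the header above).
# Because the module file name starts with a digit, a plain "import 013_module" is a SyntaxError,
# so importlib is used instead:
#
#   import importlib
#   mod = importlib.import_module("013_module")
#   print(mod.module_function_add(2, 3))     # -> 5
#   print(mod.list_world_nations)            # -> ['USA', 'China', 'India']
#========================================================================================================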
#========================================================================================================
# END OF CODE
#========================================================================================================
|
StarcoderdataPython
|
11376702
|
from __future__ import annotations
import typing as t
# single source of truth for package version,
# see https://packaging.python.org/en/latest/single_source_version/
__version__ = "0.3.10-dev"
VersionType = t.Union[t.Tuple[int, int, int], t.Tuple[int, int, int, str]]
# parse to a tuple
def parse_version(s: str) -> VersionType:
pre: tuple[()] | tuple[str, int] = ()
if s.endswith("-dev"):
pre = ("dev", 0)
s = s.rsplit("-", 1)[0]
vals = s.split(".")
if len(vals) != 3:
raise ValueError("bad version")
return t.cast(VersionType, tuple(int(x) for x in vals) + pre)
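# sanity check: parse_version("0.3.10-dev") == (0, 3, 10, "dev", 0)
#               parse_version("1.2.3")      == (1, 2, 3)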
PARSED_VERSION = parse_version(__version__)
# app name to send as part of SDK requests
app_name = f"funcX SDK v{__version__}"
|
StarcoderdataPython
|
12805963
|
from collections import deque

def main():
    h, w = map(int, input().split())
    maze = [list(input()) for _ in range(h)]
    ans = 0
    dx = [1, 0, -1, 0]
    dy = [0, 1, 0, -1]
    # BFS from every open cell; the answer is the largest shortest-path
    # distance found over all starting cells
    for sx in range(h):
        for sy in range(w):
            if maze[sx][sy] == '#':
                continue
            d = [[-1 for _ in range(w)] for _ in range(h)]
            que = deque()
            d[sx][sy] = 0
            que.append((sx, sy))
            M = 0
            while que:
                qx, qy = que.popleft()
                for i in range(4):
                    nx = qx + dx[i]
                    ny = qy + dy[i]
                    if 0 <= nx < h and 0 <= ny < w and maze[nx][ny] == "." and d[nx][ny] == -1:
                        d[nx][ny] = d[qx][qy] + 1
                        M = max(M, d[nx][ny])
                        que.append((nx, ny))
            ans = max(ans, M)
    print(ans)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11286405
|
<gh_stars>0
#!/usr/bin/env python
from __future__ import print_function, absolute_import, division
import logging
from collections import defaultdict
from errno import ENOENT
from stat import S_IFDIR, S_IFLNK, S_IFREG
from sys import argv, exit
from time import time
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn
block_size = 8
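# size in bytes of each storage chunk; file contents are kept as lists of fixed-size string blocks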
if not hasattr(__builtins__, 'bytes'):
bytes = str
class Memory(LoggingMixIn, Operations):
'Example memory filesystem. Supports only one level of files.'
def __init__(self):
self.files = {}
self.data = defaultdict(list)
self.fd = 0
now = time()
self.files['/'] = dict(st_mode=(S_IFDIR | 0o755), st_ctime=now,
st_mtime=now, st_atime=now, st_nlink=2)
def chmod(self, path, mode):
self.files[path]['st_mode'] &= 0o770000
self.files[path]['st_mode'] |= mode
return 0
def chown(self, path, uid, gid):
self.files[path]['st_uid'] = uid
self.files[path]['st_gid'] = gid
def create(self, path, mode):
self.files[path] = dict(st_mode=(S_IFREG | mode), st_nlink=1,
st_size=0, st_ctime=time(), st_mtime=time(),
st_atime=time())
self.fd += 1
return self.fd
def getattr(self, path, fh=None):
if path not in self.files:
raise FuseOSError(ENOENT)
return self.files[path]
def getxattr(self, path, name, position=0):
attrs = self.files[path].get('attrs', {})
try:
return attrs[name]
except KeyError:
return '' # Should return ENOATTR
def listxattr(self, path):
attrs = self.files[path].get('attrs', {})
return attrs.keys()
def mkdir(self, path, mode):
self.files[path] = dict(st_mode=(S_IFDIR | mode), st_nlink=2,
st_size=0, st_ctime=time(), st_mtime=time(),
st_atime=time())
self.files['/']['st_nlink'] += 1
def open(self, path, flags):
self.fd += 1
return self.fd
###############################################################################################################
# Commands: cat
# All the words in the 'data' dictionary under the given path will be popped and added to a string and returned
# The popped words are added back to their original place to maintain the state
###############################################################################################################
def read(self, path, size, offset, fh):
result = ''
length = len(self.data[path])
index = 0
while (length != 0):
popword = self.data[path].pop(index)
self.data[path].append(popword)
result = result + popword
length = length-1
print ('popped word: ' + popword)
print ('result: ' + result)
offset = len(result) # set offset till where the data was read
return result
def readdir(self, path, fh):
return ['.', '..'] + [x[1:] for x in self.files if x != '/']
def readlink(self, path):
return self.data[path]
def removexattr(self, path, name):
attrs = self.files[path].get('attrs', {})
try:
del attrs[name]
except KeyError:
pass # Should return ENOATTR
def rename(self, old, new):
self.files[new] = self.files.pop(old)
def rmdir(self, path):
self.files.pop(path)
self.files['/']['st_nlink'] -= 1
def setxattr(self, path, name, value, options, position=0):
# Ignore options
attrs = self.files[path].setdefault('attrs', {})
attrs[name] = value
def statfs(self, path):
return dict(f_bsize=512, f_blocks=4096, f_bavail=2048)
def symlink(self, target, source):
final = [] # create an empty list
var = 0
for i in range(0,len(source),block_size): # for loop to convert source into a list
divdata = source[var:var+block_size]
var = var + block_size
final.append(divdata) # appending divdata to final
source = final # copy final into source
self.files[target] = dict(st_mode=(S_IFLNK | 0o777), st_nlink=1, st_size=len(source))
self.data[target] = source
########################################################################################
# Commands: truncate
# This method trims the data to a given length that is specified as a parameter
# We first take all the words in the list, append them, trim them to the required length
# Then we store the result as blocks back in the list
########################################################################################
def truncate(self, path, length, fh=None):
string = ''
tot_len = len(self.data[path])
index = 0
while (tot_len != 0):
popword = self.data[path].pop(index)
string = string + popword
tot_len = tot_len - 1
string = string [:length]
final = []
var = 0
for i in range(0, len(string), block_size):
divdata = string[var : var + block_size]
var = var + block_size
final.append(divdata)
self.data[path] = final
#self.data[path] = self.data[path][:length]
self.files[path]['st_size'] = length
def unlink(self, path):
self.files.pop(path)
def utimens(self, path, times=None):
now = time()
atime, mtime = times if times else (now, now)
self.files[path]['st_atime'] = atime
self.files[path]['st_mtime'] = mtime
######################################################################################################
# Commands: echo
# based on if the write is happening to a new file or to a already written file, two cases can be seen
######################################################################################################
def write(self, path, data, offset, fh):
if (len(self.data[path]) == 0):
final = []
var = 0
for i in range(0,len(data),block_size):
divdata = data[var:var+block_size]
var = var + block_size
final.append(divdata)
offset = offset + len(data)
print (final)
self.data[path] = final
print ('splitdata' + str(self.data[path]))
else:
var1 = 0
final2 = []
strsize = len(self.data[path]) - 1
print ('length' + str(strsize))
print ('first element--------------------> ' +str(self.data[path][0]))
#print self.data[path]
laststr = self.data[path].pop(strsize)
            new_word = laststr + data # concatenating the last element of self.data[path] and the incoming data.
for i in range(0,len(new_word),block_size): # running a for loop for the length of new_word and dividing the new_word into strings of 8 bytes.
divdata = new_word[var1:var1+block_size]
var1 = var1 + block_size # moving the pointer ahead, so that the next 8 bytes will be taken from the divdata.
final2.append(divdata)
self.data[path].extend(final2)
offset = offset + len(data) # changing the offset.
print (str(self.data[path]))
print (self.files[path]['st_size'])
        self.files[path]['st_size'] = (len(self.data[path])-1) * block_size + len(self.data[path][-1]) # st_size is the total number of characters stored in self.data[path]
return len(data)
if __name__ == '__main__':
if len(argv) != 2:
print('usage: %s <mountpoint>' % argv[0])
exit(1)
logging.basicConfig(level=logging.DEBUG)
fuse = FUSE(Memory(), argv[1], foreground=True, debug=True)
|
StarcoderdataPython
|
8040740
|
<reponame>pratikadarsh/Algorithms<gh_stars>100-1000
'''
* @file TernarySearchDiscrete.py
* @author (original JAVA) <NAME>, <EMAIL>
* (conversion to Python) <NAME>, <EMAIL>
* @date 29 Jun 2020
* @version 0.1
* @brief An implementation of Ternary search
* Ternary search is similar to binary search except that it works on a function which decreases and
* then increases. This implementation of ternary search works on discrete values and returns the
* input value corresponding with the minimum output value of the function you're searching on.
*
* <p>Time Complexity: O(log(high - low)).
*
* <p>NOTE: You can also work with a function which increases and then decreases, simply negate your
* function :)
'''
import math
# A discrete function is just a set of data points.
function = [16, 12, 10, 3, 6, 7, 9, 10, 11, 12, 13, 17]
# Define your own function on whatever you're attempting to ternary
# search. Remember that your function must be a discrete and a unimodal
# function, this means a function which decreases then increases (U shape)
f = lambda i: function[i]
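# for the sample data above, f decreases to function[3] == 3 and then increases,
# so the search below should report 3 as the minimum value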
class TernarySearchDiscrete():
"""
"""
def __init__(self):
# Define a very small epsilon value to compare double values.
self.EPS = 0.000000001
def discreteTernarySearch(self, lo, hi):
while lo != hi:
if hi - lo == 1:
return min(f(lo), f(hi))
if hi - lo == 2:
return min(f(lo), min(f(lo + 1), f(hi)))
mid1 = (2 * lo + hi) // 3
mid2 = (lo + 2 * hi) // 3
res1 = f(mid1)
res2 = f(mid2)
            if abs(res1 - res2) < self.EPS:
lo = mid1
hi = mid2
elif res1 > res2:
lo = mid1
else:
hi = mid2
return lo
if __name__ == '__main__':
"""
Example usage
"""
ts = TernarySearchDiscrete()
lo = 0
hi = len(function) - 1
# Use ternary search to find the minimum value on the
# whole interval of out function.
minValue = ts.discreteTernarySearch(lo, hi)
print('{:.4f}\n'.format(minValue))
|
StarcoderdataPython
|
4891593
|
palabra = input("Ingrese una palabra: ")  # ask the user for a word
Index = len(palabra) - 1
nuevapalabra = ""
# build the reversed word one character at a time
while Index >= 0:
    nuevapalabra = nuevapalabra + palabra[Index]
    Index = Index - 1
if palabra == nuevapalabra:
    print("SI ES PALINDROMO")  # it is a palindrome
else:
    print("NO ES PALINDROMO")  # it is not a palindrome
|
StarcoderdataPython
|
353157
|
#!/usr/bin/env python
#
# Runs R Group Converter for Library Creation
#
# ----------------------------------------------------------
# imports
# ---------
from rdkit import Chem
import ruamel.yaml as yaml
from file_handler import FileWriter, FileParser
# Load datasources
# -------------
def load_datasources():
"""
Load all the datasources for running this package in local context.
This might slow down performance later -- we can opt in to load sources of data dependent on the functional group.
"""
from pathlib import Path
datasource_location = Path(__file__).absolute().parent
with open(str(datasource_location) + "/datasources/R_Groups.yaml") as stream:
try:
global R_GROUP_DATASOURCE
R_GROUP_DATASOURCE = yaml.safe_load(stream)
global R_GROUPS
R_GROUPS = R_GROUP_DATASOURCE['R_Groups']
except yaml.YAMLError as exc:
print ("Datasources not loading correctly, Please contact lead developer")
print(exc)
class RaiseMoleculeError(Exception):
__version_error_parser__ = 1.0
__allow_update__ = False
"""
Raise Molecule Error if for some reason we can't evaluate a SMILES, 2D, or 3D molecule.
"""
def __init__(self, message, errors):
super().__init__(message)
self.errors = errors
class Cocktail(object):
"""
This class is used to take in a molecule and replace any R groups with a one of the groups from the R-Group.
"""
__version_parser__ = 1.0
__allow_update__ = False
def __init__(self, molecules):
from molvs import Validator
        # Allow the user to pass a single molecule, but store it as a list
        # to keep the downstream handling uniform.
load_datasources()
self.dimensionality = '1D'
self.modified_molecules = []
if type(molecules) is not list:
self.molecules = [molecules]
else:
self.molecules = molecules
for molecule in self.molecules:
self.original_smiles = Chem.MolToSmiles(molecule)
# Validation
validator_format = '%(asctime)s - %(levelname)s - %(validation)s - %(message)s'
self.validate = Validator(log_format=validator_format)
def validate_smiles(self, smiles):
"""
This method takes the smiles string and runs through the validation check of a smiles string.
Arguments:
self (Object): Class Cocktail
smiles (string): Smiles string that needs to be evaluated
Returns:
N / A
Exceptions:
RaiseMoleculeError (Exception): MolVs Stacktrace and the smiles string that failed.
"""
# Use MolVS to validate the smiles to make sure enumeration and r group connections are correct
# at least in the 1D Format.
from molvs import validate_smiles as vs
try:
vs(smiles)
return True
except RaiseMoleculeError as RME:
print ("Not a Valid Smiles, Please check the formatting: %s" % self.original_smiles)
print ("MolVs Stacktrace %s" % RME)
return False
def validate_molecule(self, molecule):
"""
This function will be used to validate molecule objects
Arguments:
self (Object): Class Cocktail
molecule (RDKit Object): Molecule object we need to sanitize.
Returns:
molecule (RDKit Object): The RDkit Molecule molecule object
Exceptions:
            RaiseMoleculeError (Exception): Raised if the molecule is not valid.
TODO: Verify Sanitize molecule that the validation works
"""
if not molecule:
try:
Chem.rdmolops.SanitizeMol(molecule)
except RaiseMoleculeError as RME:
print ("Not a valid molecule: %s" % RME)
finally:
return molecule
def detect_functional_groups(self):
"""
Find functional groups that ligand library loader supports
:return:
"""
pattern_payload = {}
load_datasources()
print ("Detecting Functional Groups...")
for molecule in self.molecules:
for key, value in R_GROUPS.items():
pattern = Chem.MolFromSmarts(value[1])
if molecule.GetSubstructMatches(pattern,uniquify=False):
print ("Found Functional Group: %s | Pattern Count: %s" % (key,
len(molecule.GetSubstructMatches(
pattern,uniquify=False))))
pattern_payload[key] = [value[0], value[1]]
return pattern_payload
def shake(self, shape=None):
"""
Used to swap out molecules based on the patterns found from the detection.
Arguments:
self (Object): Cocktail object of the list of molecules
Return:
modified_molecules (List): List of the RDKit molecule objects that have had their structures replaced.
TODO: Do this faster than O(n)^3 as this algorithm is not the most efficient.
"""
# Run detection first to see and validate what functional groups have been found.
patterns_found = self.detect_functional_groups()
print ("Shaking Compound....")
modified_molecules = []
for molecule in self.molecules:
for key, value in patterns_found.items():
smarts_mol = Chem.MolFromSmarts(value[1])
for r_functional_group, r_data in R_GROUPS.items():
                    # Skip redundancies if the r group is already matched.
if r_data[1] == value[1]:
continue
try:
modified_molecule = Chem.ReplaceSubstructs(molecule, smarts_mol,
Chem.MolFromSmiles(r_data[0]), replaceAll=True)
modified_molecules.append(modified_molecule[0])
except RaiseMoleculeError:
print ("Molecule Formed is not possible")
# continue
self.modified_molecules = modified_molecules
return modified_molecules
def enumerate(self, enumeration_complexity=None, dimensionality=None):
"""
Enumerate the drug library based on dimension.
Arguments:
molecules (List): a list of molecules that the user would like enumerated.
enumeration_complexity (String): Declares how many times we will want to discover another molecule
configuration
dimensionality (String): Enumerate based on dimensionality (1D, 2D, 3D)
Returns:
enumerated_molecules (List): Dependent on the dimensionality of the user it can be -> a list of smiles, or
a list of RDKit Molecule Objects.
"""
# Enumeration comes from the user iwatobipen
# https://iwatobipen.wordpress.com/2018/11/15/generate-possible-list-of-smlies-with-rdkit-rdkit/
print ("Enumerating Compunds....")
        if enumeration_complexity is None:
            complexity = 10
        elif enumeration_complexity.lower() == 'low':
            complexity = 10
        elif enumeration_complexity.lower() == 'medium':
            complexity = 100
        elif enumeration_complexity.lower() == 'high':
            complexity = 1000
        else:
            complexity = 10
        enumerated_molecules = []
        for molecule in self.modified_molecules:
            for i in range(complexity):
                smiles_enumerated = Chem.MolToSmiles(molecule, doRandom=True)
                if dimensionality == '1D' and smiles_enumerated not in enumerated_molecules:
                    enumerated_molecules.append(smiles_enumerated)
                elif dimensionality == '2D':
                    if smiles_enumerated not in enumerated_molecules:
                        enumerated_molecules.append(Chem.MolFromSmiles(smiles_enumerated))
                elif dimensionality == '3D':
                    print ('Not supported yet')
        return enumerated_molecules
# Implemented Strictly for Testing
# --------------------------------
# if __name__ == '__main__':
#
# cocktail = Cocktail([Chem.MolFromSmiles('c1cc(CCCO)ccc1'), Chem.MolFromSmiles('c1cc(CCCBr)ccc1')])
# compounds_result = cocktail.shake()
# compounds_result_enumerated = cocktail.enumerate(enumeration_complexity='Low', dimensionality='2D')
#
# FileWriter("test", compounds_result, "maestro")
#
|
StarcoderdataPython
|
11208488
|
<reponame>neilom18/g5-chess<gh_stars>0
from time import time
print(int(time()% 10000))
|
StarcoderdataPython
|
9786766
|
from nltk.translate.bleu_score import sentence_bleu as bleu
from nltk.translate.bleu_score import SmoothingFunction
class Bleu(object):
def __init__(self, settings):
self.settings = settings
def eval(self, hypList, refList):
number = len(hypList)
        n_ref = len(refList) // number
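        # refList is assumed to be flat, with n_ref consecutive references per
        # hypothesis: e.g. with 2 hypotheses and 6 references, indices 0-2
        # belong to hypothesis 0 and 3-5 to hypothesis 1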
result = {
'bleu_1':0.0,
'bleu_2':0.0,
'bleu_3':0.0,
'bleu_4':0.0,
'bleu':0.0
}
for Index in range(0, number):
ref = [refList[i].split() for i in range(Index * n_ref, (Index+1) * n_ref)]
ref = [r[:-1] if r[-1] == '.' else r for r in ref]
hyp = hypList[Index].split()
if (hyp[-1] == '.'):
hyp = hyp[:-1]
#print type([ref]), type(ref), type(ref[0])
#print type(hyp), type(hyp[0])
Smooth = SmoothingFunction()
bleu_1 = bleu(ref, hyp, weights=[1], smoothing_function = Smooth.method1)
bleu_2 = bleu(ref, hyp, weights=[0, 1], smoothing_function = Smooth.method1)
bleu_3 = bleu(ref, hyp, weights=[0, 0, 1], smoothing_function = Smooth.method1)
bleu_4 = bleu(ref, hyp, weights=[0, 0, 0, 1], smoothing_function = Smooth.method1)
bleu_all = bleu(ref, hyp, weights=[0.25, 0.25, 0.25, 0.25], smoothing_function = Smooth.method1)
#print hyp, ref
#print Index, bleu_1, bleu_2, bleu_3, bleu_4
result['bleu_1'] += bleu_1
result['bleu_2'] += bleu_2
result['bleu_3'] += bleu_3
result['bleu_4'] += bleu_4
result['bleu'] += bleu_all
result['bleu_1'] /= number
result['bleu_2'] /= number
result['bleu_3'] /= number
result['bleu_4'] /= number
result['bleu'] /= number
return result
|
StarcoderdataPython
|
9783670
|
import copy
import topside as top
from topside.procedures.tests.testing_utils import NeverSatisfied
def one_component_engine():
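    # Test fixture: a single two-way component 'c1' bridging node 1 (pressurized
    # to 100) and node 2 (at 0), with 'open', 'closed' and 'halfway_open' states.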
states = {
'open': {
(1, 2, 'A1'): 1,
(2, 1, 'A2'): 1
},
'closed': {
(1, 2, 'A1'): top.CLOSED,
(2, 1, 'A2'): top.CLOSED
},
'halfway_open': {
(1, 2, 'A1'): 100,
(2, 1, 'A2'): 100
}
}
edges = [(1, 2, 'A1'), (2, 1, 'A2')]
c1 = top.PlumbingComponent('c1', states, edges)
mapping = {'c1': {1: 1, 2: 2}}
pressures = {1: (100, False), 2: (0, False)}
initial_states = {'c1': 'open'}
return top.PlumbingEngine({'c1': c1}, mapping, pressures, initial_states)
def single_procedure_suite():
close_action = top.StateChangeAction('c1', 'closed')
open_action = top.StateChangeAction('c1', 'open')
s1 = top.ProcedureStep('s1', close_action, [(
top.Immediate(), top.Transition('p1', 's2'))], 'PRIMARY')
s2 = top.ProcedureStep('s2', open_action, [], 'PRIMARY')
proc = top.Procedure('p1', [s1, s2])
return top.ProcedureSuite([proc], 'p1')
def branching_procedure_suite_no_options():
close_action = top.StateChangeAction('c1', 'closed')
open_action = top.StateChangeAction('c1', 'open')
halfway_open_action = top.StateChangeAction('c1', 'halfway_open')
s1 = top.ProcedureStep('s1', close_action, [
(NeverSatisfied(), top.Transition('p1', 's2')),
(NeverSatisfied(), top.Transition('p2', 's3'))], 'PRIMARY')
s2 = top.ProcedureStep('s2', halfway_open_action, [], 'PRIMARY')
s3 = top.ProcedureStep('s3', open_action, [], 'PRIMARY')
proc_1 = top.Procedure('p1', [s1, s2, s3])
proc_2 = top.Procedure('p2', [s1, s2, s3])
return top.ProcedureSuite([proc_1, proc_2], 'p1')
def branching_procedure_suite_one_option():
close_action = top.StateChangeAction('c1', 'closed')
open_action = top.StateChangeAction('c1', 'open')
halfway_open_action = top.StateChangeAction('c1', 'halfway_open')
s1 = top.ProcedureStep('s1', close_action, [
(NeverSatisfied(), top.Transition('p1', 's2')),
(top.Immediate(), top.Transition('p2', 's3'))], 'PRIMARY')
s2 = top.ProcedureStep('s2', halfway_open_action, {}, 'PRIMARY')
s3 = top.ProcedureStep('s3', open_action, {}, 'PRIMARY')
proc_1 = top.Procedure('p1', [s1, s2, s3])
proc_2 = top.Procedure('p2', [s1, s2, s3])
return top.ProcedureSuite([proc_1, proc_2], 'p1')
def branching_procedure_suite_two_options():
close_action = top.StateChangeAction('c1', 'closed')
open_action = top.StateChangeAction('c1', 'open')
halfway_open_action = top.StateChangeAction('c1', 'halfway_open')
s1 = top.ProcedureStep('s1', close_action, [
(top.Immediate(), top.Transition('p1', 's2')),
(top.Immediate(), top.Transition('p2', 's3'))], 'PRIMARY')
s2 = top.ProcedureStep('s2', halfway_open_action, {}, 'PRIMARY')
s3 = top.ProcedureStep('s3', open_action, {}, 'PRIMARY')
proc_1 = top.Procedure('p1', [s1, s2, s3])
proc_2 = top.Procedure('p2', [s1, s2, s3])
return top.ProcedureSuite([proc_1, proc_2], 'p1')
def test_load_suite():
p1s1 = top.ProcedureStep('p1s1', top.StateChangeAction('injector_valve', 'open'), [], 'PRIMARY')
suite_1 = top.ProcedureSuite([top.Procedure('p1', [p1s1])], 'p1')
p2s1 = top.ProcedureStep('p2s1', top.StateChangeAction('injector_valve', 'closed'), [],
'SECONDARY')
suite_2 = top.ProcedureSuite([top.Procedure('p2', [p2s1])], 'p2')
proc_eng = top.ProceduresEngine(None, suite_1)
assert proc_eng._suite == suite_1
assert proc_eng.current_procedure_id == 'p1'
assert proc_eng.current_step == p1s1
assert proc_eng.step_position == top.StepPosition.Before
proc_eng.load_suite(suite_2)
assert proc_eng._suite == suite_2
assert proc_eng.current_procedure_id == 'p2'
assert proc_eng.current_step == p2s1
assert proc_eng.step_position == top.StepPosition.Before
def test_execute_custom_action():
plumb_eng = one_component_engine()
proc_eng = top.ProceduresEngine(plumb_eng)
action = top.StateChangeAction('c1', 'closed')
assert plumb_eng.current_state('c1') == 'open'
proc_eng.execute(action)
assert plumb_eng.current_state('c1') == 'closed'
misc_action = top.MiscAction('Approach the tower')
proc_eng.execute(misc_action)
assert plumb_eng.current_state('c1') == 'closed'
def test_execute_current():
plumb_eng = one_component_engine()
proc_eng = top.ProceduresEngine(plumb_eng, single_procedure_suite())
assert proc_eng.current_step.step_id == 's1'
assert proc_eng.step_position == top.StepPosition.Before
assert plumb_eng.current_state('c1') == 'open'
proc_eng.execute_current()
assert proc_eng.current_step.step_id == 's1'
assert proc_eng.step_position == top.StepPosition.After
assert plumb_eng.current_state('c1') == 'closed'
proc_eng.execute_current() # No effect; engine is in a post-node
assert proc_eng.current_step.step_id == 's1'
assert proc_eng.step_position == top.StepPosition.After
assert plumb_eng.current_state('c1') == 'closed'
def test_ready_to_proceed_requires_post():
proc_eng = top.ProceduresEngine(None, single_procedure_suite())
assert proc_eng.ready_to_proceed() is False
def test_ready_to_proceed_if_condition_satisfied():
proc_eng = top.ProceduresEngine(None, single_procedure_suite())
proc_eng.execute_current()
assert proc_eng.ready_to_proceed() is True
def test_ready_to_proceed_one_is_enough():
proc_eng = top.ProceduresEngine(None, branching_procedure_suite_one_option())
proc_eng.execute_current()
assert proc_eng.ready_to_proceed() is True
def test_ready_to_proceed_no_options():
proc_eng = top.ProceduresEngine(None, branching_procedure_suite_no_options())
proc_eng.execute_current()
assert proc_eng.ready_to_proceed() is False
def test_proceed():
plumb_eng = one_component_engine()
proc_eng = top.ProceduresEngine(plumb_eng, single_procedure_suite())
proc_eng.execute_current()
assert proc_eng.current_step.step_id == 's1'
assert proc_eng.step_position == top.StepPosition.After
assert plumb_eng.current_state('c1') == 'closed'
proc_eng.proceed()
assert proc_eng.current_step.step_id == 's2'
assert proc_eng.step_position == top.StepPosition.Before
assert plumb_eng.current_state('c1') == 'closed'
proc_eng.proceed() # No effect; engine is in a pre-node
assert proc_eng.current_step.step_id == 's2'
assert proc_eng.step_position == top.StepPosition.Before
assert plumb_eng.current_state('c1') == 'closed'
def test_proceed_requires_satisfaction():
plumb_eng = one_component_engine()
proc_eng = top.ProceduresEngine(plumb_eng, branching_procedure_suite_no_options())
proc_eng.execute_current()
proc_eng.proceed()
assert proc_eng.current_procedure_id == 'p1'
assert proc_eng.current_step.step_id == 's1'
def test_proceed_follows_satisfied_condition():
plumb_eng = one_component_engine()
proc_eng = top.ProceduresEngine(plumb_eng, branching_procedure_suite_one_option())
proc_eng.execute_current()
proc_eng.proceed()
assert proc_eng.current_procedure_id == 'p2'
assert proc_eng.current_step.step_id == 's3'
def test_proceed_follows_highest_priority_condition():
plumb_eng = one_component_engine()
proc_eng = top.ProceduresEngine(plumb_eng, branching_procedure_suite_two_options())
proc_eng.execute_current()
proc_eng.proceed()
assert proc_eng.current_procedure_id == 'p1'
assert proc_eng.current_step.step_id == 's2'
def test_next_step():
plumb_eng = one_component_engine()
proc_eng = top.ProceduresEngine(plumb_eng, single_procedure_suite())
assert proc_eng.current_step.step_id == 's1'
assert plumb_eng.current_state('c1') == 'open'
proc_eng.next_step()
assert proc_eng.current_step.step_id == 's1'
assert plumb_eng.current_state('c1') == 'closed'
proc_eng.next_step()
assert proc_eng.current_step.step_id == 's2'
assert plumb_eng.current_state('c1') == 'open'
proc_eng.next_step()
assert proc_eng.current_step.step_id == 's2'
assert plumb_eng.current_state('c1') == 'open'
def test_transitions_respects_procedure_identifier():
plumb_eng = one_component_engine()
action = top.MiscAction('Do nothing')
s1 = top.ProcedureStep('s1', action, [(NeverSatisfied(), top.Transition('p1', 'same_name')),
(top.Immediate(), top.Transition('p2', 'same_name'))],
'PRIMARY')
same_name_1 = top.ProcedureStep('same_name', action, [], 'PRIMARY')
same_name_2 = top.ProcedureStep('same_name', action, [], 'PRIMARY')
proc_1 = top.Procedure('p1', [s1, same_name_1])
proc_2 = top.Procedure('p2', [same_name_2])
proc_suite = top.ProcedureSuite([proc_1, proc_2], 'p1')
proc_eng = top.ProceduresEngine(plumb_eng, proc_suite)
proc_eng.execute_current()
assert proc_eng.current_procedure_id == 'p1'
assert proc_eng.current_step.step_id == 's1'
proc_eng.next_step()
assert proc_eng.current_procedure_id == 'p2'
assert proc_eng.current_step.step_id == 'same_name'
def test_update_conditions_updates_pressures():
plumb_eng = one_component_engine()
plumb_eng.set_component_state('c1', 'open')
s1 = top.ProcedureStep('s1', None, [(top.Less(1, 75), top.Transition('p1', 's2'))], 'PRIMARY')
proc = top.Procedure('p1', [s1])
proc_suite = top.ProcedureSuite([proc], 'p1')
proc_eng = top.ProceduresEngine(plumb_eng, proc_suite)
proc_eng.execute_current()
assert proc_eng.ready_to_proceed() is False
plumb_eng.solve()
proc_eng.update_conditions()
assert proc_eng.ready_to_proceed() is True
def test_update_conditions_updates_time():
plumb_eng = one_component_engine()
plumb_eng.set_component_state('c1', 'open')
s1 = top.ProcedureStep('s1', None, [(top.WaitFor(10), 's2')], 'PRIMARY')
proc = top.Procedure('p1', [s1])
proc_suite = top.ProcedureSuite([proc], 'p1')
proc_eng = top.ProceduresEngine(plumb_eng, proc_suite)
proc_eng.execute_current()
assert proc_eng.ready_to_proceed() is False
plumb_eng.step(10)
proc_eng.update_conditions()
assert proc_eng.ready_to_proceed() is True
def test_step_advances_time_equally():
managed_eng = one_component_engine()
managed_eng.set_component_state('c1', 'open')
unmanaged_eng = copy.deepcopy(managed_eng)
proc_eng = top.ProceduresEngine(managed_eng, single_procedure_suite())
assert managed_eng.current_pressures() == unmanaged_eng.current_pressures()
proc_eng.step_time(1e6)
unmanaged_eng.step(1e6)
assert managed_eng.current_pressures() == unmanaged_eng.current_pressures()
def test_step_updates_conditions():
plumb_eng = one_component_engine()
plumb_eng.set_component_state('c1', 'open')
s1 = top.ProcedureStep('s1', None, [(top.Less(1, 75), 's2')], 'PRIMARY')
proc = top.Procedure('p1', [s1])
proc_suite = top.ProcedureSuite([proc], 'p1')
proc_eng = top.ProceduresEngine(plumb_eng, proc_suite)
proc_eng.execute_current()
assert proc_eng.ready_to_proceed() is False
proc_eng.step_time(1e6)
assert proc_eng.ready_to_proceed() is True
def test_reset():
plumb_eng = one_component_engine()
proc_eng = top.ProceduresEngine(plumb_eng, branching_procedure_suite_one_option())
assert proc_eng.current_procedure_id == 'p1'
assert proc_eng.current_step.step_id == 's1'
assert plumb_eng.current_state('c1') == 'open'
proc_eng.next_step()
assert proc_eng.current_procedure_id == 'p1'
assert proc_eng.current_step.step_id == 's1'
assert plumb_eng.current_state('c1') == 'closed'
proc_eng.next_step()
assert proc_eng.current_procedure_id == 'p2'
assert proc_eng.current_step.step_id == 's3'
assert plumb_eng.current_state('c1') == 'open'
proc_eng.reset()
assert proc_eng.current_procedure_id == 'p1'
assert proc_eng.current_step.step_id == 's1'
assert plumb_eng.current_state('c1') == 'open'
def test_reset_clears_conditions():
plumb_eng = one_component_engine()
plumb_eng.set_component_state('c1', 'open')
s1 = top.ProcedureStep('s1', None, [(top.WaitFor(10), 's2')], 'PRIMARY')
proc = top.Procedure('p1', [s1])
proc_suite = top.ProcedureSuite([proc], 'p1')
proc_eng = top.ProceduresEngine(plumb_eng, proc_suite)
proc_eng.execute_current()
assert proc_eng.ready_to_proceed() is False
plumb_eng.step(10)
proc_eng.update_conditions()
assert proc_eng.ready_to_proceed() is True
proc_eng.reset()
proc_eng.execute_current()
assert proc_eng.ready_to_proceed() is False
|
StarcoderdataPython
|
3334822
|
# coding=utf-8
import os
import re
import datetime
from django.conf import settings
from dju_common.tools import dtstr_to_datetime
from .image import adjust_image, image_get_format
from .tools import get_profile_configs, get_variant_label, get_relative_path_from_img_id, media_path, save_file
from . import settings as dju_settings
re_tmp = re.compile(r'^{pref}(?P<dtstr>[a-z0-9]{7,9})_[a-z0-9]{4}.*$'.replace(
'{pref}', dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX
))
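# matches temporary upload filenames: the configured prefix, then what appears to be
# a 7-9 character base-36 encoded timestamp (captured as 'dtstr'), an underscore
# and a 4-character suffix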
def get_files_recursive(path):
for root, dirs, files in os.walk(path):
for fn in files:
yield os.path.join(root, fn).replace('\\', '/')
def remove_old_tmp_files(profiles=None, max_lifetime=(7 * 24)):
"""
Removes old temp files that is older than expiration_hours.
If profiles is None then will be use all profiles.
"""
assert isinstance(profiles, (list, tuple)) or profiles is None
if profiles is None:
profiles = dju_settings.DJU_IMG_UPLOAD_PROFILES.keys()
profiles = set(('default',) + tuple(profiles))
total = removed = 0
old_dt = datetime.datetime.utcnow() - datetime.timedelta(hours=max_lifetime)
for profile in profiles:
conf = get_profile_configs(profile=profile)
root_path = os.path.join(settings.MEDIA_ROOT, dju_settings.DJU_IMG_UPLOAD_SUBDIR, conf['PATH'])
for file_path in get_files_recursive(root_path):
m = re_tmp.match(os.path.basename(file_path))
if m is None:
continue
total += 1
fdt = dtstr_to_datetime(m.group('dtstr'))
if fdt and old_dt > fdt:
os.remove(file_path)
removed += 1
return removed, total
def remake_images_variants(profiles, clear=True):
"""
Перестворює варіанти для картинок згідно налаштувань.
profiles - список профілів, для картинок яких треба перестворити варіанти.
clear - якщо True, тоді перед створенням варіантів будуть видалені ВСІ попередні варіанти.
"""
assert isinstance(profiles, (list, tuple)) or profiles is None
if profiles is None:
profiles = dju_settings.DJU_IMG_UPLOAD_PROFILES.keys()
profiles = set(('default',) + tuple(profiles))
removed = remade = 0
for profile in profiles:
conf = get_profile_configs(profile=profile)
root_path = os.path.join(settings.MEDIA_ROOT, dju_settings.DJU_IMG_UPLOAD_SUBDIR, conf['PATH'])
if clear:
for fn in get_files_recursive(root_path):
if dju_settings.DJU_IMG_UPLOAD_VARIANT_SUFFIX in os.path.basename(fn):
os.remove(fn)
removed += 1
for fn in get_files_recursive(root_path):
filename = os.path.basename(fn)
if dju_settings.DJU_IMG_UPLOAD_VARIANT_SUFFIX in filename:
continue
if dju_settings.DJU_IMG_UPLOAD_MAIN_SUFFIX not in filename:
continue
img_id = '{profile}:{name}'.format(
profile=profile,
name=filename[:filename.find(dju_settings.DJU_IMG_UPLOAD_MAIN_SUFFIX)]
)
with open(fn, 'rb') as f:
for v_conf in conf['VARIANTS']:
label = v_conf['LABEL']
if not label:
label = get_variant_label(v_conf)
v_f = adjust_image(f, max_size=v_conf['MAX_SIZE'], new_format=v_conf['FORMAT'],
jpeg_quality=v_conf['JPEG_QUALITY'], fill=v_conf['FILL'],
stretch=v_conf['STRETCH'], return_new_image=True)
v_relative_path = get_relative_path_from_img_id(img_id, variant_label=label,
ext=image_get_format(v_f))
v_full_path = media_path(v_relative_path)
save_file(v_f, v_full_path)
remade += 1
return removed, remade
def update_wrong_hashes():
"""
Оновлює хеші в назвах файлів.
Запускати після зміни ключа DJU_IMG_UPLOAD_KEY.
"""
pass # todo do it
def clean():
"""
Видаляє файли, в яких невірний хеш та зайві варіанти.
"""
pass # todo do it
|
StarcoderdataPython
|
6619137
|
import os
import pandas as pd
output_dir = os.path.join('/sb-personal/cvqa/', 'data/visual-genome/8-26-2017/generated-data/')
questions_output_file = os.path.join(output_dir, 'actions_vg_expanded_dataset-v3.csv')
new_questions_output_file = os.path.join(output_dir, 'specific_relevance_actions_vg_expanded_dataset-v2.csv')
df = pd.read_csv(questions_output_file)
print len(df)
reduce_list = ['no hold found','no stand found','no sit found']
for reduce_item in reduce_list:
length = len(df[df['answer'] == reduce_item])
remove_qa_ids = df[df['answer'] == reduce_item].sample(length-2000)['qa_id'].tolist()
remove_qa_ids += [(-1 * qa) for qa in remove_qa_ids]
remove_qa_ids = set(remove_qa_ids)
df = df[~df['qa_id'].isin(remove_qa_ids)]
grouped_df = df.groupby('answer', as_index=False).count().sort_values(['image_file'])
print grouped_df[['answer','image_file']]
print len(df)
df = df.copy()
df['specific_answer'] = ''
i = 0
total = len(df)
for _,row in df[df['qa_id'] < 0].iterrows():
    if i % 1000 == 0:
        print 'Question: [%d/%d]' % (i,total)
qa_id = -1 * row['qa_id']
# print qa_id
specific_answer = row['answer'][3:-7]
df.loc[df['qa_id'] == qa_id, 'answer'] = 'relevant because ' + row['answer'][3:]
df.loc[df['qa_id'] == qa_id, 'specific_answer'] = specific_answer
row['specific_answer'] = specific_answer
i+=1
# print df[df['qa_id'] == qa_id]
# df[df['qa_id']==qa_id]['answer'] = answer
# print df
df.to_csv(new_questions_output_file)
# grouped_df = df.groupby('answer', as_index=False).count().sort_values(['image_file'])
# print grouped_df[['answer','image_file']]
# df.to_csv(os.path.join(output_dir, 'sub_relevance_actions_vg_expanded_dataset.csv'))
|
StarcoderdataPython
|
4970178
|
from ._SetCameraInfo import *
|
StarcoderdataPython
|
299506
|
import theano.tensor
try:
    # Statsmodels is optional
    from statsmodels.genmod.families.links import (identity, logit, inverse_power, log)
except ImportError:
    identity, logit, inverse_power, log = [None] * 4
__all__ = ['Identity', 'Logit', 'Inverse', 'Log']
class LinkFunction(object):
"""Base class to define link functions.
If initialization via statsmodels is desired, define sm.
"""
def __init__(self, theano_link=None, sm_link=None):
if theano_link is not None:
self.theano = theano_link
if sm_link is not None:
self.sm = sm_link
class Identity(LinkFunction):
theano = lambda self, x: x
sm = identity
class Logit(LinkFunction):
theano = theano.tensor.nnet.sigmoid
sm = logit
class Inverse(LinkFunction):
theano = theano.tensor.inv
sm = inverse_power
class Log(LinkFunction):
theano = theano.tensor.log
sm = log
|
StarcoderdataPython
|
5199231
|
<reponame>redshodan/codepunks<filename>setup.py
import os
import runpy
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.md')).read()
tests_require = [
'pytest',
'pytest-cov',
'pytest-runner',
]
# Extract the version from codepunks
VERSION = runpy.run_path(os.path.join(here, "codepunks/version.py"))["VERSION"]
def requirements(filename):
    if os.path.exists(filename):
        return [l for l in open(filename).read().splitlines()
                if not l.startswith("#")]
    else:
        return []
setup(name='codepunks',
version=VERSION,
description='Codepunks, yet another base library',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Operating System :: POSIX",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Development Status :: 3 - Alpha",
],
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/redshodan/codepunks',
keywords=[],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
platforms=["Any"],
test_suite='codepunks',
install_requires=requirements("requirements.txt"),
setup_requires=['pytest-runner'],
tests_require=tests_require,
license="Apache Software License",
)
|
StarcoderdataPython
|
11368755
|
<gh_stars>1-10
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module is intended to be run in the following way:
$ python -m microanalyst <filename.json>
"""
import os
import sys
import argparse
import microanalyst.model
def parse(args):
"""Parse mandatory JSON filename."""
parser = argparse.ArgumentParser(usage='python -m microanalyst <file.json>')
parser.add_argument('filename')
return parser.parse_args(args)
def enable_interactive_mode():
"""Equivalent to: $ python -i script.py"""
os.environ['PYTHONINSPECT'] = '1'
if __name__ == '__main__':
try:
params = parse(sys.argv[1:])
if 'filename' in params:
model = microanalyst.model.from_file(params.filename)
enable_interactive_mode()
print 'Type "model", "help(model)" for more information.'
except Exception as ex:
print >> sys.stderr, ex
|
StarcoderdataPython
|
176688
|
<reponame>DalavanCloud/owning-a-home-api<filename>ratechecker/tests/helpers.py<gh_stars>1-10
from cStringIO import StringIO
from datetime import date
from zipfile import ZipFile
from ratechecker.dataset import Dataset
def get_sample_cover_sheet(day=None):
day = day or date.today()
return (
'<data>'
'<ProcessDate><Date>{day}</Date></ProcessDate>'
'<Scenarios>'
'<Scenario>'
'<ScenarioNo>0</ScenarioNo>'
'<AdjustedRates>3.25</AdjustedRates>'
'<AdjustedPoints>1.75</AdjustedPoints>'
'</Scenario>'
'</Scenarios>'
'</data>'
).format(
day=day.strftime('%Y%m%d')
)
def get_sample_dataset_zipfile(day=None, datasets={}):
day = day or date.today()
f = StringIO()
zf = ZipFile(f, 'w')
zf.writestr('CoverSheet.xml', get_sample_cover_sheet(day=day))
if not datasets:
datestr = day.strftime('%Y%m%d')
datasets = {
'{}_{}.txt'.format(datestr, k): 'dummy'
for k in Dataset.loaders.keys()
}
for filename, contents in datasets.items():
zf.writestr(filename, contents)
zf.close()
f.seek(0)
return f
def write_sample_dataset(filename):
content = get_sample_dataset_zipfile().read()
with open(filename, 'wb') as f:
f.write(content)
def get_sample_dataset(day=None, datasets={}):
return Dataset(get_sample_dataset_zipfile(day=day, datasets=datasets))
|
StarcoderdataPython
|
4921201
|
#!/usr/bin/python
# Copyright (c) 2013, <NAME>.
# All rights reserved.
#
# Released under the BSD 2-Clause license as published at the link below.
# http://opensource.org/licenses/BSD-2-Clause
import datetime
import functools
import json
import logging
import re
import socket
import xmlrpclib
import bottle
import pkg_resources
from openvisualizer import VERSION, PACKAGE_NAME
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
st = logging.StreamHandler()
st.setFormatter(logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s', datefmt="%H:%M:%S"))
logger.addHandler(st)
# add default parameters to all bottle templates
bottle.view = functools.partial(bottle.view, ovVersion=VERSION)
class WebServer:
""" Provides web UI for OpenVisualizer."""
def __init__(self, bottle_srv, rpc_server_addr, debug):
"""
:param bottle_srv: Bottle server instance
"""
logger.debug('create instance')
# store params
self.rpc_server = xmlrpclib.ServerProxy('http://{}:{}'.format(*rpc_server_addr))
self.bottle_srv = bottle_srv
self._define_routes()
# To find page templates
templates_path = '/'.join(('client', 'web_files', 'templates'))
templates_path = pkg_resources.resource_filename(PACKAGE_NAME, templates_path)
bottle.TEMPLATE_PATH.append(templates_path)
# ======================== public ==========================================
# ======================== private =========================================
def _define_routes(self):
"""
        Matches web URL to implementing method. Cannot use @route annotations on the methods due to the class-based
implementation.
"""
self.bottle_srv.route(path='/', callback=self._show_moteview)
self.bottle_srv.route(path='/moteview', callback=self._show_moteview)
self.bottle_srv.route(path='/moteview/:moteid', callback=self._show_moteview)
self.bottle_srv.route(path='/motedata/:moteid', callback=self._get_mote_data)
self.bottle_srv.route(path='/toggleDAGroot/:moteid', callback=self._toggle_dagroot)
self.bottle_srv.route(path='/eventBus', callback=self._show_event_bus)
self.bottle_srv.route(path='/routing', callback=self._show_routing)
self.bottle_srv.route(path='/routing/dag', callback=self._show_dag)
self.bottle_srv.route(path='/connectivity', callback=self._show_connectivity)
self.bottle_srv.route(path='/connectivity/motes', callback=self._show_motes_connectivity)
self.bottle_srv.route(path='/eventdata', callback=self._get_event_data)
self.bottle_srv.route(path='/wiresharkDebug/:enabled', callback=self._set_wireshark_debug)
self.bottle_srv.route(path='/gologicDebug/:enabled', callback=WebServer._set_gologic_debug)
self.bottle_srv.route(path='/topology', callback=self._topology_page)
self.bottle_srv.route(path='/topology/data', callback=self._topology_data)
self.bottle_srv.route(path='/topology/download', callback=self._topology_download)
self.bottle_srv.route(path='/topology/motes', method='POST', callback=self._topology_motes_update)
self.bottle_srv.route(path='/topology/connections', method='PUT', callback=self._topology_connections_create)
self.bottle_srv.route(path='/topology/connections', method='POST', callback=self._topology_connections_update)
self.bottle_srv.route(path='/topology/connections', method='DELETE', callback=self._topology_connections_delete)
self.bottle_srv.route(path='/topology/route', method='GET', callback=self._topology_route_retrieve)
self.bottle_srv.route(path='/static/<filepath:path>', callback=WebServer._server_static)
@bottle.view('moteview.tmpl')
def _show_moteview(self, moteid=None):
"""
Collects the list of motes, and the requested mote to view.
:param moteid: 16-bit ID of mote (optional)
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug("moteview moteid parameter is {0}".format(moteid))
try:
mote_list = self.rpc_server.get_mote_dict().keys()
except socket.error as err:
logger.error(err)
return {}
tmpl_data = {
'motelist': mote_list,
'requested_mote': moteid if moteid else 'none',
}
return tmpl_data
@staticmethod
def _server_static(filepath):
static_path = '/'.join(('client', 'web_files', 'static'))
static_path = pkg_resources.resource_filename(PACKAGE_NAME, static_path)
return bottle.static_file(filepath, root=static_path)
def _toggle_dagroot(self, moteid):
"""
Triggers toggle DAGroot state, via MoteState. No real response. Page is updated when next retrieve mote data.
:param moteid: 16-bit ID of mote
"""
logger.debug('Toggle root status for moteid {0}'.format(moteid))
try:
ms = self.rpc_server.get_mote_state(moteid)
except xmlrpclib.Fault as err:
logger.error("A fault occurred: {}".format(err))
return '{"result" : "fail"}'
except socket.error as err:
logger.error(err)
return '{}'
if ms:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Found mote {0} in mote_states'.format(moteid))
try:
self.rpc_server.set_root(moteid)
except socket.error as err:
logger.error(err)
return '{}'
return '{"result" : "success"}'
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Mote {0} not found in mote_states'.format(moteid))
return '{"result" : "fail"}'
def _get_mote_data(self, moteid):
"""
Collects data for the provided mote.
:param moteid: 16-bit ID of mote
"""
states = {}
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Get JSON data for moteid {0}'.format(moteid))
try:
states = self.rpc_server.get_mote_state(moteid)
except xmlrpclib.Fault as err:
logger.error("Could not fetch mote state for mote {}: {}".format(moteid, err))
return states
except socket.error as err:
logger.error(err)
return {}
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Found mote {0} in mote_states'.format(moteid))
return states
def _set_wireshark_debug(self, enabled):
"""
Selects whether eventBus must export debug packets.
:param enabled: 'true' if enabled; any other value considered false
"""
logger.info('Enable wireshark debug: {0}'.format(enabled))
try:
if enabled.strip() == 'true':
_ = self.rpc_server.enable_wireshark_debug()
elif enabled.strip() == 'false':
_ = self.rpc_server.disable_wireshark_debug()
else:
logger.error('Illegal value for \'_set_wireshark_debug\'')
except xmlrpclib.Fault as err:
logger.error("Caught a server fault: {}".format(err))
except socket.error as err:
logger.error(err)
@staticmethod
def _set_gologic_debug(enabled):
logger.info('Enable GoLogic debug : {0}'.format(enabled))
# vcdlogger.VcdLogger().set_enabled(enabled == 'true')
return '{"result" : "success"}'
@bottle.view('eventBus.tmpl')
def _show_event_bus(self):
""" Simple page; data for the page template is identical to the data for periodic updates of event list. """
tmpl_data = self._get_event_data().copy()
return tmpl_data
def _show_dag(self):
try:
states, edges = self.rpc_server.get_dag()
except socket.error as err:
logger.error(err)
return {}
return {'states': states, 'edges': edges}
@bottle.view('connectivity.tmpl')
def _show_connectivity(self):
return {}
def _show_motes_connectivity(self):
try:
states, edges = self.rpc_server.get_motes_connectivity()
except socket.error as err:
logger.error(err)
return {}
return {'states': states, 'edges': edges}
@bottle.view('routing.tmpl')
def _show_routing(self):
return {}
@bottle.view('topology.tmpl')
def _topology_page(self):
""" Retrieve the HTML/JS page. """
return {}
def _topology_data(self):
""" Retrieve the topology data, in JSON format. """
data = {}
try:
data = self.rpc_server.get_network_topology()
except socket.error as err:
logger.error(err)
return data
def _topology_motes_update(self):
""" Update the network topology (simulation only)"""
motes_temp = {}
for (k, v) in bottle.request.forms.items():
m = re.match(r"motes\[([0-9]*)\]\[([a-z]*)\]", k)
assert m
index = int(m.group(1))
param = m.group(2)
try:
v = int(v)
except ValueError:
try:
v = float(v)
except ValueError:
pass
if index not in motes_temp:
motes_temp[index] = {}
motes_temp[index][param] = v
try:
_ = self.rpc_server.update_network_topology(json.dumps(motes_temp))
except socket.error as err:
logger.error(err)
def _topology_connections_create(self):
data = bottle.request.forms
assert sorted(data.keys()) == sorted(['fromMote', 'toMote'])
from_mote = int(data['fromMote'])
to_mote = int(data['toMote'])
try:
_ = self.rpc_server.create_motes_connection(from_mote, to_mote)
except socket.error as err:
logger.error(err)
def _topology_connections_update(self):
data = bottle.request.forms
assert sorted(data.keys()) == sorted(['fromMote', 'toMote', 'pdr'])
from_mote = int(data['fromMote'])
to_mote = int(data['toMote'])
pdr = float(data['pdr'])
try:
_ = self.rpc_server.update_motes_connection(from_mote, to_mote, pdr)
except socket.error as err:
logger.error(err)
def _topology_connections_delete(self):
data = bottle.request.forms
assert sorted(data.keys()) == sorted(['fromMote', 'toMote'])
from_mote = int(data['fromMote'])
to_mote = int(data['toMote'])
try:
_ = self.rpc_server.delete_motes_connection(from_mote, to_mote)
except socket.error as err:
logger.error(err)
def _topology_route_retrieve(self):
data = bottle.request.query
assert data.keys() == ['destination']
destination_eui = [0x14, 0x15, 0x92, 0xcc, 0x00, 0x00, 0x00, int(data['destination'])]
route = {}
try:
route = self.rpc_server.retrieve_routing_path(destination_eui)
except socket.error:
pass
return route
def _topology_download(self):
""" Retrieve the topology data, in JSON format, and download it. """
data = self._topology_data()
now = datetime.datetime.now()
try:
dagroot = self.rpc_server.get_dagroot()
except socket.error as err:
logger.error(err)
return {}
if dagroot is not None:
dagroot = ''.join('%02x' % b for b in dagroot)
data['DAGroot'] = dagroot
bottle.response.headers['Content-disposition'] = 'attachment; filename=topology_data_' + now.strftime(
"%d-%m-%y_%Hh%M") + '.json'
bottle.response.headers['filename'] = 'test.json'
bottle.response.headers['Content-type'] = 'application/json'
return data
def _get_event_data(self):
try:
res = {
'isDebugPkts': 'true' if self.rpc_server.get_wireshark_debug() else 'false',
'stats': self.rpc_server.get_ebm_stats(),
}
except socket.error as err:
logger.error(err)
return {}
return res
|
StarcoderdataPython
|
6498465
|
import re
import textwrap
import unicodedata
import Default.comment
import sublime
import sublime_plugin
def previous_line(view, sr):
"""sr should be a Region covering the entire hard line"""
if sr.begin() == 0:
return None
else:
return view.full_line(sr.begin() - 1)
def next_line(view, sr):
"""sr should be a Region covering the entire hard line, including
the newline"""
if sr.end() == view.size():
return None
else:
return view.full_line(sr.end())
separating_line_pattern = re.compile("^[\\t ]*\\n?$")
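# a line separates paragraphs if it is empty or contains only spaces and tabs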
def is_paragraph_separating_line(view, sr):
return separating_line_pattern.match(view.substr(sr)) is not None
def has_prefix(view, line, prefix):
if not prefix:
return True
line_start = view.substr(sublime.Region(line.begin(), line.begin() + len(prefix)))
return line_start == prefix
def expand_to_paragraph(view, tp):
sr = view.full_line(tp)
if is_paragraph_separating_line(view, sr):
return sublime.Region(tp, tp)
required_prefix = None
# If the current line starts with a comment, only select lines that are also
# commented
(line_comments, block_comments) = Default.comment.build_comment_data(view, tp)
dataStart = Default.comment.advance_to_first_non_white_space_on_line(view, sr.begin())
for c in line_comments:
(start, disable_indent) = c
comment_region = sublime.Region(dataStart, dataStart + len(start))
if view.substr(comment_region) == start:
required_prefix = view.substr(sublime.Region(sr.begin(), comment_region.end()))
break
first = sr.begin()
prev = sr
while True:
prev = previous_line(view, prev)
if (prev is None or is_paragraph_separating_line(view, prev) or
not has_prefix(view, prev, required_prefix)):
break
else:
first = prev.begin()
last = sr.end()
next = sr
while True:
next = next_line(view, next)
if (next is None or is_paragraph_separating_line(view, next) or
not has_prefix(view, next, required_prefix)):
break
else:
last = next.end()
return sublime.Region(first, last)
def all_paragraphs_intersecting_selection(view, sr):
paragraphs = []
para = expand_to_paragraph(view, sr.begin())
if not para.empty():
paragraphs.append(para)
while True:
line = next_line(view, para)
if line is None or line.begin() >= sr.end():
break
if not is_paragraph_separating_line(view, line):
para = expand_to_paragraph(view, line.begin())
paragraphs.append(para)
else:
para = line
return paragraphs
class OldExpandSelectionToParagraphCommand(sublime_plugin.TextCommand):
def run(self, edit):
regions = []
for s in self.view.sel():
regions.append(sublime.Region(
expand_to_paragraph(self.view, s.begin()).begin(),
expand_to_paragraph(self.view, s.end()).end()))
for r in regions:
self.view.sel().add(r)
class OldWrapLinesCommand(sublime_plugin.TextCommand):
def extract_prefix(self, sr):
lines = self.view.split_by_newlines(sr)
if len(lines) == 0:
return None
prefix = ''
for char in self.view.substr(lines[0]):
cat = unicodedata.category(char)[0]
if ord(char) > 32 and cat != 'Z' and cat != 'P':
break
prefix += char
if not prefix:
return None
for line in lines[1:]:
if self.view.substr(sublime.Region(line.begin(), line.begin() + len(prefix))) != prefix:
return None
return prefix
def width_in_spaces(self, str, tab_width):
sum = 0
for c in str:
if c == '\t':
sum += tab_width - 1
return sum
def run(self, edit, width=0):
if width == 0 and self.view.settings().get("wrap_width"):
try:
width = int(self.view.settings().get("wrap_width"))
except TypeError:
pass
if width == 0 and self.view.settings().get("rulers"):
# try and guess the wrap width from the ruler, if any
try:
width = int(self.view.settings().get("rulers")[0])
except ValueError:
pass
except TypeError:
pass
if width == 0:
width = 78
# Make sure tabs are handled as per the current buffer
tab_width = 8
if self.view.settings().get("tab_size"):
try:
tab_width = int(self.view.settings().get("tab_size"))
except TypeError:
pass
        if tab_width == 0:
            tab_width = 8
paragraphs = []
for s in self.view.sel():
paragraphs.extend(all_paragraphs_intersecting_selection(self.view, s))
if len(paragraphs) > 0:
self.view.sel().clear()
for p in paragraphs:
self.view.sel().add(p)
            # This isn't an ideal way to do it, as we lose the position of the
# cursor within the paragraph: hence why the paragraph is selected
# at the end.
for s in self.view.sel():
wrapper = textwrap.TextWrapper()
wrapper.expand_tabs = False
wrapper.width = width
prefix = self.extract_prefix(s)
if prefix:
wrapper.initial_indent = prefix
wrapper.subsequent_indent = prefix
wrapper.width -= self.width_in_spaces(prefix, tab_width)
if wrapper.width < 0:
continue
txt = self.view.substr(s)
if prefix:
txt = txt.replace(prefix, u"")
txt = txt.expandtabs(tab_width)
txt = wrapper.fill(txt) + u"\n"
self.view.replace(edit, s, txt)
# It's unhelpful to have the entire paragraph selected, just leave the
# selection at the end
ends = [s.end() - 1 for s in self.view.sel()]
self.view.sel().clear()
for pt in ends:
self.view.sel().add(sublime.Region(pt))
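# A minimal standalone sketch of the prefix-preserving wrap technique used
# above (the sample prefix and text are illustrative assumptions, not part
# of the plugin); the guard keeps it inert when loaded by Sublime Text.
if __name__ == '__main__':
    import textwrap
    prefix = '# '
    text = '# a long comment line that should be rewrapped ' * 3
    wrapper = textwrap.TextWrapper(width=40,
                                   initial_indent=prefix,
                                   subsequent_indent=prefix)
    # strip the prefix, wrap the bare text, and re-indent every line with it
    print(wrapper.fill(text.replace(prefix, '')))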
|
StarcoderdataPython
|
5124507
|
import time
import json
import random
import paho.mqtt.client as mqtt
from threading import Timer
from ruuvitag_sensor.ruuvi import RuuviTagSensor  # currently unused: placeholder values are published below
MQTTHOST = "mqtt.preview.oltd.de"
MQTTPORT = 8883
client = mqtt.Client('rpi-gateway_%d' % (random.randint(0, 1024)))
client.tls_set(ca_certs='chain.pem', certfile='device_cert.pem', keyfile='device_key.pem')
def on_mqtt_log(client, userdata, level, buf):
print(client, userdata, level, buf)
def on_mqtt_connect(client, userdata, flags, rc):
print("Connected to MQTT server {} with result code {} ({}).".format(MQTTHOST, mqtt.connack_string(rc), rc))
def on_mqtt_disconnect(client, userdata, rc):
print("Disconnected with code {} ({})".format(mqtt.connack_string(rc), rc))
def on_mqtt_message(client, userdata, msg):
print("Got: " + msg.topic+" "+str(msg.payload))
def push_data_ruuvitag():
msg = {
"type": "attributes",
"value": {
"temperature": 17,
"pressure": 16,
"humidity": 15,
"battery": 14,
"acceleration": {
"x": 10,
"y": 9,
"z": 8
}
}
}
print(client.publish("data-ingest", payload=json.dumps(msg).encode('UTF-8')))
print(msg)
t = Timer(2, push_data_ruuvitag)
t.start()
client.on_connect = on_mqtt_connect
client.on_message = on_mqtt_message
client.on_disconnect = on_mqtt_disconnect
client.on_log = on_mqtt_log
client.connect_async(MQTTHOST, MQTTPORT)
client.loop_start()
t = Timer(2, push_data_ruuvitag)
t.start()
while True:
time.sleep(1)
|
StarcoderdataPython
|
224
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All KeyTypes and which languages support them."""
# Placeholder for import for type annotations
from tink import aead
from tink import daead
from tink import hybrid
from tink import mac
from tink import prf
from tink import signature
from tink import streaming_aead
from tink.proto import tink_pb2
# All languages supported by cross-language tests.
ALL_LANGUAGES = ['cc', 'java', 'go', 'python']
# All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.')
AEAD_KEY_TYPES = [
'AesEaxKey',
'AesGcmKey',
'AesGcmSivKey',
'AesCtrHmacAeadKey',
'ChaCha20Poly1305Key',
'XChaCha20Poly1305Key',
]
DAEAD_KEY_TYPES = ['AesSivKey']
STREAMING_AEAD_KEY_TYPES = [
'AesCtrHmacStreamingKey',
'AesGcmHkdfStreamingKey',
]
HYBRID_PRIVATE_KEY_TYPES = ['EciesAeadHkdfPrivateKey']
MAC_KEY_TYPES = [
'AesCmacKey',
'HmacKey',
]
SIGNATURE_KEY_TYPES = [
'EcdsaPrivateKey',
'Ed25519PrivateKey',
'RsaSsaPkcs1PrivateKey',
'RsaSsaPssPrivateKey',
]
PRF_KEY_TYPES = [
'AesCmacPrfKey',
'HmacPrfKey',
'HkdfPrfKey',
]
ALL_KEY_TYPES = (
AEAD_KEY_TYPES + DAEAD_KEY_TYPES + STREAMING_AEAD_KEY_TYPES +
HYBRID_PRIVATE_KEY_TYPES + MAC_KEY_TYPES + SIGNATURE_KEY_TYPES +
PRF_KEY_TYPES)
# All languages that are supported by a KeyType
SUPPORTED_LANGUAGES = {
'AesEaxKey': ['cc', 'java', 'python'],
'AesGcmKey': ['cc', 'java', 'go', 'python'],
'AesGcmSivKey': ['cc', 'python'],
'AesCtrHmacAeadKey': ['cc', 'java', 'go', 'python'],
'ChaCha20Poly1305Key': ['java', 'go'],
'XChaCha20Poly1305Key': ['cc', 'java', 'go', 'python'],
'AesSivKey': ['cc', 'java', 'go', 'python'],
'AesCtrHmacStreamingKey': ['cc', 'java', 'go', 'python'],
'AesGcmHkdfStreamingKey': ['cc', 'java', 'go', 'python'],
'EciesAeadHkdfPrivateKey': ['cc', 'java', 'go', 'python'],
'AesCmacKey': ['cc', 'java', 'go', 'python'],
'HmacKey': ['cc', 'java', 'go', 'python'],
'EcdsaPrivateKey': ['cc', 'java', 'go', 'python'],
'Ed25519PrivateKey': ['cc', 'java', 'go', 'python'],
'RsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'RsaSsaPssPrivateKey': ['cc', 'java', 'python'],
'AesCmacPrfKey': ['cc', 'java', 'go', 'python'],
'HmacPrfKey': ['cc', 'java', 'go', 'python'],
'HkdfPrfKey': ['cc', 'java', 'go', 'python'],
}
KEY_TYPE_FROM_URL = {
'type.googleapis.com/google.crypto.tink.' + key_type: key_type
for key_type in ALL_KEY_TYPES}
# For each KeyType, a list of all KeyTemplate Names that must be supported.
KEY_TEMPLATE_NAMES = {
'AesEaxKey': ['AES128_EAX', 'AES256_EAX'],
'AesGcmKey': ['AES128_GCM', 'AES256_GCM'],
'AesGcmSivKey': ['AES128_GCM_SIV', 'AES256_GCM_SIV'],
'AesCtrHmacAeadKey': ['AES128_CTR_HMAC_SHA256', 'AES256_CTR_HMAC_SHA256'],
'ChaCha20Poly1305Key': ['CHACHA20_POLY1305'],
'XChaCha20Poly1305Key': ['XCHACHA20_POLY1305'],
'AesSivKey': ['AES256_SIV'],
'AesCtrHmacStreamingKey': [
'AES128_CTR_HMAC_SHA256_4KB',
'AES256_CTR_HMAC_SHA256_4KB',
],
'AesGcmHkdfStreamingKey': [
'AES128_GCM_HKDF_4KB',
'AES256_GCM_HKDF_4KB',
'AES256_GCM_HKDF_1MB',
],
'EciesAeadHkdfPrivateKey': [
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256'
],
'AesCmacKey': ['AES_CMAC'],
'HmacKey': [
'HMAC_SHA256_128BITTAG', 'HMAC_SHA256_256BITTAG',
'HMAC_SHA512_256BITTAG', 'HMAC_SHA512_512BITTAG'
],
'EcdsaPrivateKey': [
'ECDSA_P256', 'ECDSA_P384', 'ECDSA_P384_SHA384', 'ECDSA_P521',
'ECDSA_P256_IEEE_P1363', 'ECDSA_P384_IEEE_P1363',
'ECDSA_P384_SHA384_IEEE_P1363', 'ECDSA_P521_IEEE_P1363'
],
'Ed25519PrivateKey': ['ED25519'],
'RsaSsaPkcs1PrivateKey': [
'RSA_SSA_PKCS1_3072_SHA256_F4', 'RSA_SSA_PKCS1_4096_SHA512_F4'
],
'RsaSsaPssPrivateKey': [
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4',
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4'
],
'AesCmacPrfKey': ['AES_CMAC_PRF'],
'HmacPrfKey': ['HMAC_PRF_SHA256', 'HMAC_PRF_SHA512'],
'HkdfPrfKey': ['<KEY>'],
}
# KeyTemplate (as Protobuf) for each KeyTemplate name.
KEY_TEMPLATE = {
'AES128_EAX':
aead.aead_key_templates.AES128_EAX,
'AES256_EAX':
aead.aead_key_templates.AES256_EAX,
'AES128_GCM':
aead.aead_key_templates.AES128_GCM,
'AES256_GCM':
aead.aead_key_templates.AES256_GCM,
'AES128_GCM_SIV':
aead.aead_key_templates.AES128_GCM_SIV,
'AES256_GCM_SIV':
aead.aead_key_templates.AES256_GCM_SIV,
'AES128_CTR_HMAC_SHA256':
aead.aead_key_templates.AES128_CTR_HMAC_SHA256,
'AES256_CTR_HMAC_SHA256':
aead.aead_key_templates.AES256_CTR_HMAC_SHA256,
'CHACHA20_POLY1305':
tink_pb2.KeyTemplate(
type_url=('type.googleapis.com/google.crypto.tink.' +
'ChaCha20Poly1305Key'),
output_prefix_type=tink_pb2.TINK),
'XCHACHA20_POLY1305':
aead.aead_key_templates.XCHACHA20_POLY1305,
'AES256_SIV':
daead.deterministic_aead_key_templates.AES256_SIV,
'AES128_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_4KB,
'AES256_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_4KB,
'AES128_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB,
'AES256_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_4KB,
'AES256_GCM_HKDF_1MB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_1MB,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM':
hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256':
hybrid.hybrid_key_templates
.ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256,
'AES_CMAC':
mac.mac_key_templates.AES_CMAC,
'HMAC_SHA256_128BITTAG':
mac.mac_key_templates.HMAC_SHA256_128BITTAG,
'HMAC_SHA256_256BITTAG':
mac.mac_key_templates.HMAC_SHA256_256BITTAG,
'HMAC_SHA512_256BITTAG':
mac.mac_key_templates.HMAC_SHA512_256BITTAG,
'HMAC_SHA512_512BITTAG':
mac.mac_key_templates.HMAC_SHA512_512BITTAG,
'ECDSA_P256':
signature.signature_key_templates.ECDSA_P256,
'ECDSA_P384':
signature.signature_key_templates.ECDSA_P384,
'ECDSA_P384_SHA384':
signature.signature_key_templates.ECDSA_P384_SHA384,
'ECDSA_P521':
signature.signature_key_templates.ECDSA_P521,
'ECDSA_P256_IEEE_P1363':
signature.signature_key_templates.ECDSA_P256_IEEE_P1363,
'ECDSA_P384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_IEEE_P1363,
'ECDSA_P384_SHA384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_SHA384_IEEE_P1363,
'ECDSA_P521_IEEE_P1363':
signature.signature_key_templates.ECDSA_P521_IEEE_P1363,
'ED25519':
signature.signature_key_templates.ED25519,
'RSA_SSA_PKCS1_3072_SHA256_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_3072_SHA256_F4,
'RSA_SSA_PKCS1_4096_SHA512_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_4096_SHA512_F4,
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4':
signature.signature_key_templates.RSA_SSA_PSS_3072_SHA256_SHA256_32_F4,
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4':
signature.signature_key_templates.RSA_SSA_PSS_4096_SHA512_SHA512_64_F4,
'AES_CMAC_PRF':
prf.prf_key_templates.AES_CMAC,
'HMAC_PRF_SHA256':
prf.prf_key_templates.HMAC_SHA256,
'HMAC_PRF_SHA512':
prf.prf_key_templates.HMAC_SHA512,
'HKDF_PRF_SHA256':
prf.prf_key_templates.HKDF_SHA256,
}
SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME = {
name: SUPPORTED_LANGUAGES[KEY_TYPE_FROM_URL[template.type_url]]
for name, template in KEY_TEMPLATE.items()
}
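# Minimal lookup sketch (illustrative, assuming the tink package is
# installed and this module has been imported): map a template name to its
# KeyTemplate proto and the languages expected to support it.
if __name__ == '__main__':
    name = 'AES256_GCM'
    print(name, '->', KEY_TEMPLATE[name].type_url)
    print('supported languages:', SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[name])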
|
StarcoderdataPython
|
8031330
|
<gh_stars>0
#!/usr/bin/env python3
# Copyright 2021 Unicorn
# See LICENSE file for licensing details.
#
# Learn more at: https://juju.is/docs/sdk
"""Prometheus-bind-exporter as charm the service.
"""
import logging
import subprocess
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus, MaintenanceStatus
logger = logging.getLogger(__name__)
DEFAULT_LISTEN_PORT = 9119
DEFAULT_STATS_GROUPS = "server,view,tasks"
class PrometheusBindExporterOperatorCharm(CharmBase):
"""Charm the service."""
_stored = StoredState()
def __init__(self, *args):
super().__init__(*args)
self.framework.observe(self.on.install, self._on_install)
self.framework.observe(self.on.config_changed, self._on_config_changed)
self._stored.set_default(listen_port=DEFAULT_LISTEN_PORT,
stats_groups=DEFAULT_STATS_GROUPS,)
self.unit.status = ActiveStatus("Unit is ready")
def _on_install(self, _):
"""Installation hook that installs prometheus-bind-exporter daemon."""
self.unit.status = MaintenanceStatus("Installing prometheus-bind-exporter")
snap_file = self.model.resources.fetch("prometheus-bind-exporter")
subprocess.check_call(["snap", "install", "--dangerous", snap_file])
self._manage_prometheus_bind_exporter_service()
self.unit.status = ActiveStatus("Unit is ready")
def _on_config_changed(self, _):
"""Config change hook."""
self.unit.status = MaintenanceStatus("prometheus-bind-exporter configuration")
self._stored.listen_port = self.config.get("exporter-listen-port")
self._stored.stats_groups = self.config.get("exporter-stats-groups")
self._manage_prometheus_bind_exporter_service()
self.unit.status = ActiveStatus("Unit is ready")
def _manage_prometheus_bind_exporter_service(self):
"""Manage the prometheus-bind-exporter service."""
logger.debug("prometheus-bind-exporter configuration in progress")
private_address = self.model.get_binding("designate-bind").network.bind_address
subprocess.check_call([
"snap", "set", "prometheus-bind-exporter",
f"web.listen-address={private_address or ''}:{self._stored.listen_port}",
f"web.stats-groups={self._stored.stats_groups}"
])
logger.info("prometheus-bind-exporter has been reconfigured")
if __name__ == "__main__":
main(PrometheusBindExporterOperatorCharm)
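# For reference, the snap configuration applied in
# _manage_prometheus_bind_exporter_service is equivalent to this manual
# invocation (address and values are illustrative):
#   snap set prometheus-bind-exporter \
#       web.listen-address=10.0.0.5:9119 \
#       web.stats-groups=server,view,tasks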
|
StarcoderdataPython
|
11338659
|
<filename>michelanglo_app/views/venus/venus_steps.py
from __future__ import annotations
from time import sleep
from typing import List
from michelanglo_protein import ProteinAnalyser, Mutation, ProteinCore, Structure, is_alphafold_taxon # noqa
from michelanglo_transpiler import PyMolTranspiler # used solely for temp folder
from .venus_base import VenusException, VenusBase
from ..buffer import system_storage
from ..common_methods import Comms
from ..uniprot_data import *
from ...models import User # needed solely for log.
log = logging.getLogger(__name__)
class VenusSteps(VenusBase):
"""
This class does the analysis steps but does not deal with the dispatching
"""
# ===== STEP 1 =======
def protein_step(self):
"""
Check mutations are valid
"""
self.start_timer()
if self.has(): # this is already done?
protein = system_storage[self.handle]
else:
log.info(f'Step 1 analysis requested by {User.get_username(self.request)}')
uniprot = self.request.params['uniprot']
taxid = self.request.params['species']
mutation_text = self.request.params['mutation']
# ## Do analysis
mutation = Mutation(mutation_text)
protein = ProteinAnalyser(uniprot=uniprot,
taxid=taxid)
assert protein.exists(), f'{uniprot} of {taxid} is absent'
protein.load()
protein.mutation = mutation
setattr(protein, 'current_step_complete', False) # venus specific
# assess
if not protein.check_mutation():
log.info('protein mutation discrepancy error')
discrepancy = protein.mutation_discrepancy()
self.reply = {**self.reply,
'error': 'mutation',
'msg': discrepancy,
'status': 'error'}
raise VenusException(discrepancy)
else:
system_storage[self.handle] = protein
self.reply['protein'] = self.jsonable(protein)
protein.current_step_complete = True
self.stop_timer()
# ======= STEP 2 =======
def mutation_step(self) -> None:
"""
Runs protein.predict_effect()
"""
self.start_timer()
log.info(f'Step 2 analysis requested by {User.get_username(self.request)}')
# ## has the previous step been done?
if not self.has():
self.protein_step()
# if protein.mutation has already run it still does it again...
# no shortcut useful.
protein = system_storage[self.handle]
protein.current_step_complete = False
protein.predict_effect()
featpos = protein.get_features_at_position(protein.mutation.residue_index)
featnear = protein.get_features_near_position(protein.mutation.residue_index)
pos_percent = round(protein.mutation.residue_index / len(protein) * 100)
self.reply['mutation'] = {**self.jsonable(protein.mutation),
'features_at_mutation': featpos,
'features_near_mutation': featnear,
'position_as_protein_percent': pos_percent,
'gnomAD_near_mutation': protein.get_gnomAD_near_position()}
protein.current_step_complete = True
self.stop_timer()
# ======= STEP 3 =======
def structural_step(self, structure=None, retrieve=True):
"""
runs protein.analyse_structure() iteratively until it works.
"""
self.start_timer()
log.info(f'Step 3 analysis requested by {User.get_username(self.request)}')
# previous done?
if not self.has():
self.mutation_step()
protein = system_storage[self.handle]
# this is slightly odd because structural None is either not done or no model
if self.has('structural'):
self.reply['structural'] = self.jsonable(protein.structural)
self.reply['has_structure'] = True
elif protein.current_step_complete is False:
while protein.current_step_complete is False: # polling
log.debug('Waiting for structural_step')
sleep(5)
return self.structural_step(structure, retrieve) # retry
else:
# this step has not been run before
protein.current_step_complete = False
if structure is not None: # user submitted structure.
protein.analyse_structure(structure=structure,
**self.get_user_modelling_options()
)
else:
self.structural_workings(protein, retrieve)
if protein.structural:
self.reply['structural'] = self.jsonable(protein.structural)
self.reply['has_structure'] = True
else:
log.info('No structural data available')
self.reply['status'] = 'terminated'
self.reply['error'] = 'No crystal structures or models available.'
                self.reply['msg'] = 'Structural analyses cannot be performed.'
self.reply['has_structure'] = False
raise VenusException(self.reply['msg'])
protein.current_step_complete = True
self.stop_timer()
# ===== Step 4 =====
def ddG_step(self): # noqa ddG is not camelcase but abbreviation which is PEP8 compliant.
self.start_timer()
log.info(f'Step 4 analysis requested by {User.get_username(self.request)}')
# ------- get protein
if self.handle not in system_storage:
for fun in (self.protein_step, self.mutation_step, self.structural_step):
fun()
if 'error' in self.reply:
return self.reply
protein = system_storage[self.handle]
# -------- analyse
if self.has('energetics'):
analysis = protein.energetics
elif protein.current_step_complete is False:
while protein.current_step_complete is False: # polling
log.debug('Waiting for ddG_step')
sleep(5)
return self.ddG_step() # retry
else:
protein.current_step_complete = False
applicable_keys = ('scorefxn_name', 'outer_constrained', 'remove_ligands',
'neighbour_only_score',
'scaling_factor', 'prevent_acceptance_of_incrementor',
'single_chain', 'radius', 'cycles')
user_options = self.get_user_modelling_options()
options = {k: v for k, v in user_options.items() if k in applicable_keys}
# radius and cycle minima are applied already
analysis = protein.analyse_FF(**options, spit_process=True)
if analysis is None:
self.log_if_error('pyrosetta step', 'likely segfault')
elif 'error' in analysis:
self.log_if_error('pyrosetta step', analysis)
else:
self.reply['ddG'] = analysis
# {ddG: float, scores: Dict[str, float], native:str, mutant:str, rmsd:int}
protein.current_step_complete = True
self.stop_timer()
# ====== Step 5 ======
def ddG_gnomad_step(self):
log.info(f'Step 5 analysis requested by {User.get_username(self.request)}')
if self.handle not in system_storage:
self.protein_step()
if 'error' in self.reply:
return self.reply['error']
self.mutation_step()
if 'error' in self.reply:
return self.reply['error']
protein = system_storage[self.handle]
if self.has('energetics_gnomAD'):
analysis = protein.energetics_gnomAD
elif protein.current_step_complete is False:
while protein.current_step_complete is False: # polling
log.debug('Waiting for ddG_gnomad_step')
sleep(5)
return self.ddG_gnomad_step() # retry
else:
protein.current_step_complete = False
applicable_keys = ('scorefxn_name', 'outer_constrained', 'remove_ligands',
'scaling_factor',
'single_chain', 'cycles', 'radius')
options = {k: v for k, v in self.get_user_modelling_options().items() if k in applicable_keys}
# speedy
options['cycles'] = 1
options['radius'] = min(6, options['radius'] if 'radius' in options else 6)
analysis = protein.analyse_gnomad_FF(**options, spit_process=True)
if analysis is None:
analysis = dict(error='likely segfault', msg='likely segfault')
if 'error' in analysis: # it is a failure.
self.log_if_error('ddG_gnomad_step', analysis)
self.reply['status'] = 'error'
self.reply['error'] = 'pyrosetta step'
self.reply['msg'] = analysis['error']
else:
self.reply['gnomAD_ddG'] = analysis
protein.current_step_complete = True
# ======= STEP EXTRA =======
def extra_step(self, mutation, algorithm):
if self.has():
self.ddG_step()
protein = system_storage[self.handle]
protein.current_step_complete = False
log.info(f'Extra analysis ({algorithm}) requested by {User.get_username(self.request)}')
applicable_keys = ('scorefxn_name', 'outer_constrained', 'remove_ligands',
'scaling_factor',
'single_chain', 'cycles', 'radius')
options = {k: v for k, v in self.get_user_modelling_options().items() if k in applicable_keys}
self.reply = {**self.reply,
**protein.analyse_other_FF(**options, mutation=mutation, algorithm=algorithm, spit_process=True)}
self.log_if_error('extra_step')
protein.current_step_complete = True
# ========= STEP EXTRA2 =========
def phospho_step(self):
if self.has():
self.ddG_step()
protein = system_storage[self.handle]
protein.current_step_complete = False
log.info(f'Phosphorylation requested by {User.get_username(self.request)}')
coordinates = protein.phosphorylate_FF(spit_process=True)
if isinstance(coordinates, str):
self.reply['coordinates'] = coordinates
elif isinstance(coordinates, dict):
self.reply = {**self.reply, **coordinates} # it is an error msg!
else:
self.reply = {**self.reply, 'status': 'error', 'error': 'Unknown', 'msg': 'No coordinates returned'}
self.log_if_error('phospho_step')
protein.current_step_complete = True
# ========= CHANGE STEP =========
def change_to_file(self, block, name, params: List[str] = ()):
# params is either None or a list of topology files
if self.has():
self.mutation_step()
protein = system_storage[self.handle]
protein.structural = None
protein.energetics = None
protein.current_step_complete = False
protein.rosetta_params_filenames = params
title, ext = os.path.splitext(name)
structure = Structure(title, 'Custom', 0, 9999, title,
type='custom', chain="A", offset=0, coordinates=block)
structure.is_satisfactory(protein.mutation.residue_index)
self.structural_step(structure=structure)
protein.current_step_complete = True
return self.reply
# ----- inner methods ----------------------------------------------------------------------------------------------
def get_user_modelling_options(self):
"""
User dictated choices.
Note `debug` is in the VenusBase.__init__
"""
user_modelling_options = {'allow_pdb': True,
'allow_swiss': True,
'allow_alphafold': True,
'scaling_factor': 0.239, # this is the kJ/mol <--> kcal/mol "mystery" value
'no_conservation': False, # Consurf SSL issue --> True
}
# ------ booleans
for key in ['allow_pdb',
'allow_swiss',
'allow_alphafold',
'outer_constrained',
'neighbour_only_score',
'prevent_acceptance_of_incrementor',
'remove_ligands',
'single_chain',
'scaling_factor']:
if key not in self.request.params:
pass
else:
user_modelling_options[key] = self.request.params[key] not in (False, 0, 'false', '0')
# ------ floats
for key in ['swiss_oligomer_identity_cutoff', 'swiss_monomer_identity_cutoff',
'swiss_oligomer_qmean_cutoff', 'swiss_monomer_qmean_cutoff']:
if key not in self.request.params:
pass # defaults from defaults in protein class. This must be an API call.
else:
user_modelling_options[key] = float(self.request.params[key])
# ----- for ddG calculations.
for key, minimum, maximum in (('cycles', 1, 5), ('radius', 8, 15)):
if key in self.request.params:
user_modelling_options[key] = max(minimum, min(maximum, int(self.request.params[key])))
else:
user_modelling_options[key] = minimum
log.debug(f'No {key} provided...')
# scorefxn... More are okay... but I really do not wish for users to randomly use these.
allowed_names = ('ref2015', 'beta_july15', 'beta_nov16',
'ref2015_cart', 'beta_july15_cart', 'beta_nov16_cart')
if 'scorefxn_name' in self.request.params and self.request.params['scorefxn_name'] in allowed_names:
user_modelling_options['scorefxn_name'] = self.request.params['scorefxn_name']
if 'custom_filename' in self.request.params:
# this is required to make the hash unique.
user_modelling_options['custom_filename'] = self.request.params['custom_filename']
return user_modelling_options
def structural_workings(self, protein, retrieve):
"""
Inner function of step 3 structural_step
"""
user_modelling_options = self.get_user_modelling_options()
# try options
        # do not use the stored values of pdbs, but get the swissmodel ones (more up to date)
if retrieve:
# if it is not a valid species nothing happens.
# if the gene is not valid... then this record is wrong.
protein.add_alphafold2() # protein::alphafold2_retrieval::FromAlphaFold2
# swissmodel
try:
if user_modelling_options['allow_pdb'] or user_modelling_options['allow_swiss']:
# The following _retrieves_ but also adds to self:
protein.retrieve_structures_from_swissmodel() # protein::swissmodel_retrieval::FromSwissmodel
except Exception as error:
if not self.suppress_errors:
raise error
msg = f'Swissmodel retrieval step failed: {error.__class__.__name__}: {error}'
log.critical(msg)
Comms.notify_admin(msg)
self.reply['warnings'].append('Retrieval of latest PDB data failed (admin notified). ' +
'Falling back onto stored data.')
chosen_structure = None
try:
chosen_structure = protein.get_best_model(**user_modelling_options)
# IMPORTANT PART ****************************************************
# ------------- find the best and analyse it
protein.analyse_structure(structure=chosen_structure,
**user_modelling_options)
# *******************************************************************
except Exception as error:
if not self.suppress_errors or chosen_structure is None:
raise error
# ConnectionError: # failed to download model...
if protein.swissmodel.count(chosen_structure) != 0:
protein.swissmodel.remove(chosen_structure)
source = 'Swissmodel'
elif protein.pdbs.count(chosen_structure) != 0:
protein.pdbs.remove(chosen_structure)
source = 'RCSB PDB'
elif protein.alphafold2.count(chosen_structure) != 0:
protein.alphafold2.remove(chosen_structure)
source = 'AlphaFold2'
msg = 'AlphaFold2 model was problematic'
log.critical(msg) # ideally should message admin.
else:
raise ValueError('structure from mystery source')
# ---- logging
if isinstance(error, ConnectionError):
msg = f'{source} {chosen_structure} could not be downloaded'
log.info(msg)
self.reply['warnings'].append(msg)
elif isinstance(error, ValueError):
msg = f'Residue missing in structure ({source}): {chosen_structure.code} ({error})'
log.info(msg)
self.reply['warnings'].append(msg)
else:
# this should not happen in step 3.
# causes: PDB given was a fake. How does that happen?
                # causes: the PDB given was too big. e.g. 7MQ8
msg = f'Major issue ({error.__class__.__name__}) with model {chosen_structure.code} ({error})'
self.reply['warnings'].append(msg)
log.warning(msg)
Comms.notify_admin(msg)
# ---- repeat
self.structural_step(retrieve=False)
def save_params(self) -> List[str]:
"""
Confusingly, by params I mean Rosetta topology files
This could be done without saving to disk (cf. rdkit_to_params module)... One day will be fixed.
saves params : str to params as filenames
"""
if 'params' in self.request.params:
params = self.request.params['params']
elif 'params[]' in self.request.params:
params = self.request.params.getall('params')
else:
params = []
temp_folder = PyMolTranspiler.temporary_folder
# TODO In many places the data is stored in the module!
# In module page there is biggest offender.
if not os.path.exists(temp_folder):
os.mkdir(temp_folder)
files = []
for param in params: # it should be paramses
n = len(os.listdir(temp_folder))
file = os.path.join(temp_folder, f'{n:0>3}.params')
with open(file, 'w') as w:
w.write(param)
files.append(file)
return files
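# Standalone sketch of the clamping idiom used in get_user_modelling_options
# (hypothetical helper name; 'params' stands in for self.request.params):
def _clamp_option(params: dict, key: str, minimum: int, maximum: int) -> int:
    """Clamp an integer request parameter into [minimum, maximum]."""
    if key in params:
        return max(minimum, min(maximum, int(params[key])))
    return minimum  # same fallback the class uses when the key is absent
assert _clamp_option({'cycles': '9'}, 'cycles', 1, 5) == 5
assert _clamp_option({}, 'radius', 8, 15) == 8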
|
StarcoderdataPython
|
6470640
|
<gh_stars>0
__title__ = 'unpack_pixels'
__author__ = '<NAME>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2021, <NAME>'
|
StarcoderdataPython
|
162483
|
<filename>Curso Python/Mundo 1/Modulo3/Desafios/Desafio2/des022.py
nome = str(input('Enter your full name: '))
nomelista = nome.split()
print('The name in uppercase: {}'.format(nome.upper()))
print('The name in lowercase: {}'.format(nome.lower()))
print('The name has a total of {} letters'.format(len(nome.replace(' ', ''))))
print('The first name has {} letters'.format(len(nomelista[0])))
|
StarcoderdataPython
|
5065891
|
<filename>src/processing/cloudclipper.py
import numpy as np
from skimage.measure import points_in_poly
class CloudClipper():
@staticmethod
def factory(method, **kwargs):
if method == "polar":
return PolarClipper(**kwargs)
elif method == "cartesian":
return CartesianClipper(**kwargs)
elif method == "polygon":
return PolygonalClipper(**kwargs)
        else:
            raise ValueError(method)
class PolarClipper():
def __init__(self, azimuth_range,
fov, distance_range, z_range, inverse=False):
self.azimuth_range = azimuth_range
self.fov = fov
self.distance_range = distance_range
self.z_range = z_range
self.inverse = inverse
def clip(self, data):
# Not implemented
return data
def get_config(self):
return {}
class CartesianClipper():
def __init__(self, x_range, y_range,
z_range, inverse=False):
# verify ranges are sorted
self.x_range = x_range
self.y_range = y_range
self.z_range = z_range
self.inverse = inverse
def clip(self, data):
# verify that data is 3D
        # calculate masks for inliers in x, y and z ranges; elementwise '&'
        # is required, since 'and' on numpy arrays raises ValueError
        x_mask = (data[:,0] > self.x_range[0]) & (data[:,0] < self.x_range[1])
        y_mask = (data[:,1] > self.y_range[0]) & (data[:,1] < self.y_range[1])
        z_mask = (data[:,2] > self.z_range[0]) & (data[:,2] < self.z_range[1])
        mask = x_mask & y_mask & z_mask
# apply inversion if required
if self.inverse:
mask = np.invert(mask)
return data[mask]
def get_config(self):
return {}
class PolygonalClipper():
def __init__(self, polygon, z_range, inverse=False):
# verify polygon is not self intersecting
# z_range is sorted
self.polygon = np.array(polygon)
self.z_range = np.array(z_range)
self.inverse = inverse
def clip(self, data):
# calculate mask for inliers of 2d polygon defined in XY
        # calculate mask for inliers in z range
xy_mask = points_in_poly(data[:,:2], self.polygon)
if self.z_range[0] == self.z_range[1]:
z_mask = np.full((data.shape[0], ), True)
else:
z_mask = (data[:,2] > self.z_range[0]) & (data[:,2] < self.z_range[1])
mask = xy_mask & z_mask
# apply inversion if required
if self.inverse:
mask = np.invert(mask)
return data[mask]
def get_config(self):
return {
"method": "polygon",
"params": {
"polygon": self.polygon.tolist(),
"z_range": self.z_range.tolist(),
"inverse": self.inverse
}
}
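# Minimal usage sketch (illustrative): clip a small cloud to a unit-square
# polygon; an equal z_range keeps every z value, as clip() special-cases it.
if __name__ == '__main__':
    points = np.array([[0.5, 0.5, 1.0], [2.0, 2.0, 0.0]])
    clipper = CloudClipper.factory('polygon',
                                   polygon=[[0, 0], [1, 0], [1, 1], [0, 1]],
                                   z_range=[0, 0])
    print(clipper.clip(points))  # only the point inside the square remains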
|
StarcoderdataPython
|
3257873
|
<reponame>cfergeau/cluster-node-tuning-operator
import fnmatch
import re
__all__ = ["DeviceMatcher"]
class DeviceMatcher(object):
"""
Device name matching against the devices specification in tuning profiles.
The devices specification consists of multiple rules separated by spaces.
The rules have a syntax of shell-style wildcards and are either positive
or negative. The negative rules are prefixed with an exclamation mark.
"""
def match(self, rules, device_name):
"""
Match a device against the specification in the profile.
If there is no positive rule in the specification, implicit rule
which matches all devices is added. The device matches if and only
if it matches some positive rule, but no negative rule.
"""
if isinstance(rules, str):
rules = re.split(r"\s|,\s*", rules)
positive_rules = [rule for rule in rules if not rule.startswith("!") and not rule.strip() == '']
negative_rules = [rule[1:] for rule in rules if rule not in positive_rules]
if len(positive_rules) == 0:
positive_rules.append("*")
matches = False
for rule in positive_rules:
if fnmatch.fnmatch(device_name, rule):
matches = True
break
for rule in negative_rules:
if fnmatch.fnmatch(device_name, rule):
matches = False
break
return matches
def match_list(self, rules, device_list):
"""
Match a device list against the specification in the profile. Returns
the list, which is a subset of devices which match.
"""
matching_devices = []
for device in device_list:
if self.match(rules, device):
matching_devices.append(device)
return matching_devices
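# Usage sketch: a missing positive rule is treated as an implicit '*', and
# negative rules veto matches.
if __name__ == '__main__':
    matcher = DeviceMatcher()
    print(matcher.match('sd* !sdb', 'sda'))               # True
    print(matcher.match('!sdb', 'sdb'))                   # False
    print(matcher.match_list('eth*', ['eth0', 'wlan0']))  # ['eth0']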
|
StarcoderdataPython
|
208924
|
<reponame>kaferi/aspose-pdf-cloud-python
# coding: utf-8
"""
Aspose.PDF Cloud API Reference
Copyright (c) 2021 Aspose.PDF Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
OpenAPI spec version: 3.0
"""
from pprint import pformat
from six import iteritems
import re
class Table(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'links': 'list[Link]',
'alignment': 'HorizontalAlignment',
'horizontal_alignment': 'HorizontalAlignment',
'vertical_alignment': 'VerticalAlignment',
'top': 'float',
'left': 'float',
'default_cell_text_state': 'TextState',
'default_cell_padding': 'MarginInfo',
'border': 'BorderInfo',
'margin': 'MarginInfo',
'rows': 'list[Row]',
'default_column_width': 'str',
'default_cell_border': 'BorderInfo',
'broken': 'TableBroken',
'column_widths': 'str',
'repeating_rows_count': 'int',
'repeating_columns_count': 'int',
'repeating_rows_style': 'TextState',
'corner_style': 'BorderCornerStyle',
'break_text': 'TextRect',
'background_color': 'Color',
'is_borders_included': 'bool',
'column_adjustment': 'ColumnAdjustment',
'z_index': 'int'
}
attribute_map = {
'links': 'Links',
'alignment': 'Alignment',
'horizontal_alignment': 'HorizontalAlignment',
'vertical_alignment': 'VerticalAlignment',
'top': 'Top',
'left': 'Left',
'default_cell_text_state': 'DefaultCellTextState',
'default_cell_padding': 'DefaultCellPadding',
'border': 'Border',
'margin': 'Margin',
'rows': 'Rows',
'default_column_width': 'DefaultColumnWidth',
'default_cell_border': 'DefaultCellBorder',
'broken': 'Broken',
'column_widths': 'ColumnWidths',
'repeating_rows_count': 'RepeatingRowsCount',
'repeating_columns_count': 'RepeatingColumnsCount',
'repeating_rows_style': 'RepeatingRowsStyle',
'corner_style': 'CornerStyle',
'break_text': 'BreakText',
'background_color': 'BackgroundColor',
'is_borders_included': 'IsBordersIncluded',
'column_adjustment': 'ColumnAdjustment',
'z_index': 'ZIndex'
}
def __init__(self, links=None, alignment=None, horizontal_alignment=None, vertical_alignment=None, top=None, left=None, default_cell_text_state=None, default_cell_padding=None, border=None, margin=None, rows=None, default_column_width=None, default_cell_border=None, broken=None, column_widths=None, repeating_rows_count=None, repeating_columns_count=None, repeating_rows_style=None, corner_style=None, break_text=None, background_color=None, is_borders_included=None, column_adjustment=None, z_index=None):
"""
Table - a model defined in Swagger
"""
self._links = None
self._alignment = None
self._horizontal_alignment = None
self._vertical_alignment = None
self._top = None
self._left = None
self._default_cell_text_state = None
self._default_cell_padding = None
self._border = None
self._margin = None
self._rows = None
self._default_column_width = None
self._default_cell_border = None
self._broken = None
self._column_widths = None
self._repeating_rows_count = None
self._repeating_columns_count = None
self._repeating_rows_style = None
self._corner_style = None
self._break_text = None
self._background_color = None
self._is_borders_included = None
self._column_adjustment = None
self._z_index = None
if links is not None:
self.links = links
if alignment is not None:
self.alignment = alignment
if horizontal_alignment is not None:
self.horizontal_alignment = horizontal_alignment
if vertical_alignment is not None:
self.vertical_alignment = vertical_alignment
if top is not None:
self.top = top
if left is not None:
self.left = left
if default_cell_text_state is not None:
self.default_cell_text_state = default_cell_text_state
if default_cell_padding is not None:
self.default_cell_padding = default_cell_padding
if border is not None:
self.border = border
if margin is not None:
self.margin = margin
self.rows = rows
if default_column_width is not None:
self.default_column_width = default_column_width
if default_cell_border is not None:
self.default_cell_border = default_cell_border
if broken is not None:
self.broken = broken
if column_widths is not None:
self.column_widths = column_widths
if repeating_rows_count is not None:
self.repeating_rows_count = repeating_rows_count
if repeating_columns_count is not None:
self.repeating_columns_count = repeating_columns_count
if repeating_rows_style is not None:
self.repeating_rows_style = repeating_rows_style
if corner_style is not None:
self.corner_style = corner_style
if break_text is not None:
self.break_text = break_text
if background_color is not None:
self.background_color = background_color
if is_borders_included is not None:
self.is_borders_included = is_borders_included
if column_adjustment is not None:
self.column_adjustment = column_adjustment
if z_index is not None:
self.z_index = z_index
@property
def links(self):
"""
Gets the links of this Table.
Link to the document.
:return: The links of this Table.
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""
Sets the links of this Table.
Link to the document.
:param links: The links of this Table.
:type: list[Link]
"""
self._links = links
@property
def alignment(self):
"""
Gets the alignment of this Table.
Gets HorizontalAlignment of the table alignment.
:return: The alignment of this Table.
:rtype: HorizontalAlignment
"""
return self._alignment
@alignment.setter
def alignment(self, alignment):
"""
Sets the alignment of this Table.
Gets HorizontalAlignment of the table alignment.
:param alignment: The alignment of this Table.
:type: HorizontalAlignment
"""
self._alignment = alignment
@property
def horizontal_alignment(self):
"""
Gets the horizontal_alignment of this Table.
Gets HorizontalAlignment of the table alignment.
:return: The horizontal_alignment of this Table.
:rtype: HorizontalAlignment
"""
return self._horizontal_alignment
@horizontal_alignment.setter
def horizontal_alignment(self, horizontal_alignment):
"""
Sets the horizontal_alignment of this Table.
Gets HorizontalAlignment of the table alignment.
:param horizontal_alignment: The horizontal_alignment of this Table.
:type: HorizontalAlignment
"""
self._horizontal_alignment = horizontal_alignment
@property
def vertical_alignment(self):
"""
Gets the vertical_alignment of this Table.
Gets VerticalAlignment of the annotation.
:return: The vertical_alignment of this Table.
:rtype: VerticalAlignment
"""
return self._vertical_alignment
@vertical_alignment.setter
def vertical_alignment(self, vertical_alignment):
"""
Sets the vertical_alignment of this Table.
Gets VerticalAlignment of the annotation.
:param vertical_alignment: The vertical_alignment of this Table.
:type: VerticalAlignment
"""
self._vertical_alignment = vertical_alignment
@property
def top(self):
"""
Gets the top of this Table.
Gets or sets the table top coordinate.
:return: The top of this Table.
:rtype: float
"""
return self._top
@top.setter
def top(self, top):
"""
Sets the top of this Table.
Gets or sets the table top coordinate.
:param top: The top of this Table.
:type: float
"""
self._top = top
@property
def left(self):
"""
Gets the left of this Table.
Gets or sets the table left coordinate.
:return: The left of this Table.
:rtype: float
"""
return self._left
@left.setter
def left(self, left):
"""
Sets the left of this Table.
Gets or sets the table left coordinate.
:param left: The left of this Table.
:type: float
"""
self._left = left
@property
def default_cell_text_state(self):
"""
Gets the default_cell_text_state of this Table.
Gets or sets the default cell text state.
:return: The default_cell_text_state of this Table.
:rtype: TextState
"""
return self._default_cell_text_state
@default_cell_text_state.setter
def default_cell_text_state(self, default_cell_text_state):
"""
Sets the default_cell_text_state of this Table.
Gets or sets the default cell text state.
:param default_cell_text_state: The default_cell_text_state of this Table.
:type: TextState
"""
self._default_cell_text_state = default_cell_text_state
@property
def default_cell_padding(self):
"""
Gets the default_cell_padding of this Table.
Gets or sets the default cell padding.
:return: The default_cell_padding of this Table.
:rtype: MarginInfo
"""
return self._default_cell_padding
@default_cell_padding.setter
def default_cell_padding(self, default_cell_padding):
"""
Sets the default_cell_padding of this Table.
Gets or sets the default cell padding.
:param default_cell_padding: The default_cell_padding of this Table.
:type: MarginInfo
"""
self._default_cell_padding = default_cell_padding
@property
def border(self):
"""
Gets the border of this Table.
Gets or sets the border.
:return: The border of this Table.
:rtype: BorderInfo
"""
return self._border
@border.setter
def border(self, border):
"""
Sets the border of this Table.
Gets or sets the border.
:param border: The border of this Table.
:type: BorderInfo
"""
self._border = border
@property
def margin(self):
"""
Gets the margin of this Table.
        Gets or sets an outer margin for paragraph (for pdf generation)
:return: The margin of this Table.
:rtype: MarginInfo
"""
return self._margin
@margin.setter
def margin(self, margin):
"""
Sets the margin of this Table.
        Gets or sets an outer margin for paragraph (for pdf generation)
:param margin: The margin of this Table.
:type: MarginInfo
"""
self._margin = margin
@property
def rows(self):
"""
Gets the rows of this Table.
Sets the rows of the table.
:return: The rows of this Table.
:rtype: list[Row]
"""
return self._rows
@rows.setter
def rows(self, rows):
"""
Sets the rows of this Table.
Sets the rows of the table.
:param rows: The rows of this Table.
:type: list[Row]
"""
if rows is None:
raise ValueError("Invalid value for `rows`, must not be `None`")
self._rows = rows
@property
def default_column_width(self):
"""
Gets the default_column_width of this Table.
        Gets the default column width;
:return: The default_column_width of this Table.
:rtype: str
"""
return self._default_column_width
@default_column_width.setter
def default_column_width(self, default_column_width):
"""
Sets the default_column_width of this Table.
        Gets the default column width;
:param default_column_width: The default_column_width of this Table.
:type: str
"""
self._default_column_width = default_column_width
@property
def default_cell_border(self):
"""
Gets the default_cell_border of this Table.
Gets default cell border;
:return: The default_cell_border of this Table.
:rtype: BorderInfo
"""
return self._default_cell_border
@default_cell_border.setter
def default_cell_border(self, default_cell_border):
"""
Sets the default_cell_border of this Table.
Gets default cell border;
:param default_cell_border: The default_cell_border of this Table.
:type: BorderInfo
"""
self._default_cell_border = default_cell_border
@property
def broken(self):
"""
Gets the broken of this Table.
        Gets or sets the table vertical breaking behavior;
:return: The broken of this Table.
:rtype: TableBroken
"""
return self._broken
@broken.setter
def broken(self, broken):
"""
Sets the broken of this Table.
        Gets or sets the table vertical breaking behavior;
:param broken: The broken of this Table.
:type: TableBroken
"""
self._broken = broken
@property
def column_widths(self):
"""
Gets the column_widths of this Table.
Gets the column widths of the table.
:return: The column_widths of this Table.
:rtype: str
"""
return self._column_widths
@column_widths.setter
def column_widths(self, column_widths):
"""
Sets the column_widths of this Table.
Gets the column widths of the table.
:param column_widths: The column_widths of this Table.
:type: str
"""
self._column_widths = column_widths
@property
def repeating_rows_count(self):
"""
Gets the repeating_rows_count of this Table.
Gets the first rows count repeated for several pages
:return: The repeating_rows_count of this Table.
:rtype: int
"""
return self._repeating_rows_count
@repeating_rows_count.setter
def repeating_rows_count(self, repeating_rows_count):
"""
Sets the repeating_rows_count of this Table.
Gets the first rows count repeated for several pages
:param repeating_rows_count: The repeating_rows_count of this Table.
:type: int
"""
self._repeating_rows_count = repeating_rows_count
@property
def repeating_columns_count(self):
"""
Gets the repeating_columns_count of this Table.
Gets or sets the maximum columns count for table
:return: The repeating_columns_count of this Table.
:rtype: int
"""
return self._repeating_columns_count
@repeating_columns_count.setter
def repeating_columns_count(self, repeating_columns_count):
"""
Sets the repeating_columns_count of this Table.
Gets or sets the maximum columns count for table
:param repeating_columns_count: The repeating_columns_count of this Table.
:type: int
"""
self._repeating_columns_count = repeating_columns_count
@property
def repeating_rows_style(self):
"""
Gets the repeating_rows_style of this Table.
Gets the style for repeating rows
:return: The repeating_rows_style of this Table.
:rtype: TextState
"""
return self._repeating_rows_style
@repeating_rows_style.setter
def repeating_rows_style(self, repeating_rows_style):
"""
Sets the repeating_rows_style of this Table.
Gets the style for repeating rows
:param repeating_rows_style: The repeating_rows_style of this Table.
:type: TextState
"""
self._repeating_rows_style = repeating_rows_style
@property
def corner_style(self):
"""
Gets the corner_style of this Table.
Gets or sets the styles of the border corners
:return: The corner_style of this Table.
:rtype: BorderCornerStyle
"""
return self._corner_style
@corner_style.setter
def corner_style(self, corner_style):
"""
Sets the corner_style of this Table.
Gets or sets the styles of the border corners
:param corner_style: The corner_style of this Table.
:type: BorderCornerStyle
"""
self._corner_style = corner_style
@property
def break_text(self):
"""
Gets the break_text of this Table.
Gets or sets break text for table
:return: The break_text of this Table.
:rtype: TextRect
"""
return self._break_text
@break_text.setter
def break_text(self, break_text):
"""
Sets the break_text of this Table.
Gets or sets break text for table
:param break_text: The break_text of this Table.
:type: TextRect
"""
self._break_text = break_text
@property
def background_color(self):
"""
Gets the background_color of this Table.
Gets or sets table background color
:return: The background_color of this Table.
:rtype: Color
"""
return self._background_color
@background_color.setter
def background_color(self, background_color):
"""
Sets the background_color of this Table.
Gets or sets table background color
:param background_color: The background_color of this Table.
:type: Color
"""
self._background_color = background_color
@property
def is_borders_included(self):
"""
Gets the is_borders_included of this Table.
        Gets or sets whether borders are included in column widths.
:return: The is_borders_included of this Table.
:rtype: bool
"""
return self._is_borders_included
@is_borders_included.setter
def is_borders_included(self, is_borders_included):
"""
Sets the is_borders_included of this Table.
        Gets or sets whether borders are included in column widths.
:param is_borders_included: The is_borders_included of this Table.
:type: bool
"""
self._is_borders_included = is_borders_included
@property
def column_adjustment(self):
"""
Gets the column_adjustment of this Table.
Gets or sets the table column adjustment.
:return: The column_adjustment of this Table.
:rtype: ColumnAdjustment
"""
return self._column_adjustment
@column_adjustment.setter
def column_adjustment(self, column_adjustment):
"""
Sets the column_adjustment of this Table.
Gets or sets the table column adjustment.
:param column_adjustment: The column_adjustment of this Table.
:type: ColumnAdjustment
"""
self._column_adjustment = column_adjustment
@property
def z_index(self):
"""
Gets the z_index of this Table.
Gets ZIndex of the annotation.
:return: The z_index of this Table.
:rtype: int
"""
return self._z_index
@z_index.setter
def z_index(self, z_index):
"""
Sets the z_index of this Table.
Gets ZIndex of the annotation.
:param z_index: The z_index of this Table.
:type: int
"""
self._z_index = z_index
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Table):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
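# Minimal construction sketch (illustrative; a real table would carry Row
# instances in 'rows', which is the only required attribute):
if __name__ == '__main__':
    table = Table(rows=[], top=10.0, left=20.0, column_widths='100 100')
    print(table.to_str())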
|
StarcoderdataPython
|
5168063
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple On Memory File System Creator
"""
import sys
import argparse
from pathlib import Path
# table format: file:[next, pos, size, name] dir:[next, num, 0, name]
def out_header(out, table, name):
out.write('/* This file is generated by the MEMFS Converter */\n')
out.write('#ifndef _MEMFS_{}_H_\n'.format(name))
out.write('#define _MEMFS_{}_H_\n'.format(name))
out.write('\n')
out.write('#ifdef __cplusplus\n')
out.write('extern "C" {\n')
out.write('#endif\n')
out.write('\n')
out.write('#include "memfs.h"\n')
out.write('\n')
out.write('extern const m_table ' + name + '_table[];\n')
out.write('extern const uint8_t ' + name + '_data[];\n')
out.write('extern const uint8_t ' + '_binary_' + name +
'_data_bin_start[];\n')
out.write('\n')
out.write('#ifdef MEMFS_IMPLEMENTATION\n')
out.write('\n')
out.write('const m_table ' + name + '_table[]={\n')
    for i, t in enumerate(table):
        out.write(' {')
        out.write('{0},\t{1},\t{2},\t"{3}"'.format(t[0], t[1], t[2], t[3]))
        out.write('}, // ' + str(i) + '\n')
out.write('};\n')
out.write('#endif //MEMFS_IMPLEMENTATION\n')
out.write('#ifdef __cplusplus\n')
out.write('} // extern "C"\n')
out.write('#endif\n')
out.write('#endif //_MEMFS_{}_H_\n'.format(name))
def create_table(table, path, offs):
for k in path.keys():
offs += 1
pos = len(table)
if type(path[k]) is dict:
offs = create_table(table, path[k], offs)
table.insert(pos, [offs + 1, len(path[k]), 0, k])
else:
table.insert(pos, [offs + 1, path[k][0], path[k][1], k])
return offs
def out_data_c(out, path, offs):
for k in path.keys():
if type(path[k]) is dict:
offs = out_data_c(out, path[k], offs)
else:
name = str(path[k].relative_to(path[k].root))
print(name, '... ', end='', flush=True)
out.write('\n/* ' + name + ' */\n')
size = 0
for c in path[k].read_bytes():
out.write(hex(c) + ',')
size += 1
if (size % 16) == 0: out.write('\n')
print(hex(offs), size)
path[k] = [offs, size]
offs += size
# 64bit padding
padding = offs % 8
if padding > 0:
padding = 8 - padding
out.write('0,' * padding)
offs += padding
return offs
def out_data_bin(out, path, offs):
for k in path.keys():
if type(path[k]) is dict:
offs = out_data_bin(out, path[k], offs)
else:
name = str(path[k].relative_to(path[k].root))
print(name, '... ', end='', flush=True)
out.write(path[k].read_bytes())
size = path[k].stat().st_size
print(hex(offs), size)
path[k] = [offs, size]
offs += size
# 64bit padding
padding = offs % 8
if padding > 0:
padding = 8 - padding
out.write(bytearray([0] * padding))
offs += padding
return offs
def traverse(path, dir):
fs = {}
files = sorted(dir.iterdir())
if len(files) == 0: return # ignore empty dir
for f in files:
if f.is_dir():
traverse(fs, f)
elif f.stat().st_size > 0: # ignore empty file
fs[f.name] = f
path[dir.name] = fs
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Simple OnMemory File System Creator')
parser.add_argument('-d',
action='append',
help='dir path ("." is not allowed)')
parser.add_argument('-f', action='append', help='file path')
parser.add_argument('-o', help='memfs name', default='memfs')
parser.add_argument('-c',
action='store_true',
help='output as .c source code')
args = parser.parse_args()
if args.d == None and args.f == None:
parser.print_help()
sys.exit()
print(args.c)
if args.c:
c_file = Path(args.o + '_data.c')
if c_file.exists(): c_file.unlink()
print('generate', c_file.name)
else:
bin_file = Path(args.o + '_data.bin')
if bin_file.exists(): bin_file.unlink()
print('generate', bin_file.name)
h_file = Path(args.o + '_table.h')
if h_file.exists(): h_file.unlink()
root = {}
if args.d:
for dir in sorted(args.d):
if dir == '.':
print('"." is not allowed')
sys.exit(-1)
p = Path(dir)
if p.is_dir():
traverse(root, p)
else:
print('dir not found:', dir)
sys.exit(-1)
if args.f:
for file in sorted(args.f):
p = Path(file)
if p.is_file():
root[p.name] = p
else:
print('file not found:', file)
sys.exit(-1)
if args.c:
out = c_file.open('w')
out.write('/* This file is generated by the MEMFS Converter */\n')
out.write('#include <stdint.h>\n')
out.write('const uint8_t ' + args.o + '_data[]={')
out_data_c(out, root, 0)
out.write('};\n')
out.close()
else:
out = bin_file.open('wb')
out_data_bin(out, root, 0)
out.close()
table = []
create_table(table, root, 0)
# add root
num = 0
if args.d: num = len(args.d)
if args.f: num += len(args.f)
table.insert(0, [table[-1][0], num, 0, '.'])
out = h_file.open('w')
out_header(out, table, args.o)
out.close()
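# Example invocations (script and directory names are illustrative):
#   python3 memfs.py -d assets -f logo.png -o appfs   # writes appfs_data.bin + appfs_table.h
#   python3 memfs.py -d assets -o appfs -c            # writes appfs_data.c + appfs_table.h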
|
StarcoderdataPython
|
3567573
|
import tkinter as tk
import mysql.connector
import tkinter.font as tf
from tkinter import *
def delete_user_account():
mydb = mysql.connector.connect(
host="localhost",
user="root",
password="<PASSWORD>",
database='project_dbms'
)
mycursor = mydb.cursor()
root = Toplevel()
    # make fullscreen the default
root.attributes("-fullscreen", True)
# Title of the window
root.title('Delete User Account')
# add widgets here
# Heading Configurations
hed = Label(root, text="Delete User Account", bg="#4361ee",
fg="#e9ecef", font="Times 60 underline bold")
# placing the widget on the screen
hed.pack(fill=X)
# Bank details Heading Configurations
instruction1 = Label(root, text="Instruction:", bg="#7209b7",
fg="#ced4da", font="Times 25 bold", anchor=NW, padx=15, pady=10)
# placing the widget on the screen
instruction1.pack(pady=(20, 8), padx=100, fill=X)
fontstyle = tf.Font(family="Times", size=20, weight="bold")
# Bank Details Body Configurations
instruction_Message1 = Label(root,
text=""" 1.Enter valid Account Number \n 2.Press "Esc" key to Exit""", bg="#f72585", fg="#000000",
font=fontstyle, justify=LEFT, anchor=NW, padx=30, pady=10)
# placing the widget on the screen
instruction_Message1.pack(pady=0, padx=100, fill=X)
base_frame = Frame(root, bg="#4cc9f0")
account_number = Label(base_frame, text="Enter A/C No:",
bg="#7209b7", fg="#ced4da", font='Times 30 bold', width=11)
account_number.grid(row=0, column=0, padx=(0, 5))
account_entry = Entry(base_frame, font='Times 30 bold',
width=12, bg='#7209b7', fg='#ced4da')
account_entry.grid(row=0, column=1)
def fetch_values():
mycursor.execute("SELECT account_number FROM user_account")
numb = mycursor.fetchall()
numb_list = list(sum(numb, ()))
return numb_list
message_disp = StringVar()
message_disp.set('Waiting for user response')
def delete_account():
acc_no = int(account_entry.get())
numb_list = fetch_values()
if acc_no in numb_list:
message_disp.set('Your account has been deleted SUCCESSFULLY')
mycursor.execute(
"DELETE FROM user_account WHERE account_number = {}".format(acc_no))
mydb.commit()
            account_entry.delete(0, END)
else:
            message_disp.set('User account not found, please check it again')
            account_entry.delete(0, END)
delete = Button(base_frame,
bg="#4361ee", fg="#e9ecef",
text='Delete Account',
anchor=CENTER,
font='Times 20 bold',
justify=CENTER,
activebackground="#e9ecef",
activeforeground="#4361ee",
command=delete_account
)
delete.grid(row=1, column=0, columnspan=2, padx=(27, 0), pady=35)
base_frame.pack(pady=80)
# display_text = {
# 'default': 'Waiting for User Response',
# 'delete': 'Your account has been deleted succesfully',
# 'notFound': 'Your account is not present'
# }
display_Message = Label(root,
textvariable=message_disp, bg="#f72585", fg="#000000",
font=fontstyle, justify=CENTER, padx=30, pady=10)
# placing the widget on the screen
display_Message.pack(pady=0, padx=100, fill=X)
# Adding an event to the Esc button for program termination
root.bind("<Escape>", lambda event: root.destroy())
# configuring the base window background
root.configure(bg="#4cc9f0")
# Termination of the Tkinter Process
mainloop()
def call_delete_user_account():
delete_user_account()
|
StarcoderdataPython
|