id (string, 1-8 chars) | text (string, 6-1.05M chars) | dataset_id (1 class)
---|---|---|
8131802
|
<gh_stars>0
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
    path('register/', views.doctor_register, name='register-doctor'),
    path('my_appointments/', views.my_appointment, name='doctor-appointments'),
]
|
StarcoderdataPython
|
3514892
|
<gh_stars>1-10
#
# MIT License
#
# Copyright (c) 2020 <NAME>, @pablintino
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from sqlalchemy import Column, Integer, String, ForeignKey, Float, UniqueConstraint
from sqlalchemy.orm import relationship
from models.inventory.inventory_identificable_item_model import InventoryIdentificableItemModel
class InventoryItemModel(InventoryIdentificableItemModel):
__tablename__ = "inventory_item"
id = Column(Integer, primary_key=True)
mpn = Column(String(100), nullable=False, index=True)
manufacturer = Column(String(100), nullable=False, index=True)
name = Column(String(100), nullable=False)
description = Column(String(100))
last_buy_price = Column(Float)
dici = Column(String(70), nullable=False, index=True)
# relationships
component_id = Column(Integer, ForeignKey('component.id'))
component = relationship("ComponentModel", back_populates="inventory_item")
category_id = Column(Integer, ForeignKey('inventory_category.id'))
category = relationship('InventoryCategoryModel', back_populates="category_items", lazy='subquery')
stock_items = relationship("InventoryItemLocationStockModel", back_populates="item")
item_properties = relationship("InventoryItemPropertyModel", back_populates="item")
    # Set a constraint that enforces Part Number - Manufacturer uniqueness for Inventory Item
__table_args__ = (UniqueConstraint('mpn', 'manufacturer', name='_mpn_manufacturer_item_uc'),)
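# Illustrative sketch, not part of the original module: with a configured SQLAlchemy
# session (and assuming any remaining columns required by the parent model are filled),
# the UniqueConstraint above makes a second insert with the same mpn/manufacturer pair
# fail with an IntegrityError.
#
#   session.add(InventoryItemModel(mpn="LM317T", manufacturer="ST", name="Regulator", dici="D-001"))
#   session.commit()
#   session.add(InventoryItemModel(mpn="LM317T", manufacturer="ST", name="Duplicate", dici="D-002"))
#   session.commit()  # raises sqlalchemy.exc.IntegrityError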
|
StarcoderdataPython
|
1742823
|
<reponame>RL-OtherApps/website-addons<gh_stars>1-10
import datetime
import random
import werkzeug
from odoo import http
from odoo.http import request
class Chess(http.Controller):
# chess chat
@http.route("/chess/game/chat/init", type="json", auth="public")
def init_chat(self, game_id):
author_name = http.request.env.user.name # current user
author_id = http.request.env.user.id
return {"author_name": author_name, "author_id": author_id, "game_id": game_id}
@http.route("/chess/game/chat/history", type="json", auth="public")
def load_history(self, game_id):
history = request.env["chess.game.chat"].message_fetch(game_id, 100)
if len(history) == 0:
return False
hist = []
for e in history:
d = {
"author_name": str(e.author_id.name),
"message": e.message,
"date_message": e.date_message,
}
hist.append(d)
history = hist
return history
@http.route("/chess/game/chat/send/", type="json", auth="public")
def chat_message_send(self, message, game_id):
res = request.env["chess.game.chat"].broadcast(message, game_id)
return res
# game status
@http.route("/chess/game/status/", type="json", auth="public")
def game_status(self, game_id):
result = request.env["chess.game"].create_game_status(game_id)
return result.system_status
# chess game
@http.route("/chess/game/init/", type="json", auth="public")
def init_game(self, game_id):
result = request.env["chess.game"].browse(int(game_id)).game_information()
return result
@http.route("/chess/game/history", type="json", auth="public")
def load_move(self, game_id):
history = request.env["chess.game.line"].move_fetch(game_id)
if len(history) == 0:
return False
hist = []
for e in history:
d = {"source": str(e.source), "target": str(e.target)}
hist.append(d)
history = hist
return history
@http.route("/chess/game/system_history", type="json", auth="public")
def load_system_message(self, game_id):
history = request.env["chess.game"].system_fetch(game_id)
if history.status == "agreement":
status = str(history.status)
user = None
else:
if ":" in history.status:
history = history.status.split(":")
user = str(history[1])
status = str(history[0])
else:
                status = str(history.status)
user = None
result = {"type": "system", "data": {"status": str(status), "user": str(user)}}
return result
@http.route("/chess/game/load_time", type="json", auth="public")
def load_time(self, game_id, turn):
result = request.env["chess.game"].load_time(game_id, turn)
return result
@http.route("/chess/game/send/", type="json", auth="public")
def move_send(self, message, game_id):
if message["type"] == "move":
result = request.env["chess.game.line"].move_broadcast(message, game_id)
return result
elif message["type"] == "system":
t = message["data"]
if t["status"] == "time":
result = request.env["chess.game"].system_time_broadcast(
message, game_id
)
else:
result = request.env["chess.game"].system_broadcast(message, game_id)
return "system"
@http.route("/chess/game/game_over/", type="json", auth="public")
def game_over(self, game_id, status=None, time_limit_id=None):
request.env["chess.game"].browse(int(game_id)).game_over(status, time_limit_id)
return True
# create game
@http.route("/chess/", auth="public", website=True)
def index(self, **kw):
users = http.request.env["res.users"].search(
[("id", "!=", http.request.env.user.id)]
)
return http.request.render("chess.chesspage", {"users": users})
@http.route("/chess/game/<int:games>/", auth="public", website=True)
def game(self, games, **kwargs):
games_object = http.request.env["chess.game"].search([("id", "=", games)])
user = http.request.env["res.users"].search(
[("id", "=", http.request.env.user.id)]
)
if len(games_object) == 0:
from werkzeug.exceptions import NotFound
raise NotFound()
return http.request.render(
"chess.gamepage",
{"games": games_object, "user": user, "dbname": request.cr.dbname},
)
@http.route("/chess/game/", auth="public", website=True)
def create_game(
self,
game_type=None,
second_user_id=None,
first_color_figure=None,
time_d=None,
time_h=None,
time_m=None,
time_s=None,
**kwargs
):
if request.httprequest.method != "POST":
from werkzeug.exceptions import NotFound
raise NotFound()
if second_user_id == "0":
users = http.request.env["res.users"].search(
[("id", "!=", http.request.env.user.id)]
)
users = [e.id for e in users]
user_list = random.sample(users, 1)
second_user_id = user_list[0]
if first_color_figure == "white":
second_color_figure = "black"
else:
second_color_figure = "white"
first_user_id = http.request.env.user.id
game_time = 0
if game_type == "blitz" or game_type == "limited time":
if (
time_d is not None
or time_h is not None
or time_m is not None
or time_s is not None
):
game_time = (
int(time_d) * 24 * 60 * 60
+ int(time_h) * 60 * 60
+ int(time_m) * 60
+ int(time_s)
)
else:
game_time = 0
import time
new_game = http.request.env["chess.game"].create(
{
"game_type": game_type,
"date_start": datetime.datetime.now(),
"first_user_id": first_user_id,
"second_user_id": second_user_id,
"first_color_figure": first_color_figure,
"second_color_figure": second_color_figure,
"second_user_time": game_time,
"first_user_time": game_time,
"first_time_date": float(time.time()),
"second_time_date": float(time.time()),
}
)
location = "/chess/game/" + str(new_game.id)
return werkzeug.utils.redirect(location)
@http.route("/chess/game/tournament", auth="public", website=True)
def create_tournament(
self, tournament_type=None, players=None, participate=None, **kwargs
):
if request.httprequest.method != "POST":
from werkzeug.exceptions import NotFound
raise NotFound()
players_clean_data = [int(x) for x in players.split(",")]
if participate:
players_clean_data.append(http.request.env.user.id)
        tournament = http.request.env["chess.tournament"].create(
            {
                "tournament_type": tournament_type,
                "start_date": datetime.datetime.now(),
                "players": [(6, 0, players_clean_data)],  # (6, 0, ids) takes a flat list of ids
"time_d": kwargs["time_d"],
"time_h": kwargs["time_h"],
"time_m": kwargs["time_m"],
"time_s": kwargs["time_s"],
}
)
location = "/chess/tournament/" + str(tournament.id)
return werkzeug.utils.redirect(location)
@http.route("/chess/tournament/<int:tournament>/", auth="public", website=True)
def tournament_table(self, tournament, **kwargs):
tournament = http.request.env["chess.tournament"].search(
[("id", "=", tournament)]
)
if len(tournament) == 0:
from werkzeug.exceptions import NotFound
raise NotFound()
return http.request.render(
"chess.tournament-page",
{"tournament": tournament.id, "uid": http.request.env.context.get("uid")},
)
@http.route("/chess/game/tournament/fetch", type="json", auth="public")
def fetch_tournament_data(self, tournament_id=None):
return request.env["chess.game"].send_games_data(int(tournament_id))
@http.route("/chess/game/tournament/create_game/", type="json", auth="public")
def frontend_create_tournament_game(self, **kwargs):
return request.env["chess.game"].create_tournament_game(**kwargs)
|
StarcoderdataPython
|
4860249
|
<reponame>drkitty/web-test<gh_stars>0
hash_rounds = 20000
app_config = {
'debug': True,
'secret_key': '',
}
database = {
'host': 'localhost',
'user': 'checklist',
'passwd': <PASSWORD>,
'db': 'checklistdb',
'echo': True,
}
|
StarcoderdataPython
|
1792577
|
<filename>datutils/datmeta.py<gh_stars>1-10
from datutils.utils import write_metadata
# py2/3 compatibility
try:
input = raw_input
except NameError:
pass
def datmeta(datfiles):
sr = False
while not sr:
try:
print("sampling rate:")
isr = float(input())
assert isr > 0
sr = isr
except:
pass
n_channels = False
while not n_channels:
try:
print("number of channels:")
in_channels = int(input())
assert in_channels > 0
n_channels = in_channels
except:
pass
dtype = False
while not dtype:
try:
print("datatype, probably int16 or float64:")
idtype = input()
assert idtype in ('int8', 'int16', 'int32', 'int64', 'float32',
'float64', 'float128', 'uint8', 'uint16',
'uint32', 'uint64')
dtype = idtype
except:
pass
for datfile in datfiles:
write_metadata(datfile,
n_channels=n_channels,
sampling_rate=sr,
dtype=dtype)
def main():
import argparse
    p = argparse.ArgumentParser(prog="datmeta.py",
                                description="""
    Generates metadata files for raw binary files; accepts multiple files
    (assuming they have the same metadata parameters).
    Feel free to create .meta files yourself and include other parameters.
    The .meta files use YAML syntax.
    """)
p.add_argument("dat", help="dat file(s)", nargs="+")
options = p.parse_args()
datmeta(options.dat)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
12819186
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# similarString.py
#
# Copyright 2014 Leandro <<EMAIL>>
#
import difflib
import random
def DNA(length):
return ''.join(random.choice('CGTA') for nucleot in xrange(length))
DNA1 = DNA(15)
print '-'*45
print 'Sequencia 1'
print '-'*45
print DNA1
print '-'*45
print
DNA2 = DNA(15)
print '-'*45
print 'Sequencia 2'
print '-'*45
print DNA2
print '-'*45
print
print
# The DIFFLIB module
s = difflib.SequenceMatcher(None, DNA1, DNA2)
print '-'*45
print 'Retorna a quantidade de caracteres semelhantes'
print '-'*45
print s.find_longest_match(0,len(DNA1),0,len(DNA2)) # Returns the number of matching characters
print '-'*45
print
print '-'*45
print 'Retorna a similaridade, entre 0 e 1'
print '-'*45
print difflib.SequenceMatcher(None, DNA1, DNA2).ratio() # Returns the similarity ratio
print '-'*45
|
StarcoderdataPython
|
4811823
|
"""
@brief Compute spectrum-weighted exposure correction for counts light
curves prepared by gtbin.
@author <NAME> <<EMAIL>>
"""
#
# $Header: /nfs/slac/g/glast/ground/cvs/users/jchiang/pyExposure/python/flux_lc.py,v 1.1 2006/05/28 14:40:27 jchiang Exp $
#
import numarray as num
from FunctionWrapper import FunctionWrapper
from readXml import SourceModel
import pyLikelihood as pyLike
from FitsNTuple import FitsNTuple
import pyExposure
def log_array(npts, xmin, xmax):
xstep = num.log(xmax/xmin)/(npts - 1)
return xmin*num.exp(num.arange(npts, type=num.Float)*xstep)
class ModelFunction(object):
_funcFactory = pyLike.SourceFactory_funcFactory()
def __init__(self, xmlFile, srcName):
srcModel = SourceModel(xmlFile)
spectrum = srcModel[srcName].spectrum
self.func = self._funcFactory.create(spectrum.type)
pars = spectrum.parameters
for name in pars.keys():
self.func.setParam(name, pars[name].value)
def __call__(self, ee):
foo = FunctionWrapper(lambda x : self.func.value(pyLike.dArg(x)))
return foo(ee)
class Exposure(object):
def __init__(self, lc_file, coords=None, ft2file='DC2_FT2_v2.fits',
energies=None, irfs='DC2'):
self.lc = FitsNTuple(lc_file, 'RATE')
cuts = pyExposure.Cuts(lc_file, 'RATE', False)
emin, emax = 20, 2e5
for i in range(cuts.size()):
my_cut = cuts.getCut(i)
if my_cut.type() == 'SkyCone':
my_cut = pyExposure.Cuts_castAsSkyConeCut(my_cut)
self.ra = my_cut.ra()
self.dec = my_cut.dec()
if my_cut.type() == 'range':
my_cut = pyExposure.Cuts_castAsRangeCut(my_cut)
if my_cut.colname() == 'ENERGY':
emin = my_cut.minVal()
emax = my_cut.maxVal()
energies = log_array(21, emin, emax)
times = list(self.lc.TIME - self.lc.TIMEDEL/2.)
times.append(self.lc.TIME[-1] + self.lc.TIMEDEL[-1]/2.)
self.exposure = pyExposure.Exposure(ft2file, times, energies,
self.ra, self.dec, irfs)
def __getattr__(self, attrname):
return getattr(self.exposure, attrname)
def __call__(self, time, energy):
return self.exposure.value(time, energy)
def weightedAvgs(self, dnde):
energies = self.energies()
dnde_vals = dnde(energies)
expvals = self.values()
avg_exps = []
dnde_avg = 0
for k in range(len(energies) - 1):
dnde_avg += ((dnde_vals[k+1]+dnde_vals[k])
*(energies[k+1]-energies[k])/2.)
for exprow in expvals:
avg_exps.append(0)
ff = dnde_vals*num.array(exprow)
for k in range(len(energies) - 1):
avg_exps[-1] += (ff[k+1]+ff[k])*(energies[k+1]-energies[k])/2.
return num.array(avg_exps)/dnde_avg
if __name__ == '__main__':
import hippoplotter as plot
ee = log_array(100, 20, 2e5)
bpl = ModelFunction('solar_flare_bpl_model.xml', 'Solar Flare')
# plot.scatter(ee, bpl(ee), xlog=1, ylog=1, pointRep='Line')
exposure = Exposure('flare_lc.fits')
my_exp = exposure.weightedAvgs(bpl)
times = exposure.lc.TIME
plot.xyplot(times - times[0]-644, exposure.lc.COUNTS/(my_exp+1),
xerr=exposure.lc.TIMEDEL/2.,
yerr=num.sqrt(exposure.lc.COUNTS)/(my_exp+1), ylog=1,
pointRep='Column')
|
StarcoderdataPython
|
252490
|
from django.conf.urls.defaults import patterns, url, include, handler404, handler500
from django.contrib import admin
import settings
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'django.views.generic.simple.direct_to_template',
{'template': 'home.html'}, name='home'),
(r'^accounts/', include('userena.urls')),
(r'^forums/', include('forums.urls')),
(r'^wiki/', include('wiki.urls')),
(r'^messages/', include('userena.contrib.umessages.urls')),
(r'^admin/', include(admin.site.urls)),
(r'^', include('core.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
StarcoderdataPython
|
5044828
|
<gh_stars>0
def change_frame(frame=0):
pass
def clear_render_border():
pass
def curves_point_set(point='BLACK_POINT'):
pass
def cycle_render_slot(reverse=False):
pass
def external_edit(filepath=""):
pass
def invert(invert_r=False, invert_g=False, invert_b=False, invert_a=False):
pass
def match_movie_length():
pass
def new(name="Untitled", width=1024, height=1024, color=(0.0, 0.0, 0.0, 1.0), alpha=True, generated_type='BLANK', float=False, gen_context='NONE', use_stereo_3d=False):
pass
def open(filepath="", directory="", files=None, filter_blender=False, filter_backup=False, filter_image=True, filter_movie=True, filter_python=False, filter_font=False, filter_sound=False, filter_text=False, filter_btx=False, filter_collada=False, filter_alembic=False, filter_folder=True, filter_blenlib=False, filemode=9, relative_path=True, show_multiview=False, use_multiview=False, display_type='DEFAULT', sort_method='FILE_SORT_ALPHA', use_sequence_detection=True):
pass
def pack(as_png=False):
pass
def project_apply():
pass
def project_edit():
pass
def properties():
pass
def read_renderlayers():
pass
def reload():
pass
def render_border(xmin=0, xmax=0, ymin=0, ymax=0):
pass
def replace(filepath="", filter_blender=False, filter_backup=False, filter_image=True, filter_movie=True, filter_python=False, filter_font=False, filter_sound=False, filter_text=False, filter_btx=False, filter_collada=False, filter_alembic=False, filter_folder=True, filter_blenlib=False, filemode=9, relative_path=True, show_multiview=False, use_multiview=False, display_type='DEFAULT', sort_method='FILE_SORT_ALPHA'):
pass
def sample():
pass
def sample_line(xstart=0, xend=0, ystart=0, yend=0, cursor=1002):
pass
def save():
pass
def save_as(save_as_render=False, copy=False, filepath="", check_existing=True, filter_blender=False, filter_backup=False, filter_image=True, filter_movie=True, filter_python=False, filter_font=False, filter_sound=False, filter_text=False, filter_btx=False, filter_collada=False, filter_alembic=False, filter_folder=True, filter_blenlib=False, filemode=9, relative_path=True, show_multiview=False, use_multiview=False, display_type='DEFAULT', sort_method='FILE_SORT_ALPHA'):
pass
def save_dirty():
pass
def save_sequence():
pass
def toolshelf():
pass
def unpack(method='USE_LOCAL', id=""):
pass
def view_all(fit_view=False):
pass
def view_ndof():
pass
def view_pan(offset=(0.0, 0.0)):
pass
def view_selected():
pass
def view_zoom(factor=0.0):
pass
def view_zoom_border(gesture_mode=0, xmin=0, xmax=0, ymin=0, ymax=0):
pass
def view_zoom_in(location=(0.0, 0.0)):
pass
def view_zoom_out(location=(0.0, 0.0)):
pass
def view_zoom_ratio(ratio=0.0):
pass
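# These signature stubs appear to mirror Blender's bpy.ops.image operators; a hedged
# usage sketch (inside Blender, where bpy is available), using only parameters shown above:
#
#   import bpy
#   bpy.ops.image.new(name="Canvas", width=512, height=512,
#                     color=(0.0, 0.0, 0.0, 1.0), alpha=True)
#   bpy.ops.image.save_as(filepath="//canvas.png")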
|
StarcoderdataPython
|
4988746
|
<filename>leetcode/p102.py
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from __future__ import annotations  # defer annotation evaluation so TreeNode can be supplied by the judge
from typing import List, Optional


class Solution:
    def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:
        # breadth-first traversal, collecting node values level by level
        result = []
        q = []
        if root is None:
            return []  # an empty tree has no levels
q.append(root)
while len(q) > 0:
result.append([n.val for n in q])
new_q = []
for n in q:
if n.left is not None:
new_q.append(n.left)
if n.right is not None:
new_q.append(n.right)
q = new_q
return result
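# A minimal local driver, not part of a LeetCode submission, assuming a TreeNode
# class that matches the commented definition above.
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    # tree:  3 -> (9, 20), 20 -> (15, 7)
    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().levelOrder(root))  # [[3], [9, 20], [15, 7]]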
|
StarcoderdataPython
|
6657508
|
<filename>packages/VikiLabs_CIFAR_Wrapper.py<gh_stars>1-10
'''
PyTorch Wrapper to work with the CIFAR-10 Image Database
Author:
<NAME> (a) Viki
<EMAIL>
'''
import torchvision
from torchvision import transforms
import torch
from torchvision import datasets
import os
import errno
from VikiLabs_Logger import *
import matplotlib.pyplot as plt
import numpy as np
log = logger()
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
#classes = ('car', 'plane', 'dog', 'cat',
# 'deer', 'bird', 'frog', 'horse', 'ship', 'truck')
def Download_CIFAR_TrainingData(path):
print(log._st+ "DOWNLOADING CIFAR TRAINING DATA")
t = transforms
'''
Convert Image from range [0, 1] to range [-1 to 1]
image = (image - n_mean)/n_std
'''
#Mean of all 3 channels (depth, height, width)
#n_mean = (0.5, 0.5, 0.5)
#n_std = (0.5, 0.5, 0.5)
tf = t.Compose([t.ToTensor(), t.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
#tf = t.Compose([t.ToTensor(), t.Normalize(n_mean, n_std)])
data_object = datasets.CIFAR10(path, train=True, download=True, transform=tf)
print(log._ed+ "DOWNLOADING CIFAR TRAINING DATA")
return data_object
def Download_CIFAR_TestData(path):
print(log._st+ "DOWNLOADING CIFAR TEST DATA")
t = transforms
'''
Convert Image from range [0, 1] to range [-1 to 1]
image = (image - n_mean)/n_std
'''
n_mean = (0.5, 0.5, 0.5)
n_std = (0.5, 0.5, 0.5)
tf = t.Compose([t.ToTensor(), t.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
#tf = t.Compose([t.ToTensor(), t.Normalize(n_mean, n_std)])
data_object = datasets.CIFAR10(path, train=False, download=True, transform=tf)
print(log._ed+ "DOWNLOADING CIFAR TEST DATA")
return data_object
def Load_CIFAR_Data(data_object, batch_size):
print(log._st+ "LOADING CIFAR DATA")
tud = torch.utils.data
data = tud.DataLoader(data_object, batch_size=batch_size, shuffle=True, num_workers=2)
print(log._ed+ "LOADING CIFAR DATA")
return data
def Show_CIFAR_SAMPLE_Images(training_data):
dataiter = iter(training_data)
    images, labels = next(dataiter)
num_sample_images = 5
fig, axes = plt.subplots(1, num_sample_images, figsize=(8, 6))
for i in range(0, num_sample_images):
axes[i].imshow(np.transpose((images[i,:,:,:]/2 + 0.5).numpy(), (1, 2, 0)), vmin=0, vmax=1)
axes[i].axis('off')
axes[i].set_title(classes[labels[i].item()])
print(labels[i].item())
plt.tight_layout()
plt.show()
def save_image(numpy_array, file_name):
image_name = file_name + str(".png")
tensor_array = torch.from_numpy(numpy_array)
torchvision.utils.save_image(tensor_array, image_name)
def StoreDataAsImage(cifar_data, dfolder):
try:
os.mkdir(dfolder)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
file_base = "number"
'''
    With a batch size of 64, the CIFAR-10 training loader yields batches. Each batch contains:
    1. images of shape [64, 3, 32, 32] -> 64 colour images (3 channels, 32x32 pixels)
    2. labels of shape [64] -> one class label per image
'''
'''Full Download : ??'''
#no_records_to_store = len(cifar_data)
'''Only 64 Images Download'''
no_records_to_store = 1
#Iterate Over CIFAR DATA
for i, data in enumerate(cifar_data, 0):
if(i >= no_records_to_store):
break
images, labels = data
for j in range(len(images)):
file_name = dfolder+str("/")+file_base+"_"+str(labels[j].item())+"_"+str(i)+"_"+str(j)
'''
Pixel Values will be in range between -1 and 1
'''
n_std = 0.5
n_mean = 0.5
            normalized_image = images[j, :, :, :]  # j indexes the image within the current batch
denormalized_image = ((normalized_image * n_std) + n_mean).numpy()
image_np_array = np.transpose(denormalized_image, (1, 2, 0))
'''
Pixel Values will be in range between 0 and 1
'''
save_image(image_np_array, file_name)
'''
cifar_data_path = './data'
image_path = './images'
training_batch_size = 64
test_batch_size = 1000
training_object = Download_CIFAR_TrainingData(cifar_data_path)
test_object = Download_CIFAR_TestData(cifar_data_path)
training_data = Load_CIFAR_Data( training_object, training_batch_size )
test_data = Load_CIFAR_Data( test_object, test_batch_size )
Show_CIFAR_SAMPLE_Images(training_data)
#StoreDataAsImage(training_data, image_path)
'''
|
StarcoderdataPython
|
6591995
|
<filename>megfile/lib/compat.py
import sys
__all__ = ['PathLike', 'fspath']
if sys.version_info < (3, 6): # pragma: no cover
from pathlib import PurePath as PathLike
def fspath(path) -> str:
"""os.fspath replacement, useful to point out when we should replace it by the
real function once we drop py35.
"""
if hasattr(path, '__fspath__'):
return path.__fspath__()
elif isinstance(path, PathLike):
return str(path)
elif isinstance(path, bytes):
return path.decode()
elif isinstance(path, str):
return path
raise TypeError(
'expected str, bytes or PathLike object, not %s' %
type(path).__name__)
else:
from os import PathLike
from os import fspath as _fspath
def fspath(path) -> str:
result = _fspath(path)
if isinstance(result, bytes):
return result.decode()
return result
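# Illustrative usage, not part of the original module; results shown for a POSIX system:
#
#   >>> fspath('data/file.txt')
#   'data/file.txt'
#   >>> fspath(b'data/file.txt')
#   'data/file.txt'
#   >>> import pathlib
#   >>> fspath(pathlib.Path('data') / 'file.txt')
#   'data/file.txt'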
|
StarcoderdataPython
|
6500546
|
from kubernetes import client
from kubernetes.client.rest import ApiException
from .load_kube_config import kubeConfig
kubeConfig.load_kube_config()
core = client.CoreV1Api()
class K8sPods:
    @staticmethod
    def get_pods(ns, logger):
try:
if ns == 'all':
logger.info ("Fetching all namespace pods data...")
pods = core.list_pod_for_all_namespaces(timeout_seconds=10)
else:
logger.info ("Fetching {} namespace pods data...".format(ns))
namespace = ns
pods = core.list_namespaced_pod(namespace, timeout_seconds=10)
return pods
except ApiException as e:
logger.info("Exception when calling CoreV1Api->list_pod_for_all_namespaces: %s\n" % e)
|
StarcoderdataPython
|
3350124
|
# Copyright 2021 - 2022 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test data"""
import os
import uuid
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List
from ghga_service_chassis_lib.object_storage_dao_testing import ObjectFixture, calc_md5
from ghga_service_chassis_lib.utils import TEST_FILE_PATHS
from pydantic.types import UUID4
from drs3 import models
from .config import DEFAULT_CONFIG
def get_study_id_example(index: int) -> str:
"Generate an example study ID."
return f"mystudy-{index}"
def get_file_id_example(index: int) -> str:
"Generate an example file ID."
return f"myfile-{index}"
class FileState:
def __init__(
self,
id: UUID4,
file_id: str,
grouping_label: str,
file_path: Path,
populate_db: bool = True,
populate_storage: bool = True,
):
"""
Initialize file state and create imputed attributes.
You may set `populate_db` or `populate_storage` to `False` to indicate that this
file should not be added to the database or the storage respectively.
"""
self.id = id
self.file_id = file_id
self.grouping_label = grouping_label
self.file_path = file_path
self.populate_db = populate_db
self.populate_storage = populate_storage
# computed attributes:
with open(self.file_path, "rb") as file:
self.content = file.read()
filename, file_extension = os.path.splitext(self.file_path)
self.md5 = calc_md5(self.content)
self.file_info = models.DrsObjectBase(
file_id=self.file_id,
md5_checksum=self.md5,
size=1000, # not the real size
creation_date=datetime.now(timezone.utc),
update_date=datetime.now(timezone.utc),
format=file_extension,
)
self.message = {
"file_id": self.file_id,
"grouping_label": self.grouping_label,
"md5_checksum": self.file_info.md5_checksum,
"size": self.file_info.size,
"creation_date": self.file_info.creation_date.isoformat(),
"update_date": self.file_info.update_date.isoformat(),
"format": self.file_info.format,
}
self.storage_objects: List[ObjectFixture] = []
if self.populate_storage:
self.storage_objects.append(
ObjectFixture(
file_path=self.file_path,
bucket_id=DEFAULT_CONFIG.s3_outbox_bucket_id,
object_id=str(self.file_id),
)
)
FILES: Dict[str, FileState] = {
"in_registry_in_storage": FileState(
id=uuid.uuid4(),
file_id=get_file_id_example(0),
grouping_label=get_study_id_example(0),
file_path=TEST_FILE_PATHS[0],
populate_db=True,
populate_storage=True,
),
"in_registry_not_in_storage": FileState(
id=uuid.uuid4(),
file_id=get_file_id_example(1),
grouping_label=get_study_id_example(1),
file_path=TEST_FILE_PATHS[1],
populate_db=True,
populate_storage=False,
),
"not_in_registry_not_in_storage": FileState(
id=uuid.uuid4(),
file_id=get_file_id_example(2),
grouping_label=get_study_id_example(2),
file_path=TEST_FILE_PATHS[2],
populate_db=False,
populate_storage=False,
),
}
|
StarcoderdataPython
|
90541
|
# %load_ext autoreload
# %autoreload 2
import os
import pickle

import numpy as np
from pyhamimports import *
from spectrum import Spectrum
import glob
from tqdm import tqdm
from subprocess import check_output
datestr = check_output(["/bin/date","+%F"])
datestr = datestr.decode().replace('\n', '')
singleTemp_dir = "resources/templates/"
SB2Temp_dir = "resources/templates_SB2/"
singleTemp_list = np.array([os.path.basename(x)
for x in glob.glob(singleTemp_dir + "*.fits")])
singleTemp_list.sort()
SB2Temp_list = np.array([os.path.basename(x)
for x in glob.glob(SB2Temp_dir + "*.fits")])
SB2Temp_list.sort()
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 = O, B, A, F, G, K, M, L, C, WD
single_letter_specTypes = np.array(['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'C', 'D'])
specTypes = np.array(['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'dC', 'DA'])
new_tempLines_0 = np.empty(singleTemp_list.size, dtype=int)
new_tempLines_1 = np.empty(singleTemp_list.size, dtype=np.float64)
new_tempLines_2 = np.empty(singleTemp_list.size, dtype=np.float64)
new_tempLines_3 = np.ones(singleTemp_list.size, dtype=int) * 5
new_tempLines_4 = []
spec = Spectrum()  # needed inside the loop below, where splitSpecType is called for white-dwarf templates
for ii in range(singleTemp_list.size):
new_tempLines_0[ii] = np.where(
single_letter_specTypes == singleTemp_list[ii][0])[0][0]
if new_tempLines_0[ii] == 9:
new_tempLines_1[ii] = spec.splitSpecType(singleTemp_list[ii].replace(".fits", ""))[1]
else:
new_tempLines_1[ii] = singleTemp_list[ii][1]
if len(singleTemp_list[ii].replace("_", " ").split()) == 1:
new_tempLines_2[ii] = 0.
else:
new_tempLines_2[ii] = np.float64(
singleTemp_list[ii].replace("_", " ").split()[1])
spec = Spectrum()
ftype = None
print("Measuring lines for single star templates:")
for ii in tqdm(range(singleTemp_list.size)):
message, ftype = spec.readFile(singleTemp_dir + singleTemp_list[ii], ftype)
spec._lines = spec.measureLines()
lines = np.array(list(spec._lines.values()))[
np.argsort(list(spec._lines.keys()))]
new_tempLines_4.append(lines)
SB2_index_start = new_tempLines_0.max() + 1 # 10
new_tempLines_0 = np.append(new_tempLines_0, np.arange(
SB2_index_start, SB2_index_start + SB2Temp_list.size, step=1))
new_tempLines_1 = np.append(new_tempLines_1, np.zeros(SB2Temp_list.size))
new_tempLines_2 = np.append(new_tempLines_2, np.zeros(SB2Temp_list.size))
new_tempLines_3 = np.append(new_tempLines_3, np.ones(SB2Temp_list.size) * 5)
# new_tempLines_4 = new_tempLines_4
spec = Spectrum()
ftype = None
print("Measuring lines for SB2 templates:")
for ii, filename in enumerate(tqdm(SB2Temp_list)):
# temp_list = []
message, ftype = spec.readFile(SB2Temp_dir + filename, ftype)
measuredLines = spec.measureLines()
spec._lines = measuredLines
lines = np.array(list(spec._lines.values()))[
np.argsort(list(spec._lines.keys()))]
linesLabels = np.array(list(spec._lines.keys()))[
np.argsort(list(spec._lines.keys()))]
# temp_list.append(lines)
new_tempLines_4.append(lines)
new_tempLines = [new_tempLines_0, new_tempLines_1,
new_tempLines_2, new_tempLines_3, new_tempLines_4]
pklPath = os.path.join(spec.thisDir, 'resources',
f'tempLines_{datestr}.pickle')
with open(pklPath, 'wb') as pklFile:
pickle.dump(new_tempLines, pklFile)
|
StarcoderdataPython
|
3451712
|
import requests
response = requests.request('GET', 'https://wallhaven.cc')
print(response.reason)
|
StarcoderdataPython
|
5141851
|
<gh_stars>0
for i in range(1,10):
for j in range(1,i+1):
print("%d*%d=%2d" % (j,i,i*j),end=" ")
print("")
|
StarcoderdataPython
|
142738
|
# import the required packages
import pandas as pd
import matplotlib.pyplot as plt
# the URL from which we want to download the table
pageURL = 'https://it.wikipedia.org/wiki/Leone_d%27oro_al_miglior_film'
# let pandas download the page directly, giving it hints about which table we want
# "match" : the table must contain the string "Anno"
# "header": the first row contains the column names
tables = pd.read_html(pageURL, match='Anno', header=0)
# read_html returns a list of tables; use the first one
dataframe = tables[0]
# some rows are missing the year (thanks Alessio!), which has to be taken from the previous row
# we also have to drop the rows for the years in which no prize was awarded
# 1 - convert the dataframe into a list of dictionaries
records = dataframe.to_dict(orient='records')
# 2 - fix the defective records
corrected_records = []
current_year = None
for record in records:
if ('mostra non fu' in record['Film']) or ('non venne assegnato') in record['Film']:
continue
if not record['Anno'].isdigit():
corrected_record = {
'Anno' : current_year,
'Film' : record['Anno'],
'Regista' : record['Film'],
'Nazione' : record['Regista']
}
corrected_records.append(corrected_record)
else:
current_year = record['Anno']
corrected_records.append(record)
# 3 - convert the dictionaries back into a dataframe
dataframe = pd.DataFrame(corrected_records)
# save to CSV
dataframe.to_csv('leoni.csv', index=None, quoting=1, encoding='utf8')
# pivot to count each country's winners
pivot = dataframe.groupby('Nazione').size().reset_index(name='Vincitori')
pivot = pivot.sort_values(by='Vincitori', ascending=False)
# save to CSV
pivot.to_csv('paesi_vincitori.csv', index=None, encoding='utf8')
# bar chart
pivot_sorted = pivot.sort_values(by='Vincitori', ascending=True)
pivot_sorted.plot.barh(x='Nazione', y='Vincitori')
plt.savefig('bars.png')
# PYTHON RULEZ
|
StarcoderdataPython
|
3505226
|
# -*- coding: utf-8 -*-
"""
coord_geog
manage geographical points, perform conversions, etc
2015.nov 0.2 mlabru pep8 style conventions
2014.nov 0.1 mlabru initial version (Linux/Python)
"""
# < imports >----------------------------------------------------------------------------------
# python library
import logging
import math
# local
import coords.coord_defs as cdf
import coords.coord_conv as cnv
# < logging >----------------------------------------------------------------------------------
# logger
M_LOG = logging.getLogger(__name__)
M_LOG.setLevel(cdf.DI_LOG_LEVEL)
# ---------------------------------------------------------------------------------------------
def __calc_gama(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
"""
    compute the angle between the reference and the point
    :param ff_lat_pto: latitude of the point in degrees
    :param ff_lng_pto: longitude of the point in degrees
    :param ff_lat_ref: latitude of the reference in degrees
    :param ff_lng_ref: longitude of the reference in degrees
    :returns: cosine of the angle between the reference and the point
"""
# logger
M_LOG.info(">> __calc_gama")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
assert -90. <= ff_lat_ref <= 90.
assert -180. <= ff_lng_ref <= 180.
# verifica coincidência de pontos
if (ff_lat_pto == ff_lat_ref) and (ff_lng_pto == ff_lng_ref):
# pontos coincidentes
return 0.
# obtém as coordenadas do ponto de referência em radianos
lf_lat_ref = math.radians(ff_lat_ref)
lf_lng_ref = math.radians(ff_lng_ref)
lf_ir = math.cos(lf_lat_ref) * math.cos(lf_lng_ref)
lf_jr = math.cos(lf_lat_ref) * math.sin(lf_lng_ref)
lf_kr = math.sin(lf_lat_ref)
# obtém as coordenadas do ponto em radianos
lf_lat_pto = math.radians(ff_lat_pto)
lf_lng_pto = math.radians(ff_lng_pto)
lf_ip = math.cos(lf_lat_pto) * math.cos(lf_lng_pto)
lf_jp = math.cos(lf_lat_pto) * math.sin(lf_lng_pto)
lf_kp = math.sin(lf_lat_pto)
# distância entre a referência e o ponto
return (lf_ir * lf_ip) + (lf_jr * lf_jp) + (lf_kr * lf_kp)
# ---------------------------------------------------------------------------------------------
def decl_xyz_0(ff_x, ff_y, ff_z, ff_decl_mag):
"""
:param ff_x: DOCUMENT ME!
:param ff_y: DOCUMENT ME!
:param ff_z: DOCUMENT ME!
:param ff_decl_mag: DOCUMENT ME!
"""
# logger
M_LOG.info(">> decl_xyz_0")
M_LOG.debug("x, y, z / decl (A): {} {} {} {}".format(ff_x, ff_y, ff_z, ff_decl_mag))
# quadrante
li_quad = 0
# declinação magnética
lf_dec_mag = 0.
# ângulo e distância
lf_ang = 0.
lf_dst = 0.
# condições marginais
if (ff_x != 0.) or (ff_y != 0.):
# x = 0. ?
if 0. == ff_x:
if ff_y > 0.:
# determinação da distancia & ângulo trigonométrico
lf_dst = ff_y
lf_ang = cdf.D_RAD_PI_2
else:
# determinação da distancia & ângulo trigonométrico
lf_dst = abs(ff_y)
lf_ang = cdf.D_RAD_3PI_2
# y = 0. ?
elif ff_y == 0.:
if ff_x > 0.:
# determinação da distancia & ângulo trigonométrico
lf_dst = ff_x
lf_ang = 0.
else:
# determinação da distancia & ângulo trigonométrico
lf_dst = abs(ff_x)
lf_ang = math.pi
# senão,...
else:
# determinação do quadrante
if ff_x > 0.:
if ff_y > 0.:
li_quad = 1
else:
li_quad = 4
elif ff_y > 0.:
li_quad = 2
else:
li_quad = 3
# determinação da distancia & ângulo trigonométrico
lf_dst = math.sqrt((ff_x ** 2) + (ff_y ** 2))
lf_ang = math.atan(abs(ff_y) / abs(ff_x))
# correção do ângulo trigonométrico devido ao quadrante
if 2 == li_quad:
lf_ang = math.pi - lf_ang
elif 3 == li_quad:
lf_ang += math.pi
elif 4 == li_quad:
lf_ang = cdf.D_RAD_2PI - lf_ang
# converte o ângulo trigonométrico em radial
if lf_ang <= cdf.D_RAD_PI_2:
lf_ang = cdf.D_RAD_PI_2 - lf_ang
else:
lf_ang =(cdf.D_RAD_PI_2 * 5) - lf_ang
# converte a declinação magnética para radianos
lf_dec_mag = math.radians(ff_decl_mag)
# corrige a radial devido a declinação magnética
if ff_decl_mag < 0.:
lf_ang += lf_dec_mag
elif ff_decl_mag > 0.:
lf_ang -= lf_dec_mag
# converte a radial em ângulo trigonométrico
if lf_ang <= cdf.D_RAD_PI_2:
lf_ang = cdf.D_RAD_PI_2 - lf_ang
else:
lf_ang =(cdf.D_RAD_PI_2 * 5) - lf_ang
# calcula as novas coordenadas X e Y
ff_x = lf_dst * math.cos(lf_ang)
ff_y = lf_dst * math.sin(lf_ang)
#l_log.debug("x, y, z (D): {} {} {}".format(ff_x, ff_y, ff_z))
# return
return ff_x, ff_y, ff_z
# ---------------------------------------------------------------------------------------------
def decl_xyz(ff_x, ff_y, ff_z, ff_dcl_mag=cdf.M_DCL_MAG):
"""
    negative (W), rotates clockwise
    :param ff_x: DOCUMENT ME!
    :param ff_y: DOCUMENT ME!
    :param ff_z: DOCUMENT ME!
    :param ff_dcl_mag: DOCUMENT ME!
    :returns: the point corrected for magnetic declination
"""
# logger
M_LOG.info(">> decl_xyz")
# declinação em radianos
lf_dcl_r = math.radians(abs(ff_dcl_mag))
# sin e cos da declinação
lf_dcl_sin = math.sin(lf_dcl_r)
lf_dcl_cos = math.cos(lf_dcl_r)
    # keep the original coordinates: the rotation must use the un-rotated values
    lf_x = ff_x
    lf_y = ff_y
    # declination to the east?
    if ff_dcl_mag > 0.:
        # adjust the coordinate for the magnetic declination
        ff_x = (lf_x * lf_dcl_cos) - (lf_y * lf_dcl_sin)
        ff_y = (lf_y * lf_dcl_cos) + (lf_x * lf_dcl_sin)
    # otherwise, declination to the west
    else:
        # adjust the coordinate for the magnetic declination
        ff_x = (lf_x * lf_dcl_cos) + (lf_y * lf_dcl_sin)
        ff_y = (lf_y * lf_dcl_cos) - (lf_x * lf_dcl_sin)
# return
return ff_x, ff_y, ff_z
# ---------------------------------------------------------------------------------------------
def geo_azim(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
"""
    compute the azimuth between two geographic coordinates

        azimuth        cartesian
          000             090
       270   090       180   000
          180             270

    :param ff_lat_pto: latitude of the point in degrees
    :param ff_lng_pto: longitude of the point in degrees
    :param ff_lat_ref: latitude of the reference in degrees
    :param ff_lng_ref: longitude of the reference in degrees
    :returns: azimuth between the reference and the point in radians
"""
# logger
M_LOG.info(">> geo_azim")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
assert -90. <= ff_lat_ref <= 90.
assert -180. <= ff_lng_ref <= 180.
# condições especiais de retorno
if (ff_lat_ref == ff_lat_pto) and (ff_lng_ref == ff_lng_pto):
# pontos coincidentes
return 0.
if (ff_lat_ref == ff_lat_pto) and (ff_lng_ref > ff_lng_pto):
# mesma linha à esquerda
return math.radians(270.)
if (ff_lat_ref == ff_lat_pto) and (ff_lng_ref < ff_lng_pto):
# mesma linha à direita
return math.radians(90.)
if (ff_lat_ref > ff_lat_pto) and (ff_lng_ref == ff_lng_pto):
# mesma coluna abaixo
return math.pi
if (ff_lat_ref < ff_lat_pto) and (ff_lng_ref == ff_lng_pto):
# mesma coluna acima
return 0.
# calcula o ângulo (rad)
lf_gama = __calc_gama(ff_lat_pto, ff_lng_pto, ff_lat_ref, ff_lng_ref)
if 1 == int(lf_gama):
lf_arc_gama = 0.
else:
lf_arc_gama = math.acos(lf_gama)
# cálculo do ângulo (rad) entre X e o ponto
lf_delta = __calc_gama(ff_lat_pto, ff_lng_pto, ff_lat_pto, ff_lng_ref)
if 1 == int(lf_delta):
lf_arc_delta = 0.
else:
lf_arc_delta = math.acos(lf_delta)
# cálculo do azimute básico
lf_aux = math.sin(lf_arc_delta) / math.sin(lf_arc_gama)
if lf_aux > 1.:
lf_aux = 1.
elif lf_aux < -1.:
lf_aux = -1.
lf_azim = math.asin(lf_aux)
li_quad = 0
# cálculo do azimute corrigido
if (ff_lat_ref < ff_lat_pto) and (ff_lng_ref < ff_lng_pto): li_quad = 1
if (ff_lat_ref < ff_lat_pto) and (ff_lng_ref > ff_lng_pto): li_quad = 4
if (ff_lat_ref > ff_lat_pto) and (ff_lng_ref > ff_lng_pto): li_quad = 3
if (ff_lat_ref > ff_lat_pto) and (ff_lng_ref < ff_lng_pto): li_quad = 2
if 2 == li_quad: lf_azim = math.pi - lf_azim
if 3 == li_quad: lf_azim = math.pi + lf_azim
if 4 == li_quad: lf_azim = (2 * math.pi) - lf_azim
# return
return lf_azim
# ---------------------------------------------------------------------------------------------
def geo_azim_bug(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
"""
    compute the azimuth between two geographic points
    (valid for distances shorter than 800 NM)
    :param ff_lat_pto: latitude of the point in degrees
    :param ff_lng_pto: longitude of the point in degrees
    :param ff_lat_ref: latitude of the reference in degrees
    :param ff_lng_ref: longitude of the reference in degrees
    :returns: azimuth between the reference and the point
"""
# logger
M_LOG.info(">> geo_azim_bug")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
assert -90. <= ff_lat_ref <= 90.
assert -180. <= ff_lng_ref <= 180.
# verifica coincidência de pontos
if (ff_lat_pto == ff_lat_ref) and (ff_lng_pto == ff_lng_ref):
# pontos coincidentes
return 0.
# calcula a distância em latitude (DLA)
lf_lat_dst = ff_lat_pto - ff_lat_ref
# calcula a distância em longitude (DLO)
lf_lng_dst = ff_lng_pto - ff_lng_ref
# retorna o azimute entre os pontos
return cnv.azm2ang(math.atan2(lf_lat_dst, lf_lng_dst))
# ---------------------------------------------------------------------------------------------
def geo_dist(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
"""
    compute the distance between two geographic points
    :param ff_lat_pto: latitude of the point in degrees
    :param ff_lng_pto: longitude of the point in degrees
    :param ff_lat_ref: latitude of the reference in degrees
    :param ff_lng_ref: longitude of the reference in degrees
    :returns: distance between the reference and the point in NM
"""
# logger
M_LOG.info(">> geo_dist")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
assert -90. <= ff_lat_ref <= 90.
assert -180. <= ff_lng_ref <= 180.
# verifica coincidência de pontos
if (ff_lat_pto == ff_lat_ref) and (ff_lng_pto == ff_lng_ref):
# pontos coincidentes
return 0.
# calcula o ângulo
lf_gama = __calc_gama(ff_lat_pto, ff_lng_pto, ff_lat_ref, ff_lng_ref)
# retorna o cálculo da distância entre a referência e o ponto
return math.acos(lf_gama) * cdf.D_EARTH_RADIUS_NM
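# Illustrative usage, not part of the original module (coordinates are made up):
#
#   # great-circle distance in NM from the default reference to a point at 23°S, 46°W
#   dist_nm = geo_dist(-23.0, -46.0)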
# ---------------------------------------------------------------------------------------------
def geo_dist_2(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
"""
    compute the distance between two geographic points
    (valid for distances shorter than 800 NM)
    :param ff_lat_pto: latitude of the point in degrees
    :param ff_lng_pto: longitude of the point in degrees
    :param ff_lat_ref: latitude of the reference in degrees
    :param ff_lng_ref: longitude of the reference in degrees
    :returns: distance between the reference and the point in NM
"""
# logger
M_LOG.info(">> geo_dist_2")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
assert -90. <= ff_lat_ref <= 90.
assert -180. <= ff_lng_ref <= 180.
# verifica coincidência de pontos
if (ff_lat_pto == ff_lat_ref) and (ff_lng_pto == ff_lng_ref):
# pontos coincidentes
return 0.
# calcula a distância em latitude
lf_lat_dst = (ff_lat_pto - ff_lat_ref) ** 2
# calcula a distância em longitude
lf_lng_dst = (ff_lng_pto - ff_lng_ref) ** 2
# retorna a distância entre a referência e o ponto
return math.sqrt(lf_lat_dst + lf_lng_dst) * cdf.D_CNV_G2NM
# ---------------------------------------------------------------------------------------------
def geo2pol(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
"""
    convert geographic coordinates into polar coordinates
    :param ff_lat_pto: latitude in degrees
    :param ff_lng_pto: longitude in degrees
    :param ff_lat_ref: latitude of the reference point
    :param ff_lng_ref: longitude of the reference point
    :returns: polar coordinates of the point (azimuth in degrees, distance in NM)
"""
# logger
M_LOG.info(">> geo2pol")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
assert -90. <= ff_lat_ref <= 90.
assert -180. <= ff_lng_ref <= 180.
# verifica se os pontos são coincidentes
if (ff_lat_ref == ff_lat_pto) and (ff_lng_ref == ff_lng_pto):
# ok, pontos coincidentes
return 0., 0.
# calcula o ângulo
lf_gama = math.acos(__calc_gama(ff_lat_pto, ff_lng_pto, ff_lat_ref, ff_lng_ref))
# calcula a distância
lf_dist = lf_gama * cdf.D_EARTH_RADIUS_NM
# calcula o ângulo
lf_delta = math.acos(__calc_gama(ff_lat_pto, ff_lng_ref, ff_lat_pto, ff_lng_pto))
# verificação do quadrante
li_quad = -1
if (ff_lat_ref < ff_lat_pto) and (ff_lng_ref == ff_lng_pto):
lf_azim = 0.
elif (ff_lat_ref == ff_lat_pto) and (ff_lng_ref < ff_lng_pto):
lf_azim = 90.
elif (ff_lat_ref > ff_lat_pto) and (ff_lng_ref == ff_lng_pto):
lf_azim = 180.
elif (ff_lat_ref == ff_lat_pto) and (ff_lng_ref > ff_lng_pto):
lf_azim = 270.
elif (ff_lat_ref < ff_lat_pto) and (ff_lng_ref < ff_lng_pto):
li_quad = 1
elif (ff_lat_ref > ff_lat_pto) and (ff_lng_ref < ff_lng_pto):
li_quad = 2
elif (ff_lat_ref > ff_lat_pto) and (ff_lng_ref > ff_lng_pto):
li_quad = 3
elif (ff_lat_ref < ff_lat_pto) and (ff_lng_ref > ff_lng_pto):
li_quad = 4
if -1 != li_quad:
# seno do azimute
lf_sin_azm = math.sin(lf_delta) / math.sin(lf_gama)
# calculo do azimute
if lf_sin_azm > 1.:
lf_sin_azm = 1.
elif lf_sin_azm < -1.:
lf_sin_azm = -1.
lf_azim = math.degrees(math.asin(lf_sin_azm))
if 2 == li_quad:
lf_azim = 180. - lf_azim
elif 3 == li_quad:
lf_azim = 180. + lf_azim
elif 4 == li_quad:
lf_azim = 360. - lf_azim
if lf_azim >= 360.:
lf_azim -= 360.
elif lf_azim < 0:
lf_azim += 360.
# return
return lf_azim, lf_dist
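# Illustrative usage, not part of the original module (coordinates are made up):
#
#   # azimuth in degrees and distance in NM of a point at 23°S, 46°W relative to
#   # the default reference defined in coords.coord_defs
#   azim_deg, dist_nm = geo2pol(-23.0, -46.0)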
# ---------------------------------------------------------------------------------------------
def geo2xy(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
"""
    convert geographic coordinates into cartesian coordinates
    :param ff_lat_pto: latitude in degrees
    :param ff_lng_pto: longitude in degrees
    :param ff_lat_ref: latitude of the reference point
    :param ff_lng_ref: longitude of the reference point
    :returns: cartesian X and Y coordinates of the point in NM
"""
# logger
M_LOG.info(">> geo2xy")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
assert -90. <= ff_lat_ref <= 90.
assert -180. <= ff_lng_ref <= 180.
# converte de geográfica para polar
lf_azim, lf_dist = geo2pol(ff_lat_pto, ff_lng_pto, ff_lat_ref, ff_lng_ref)
# converte de polar para cartesiana
lf_x = lf_dist * math.sin(math.radians(lf_azim))
lf_y = lf_dist * math.cos(math.radians(lf_azim))
# correção das coordenadas X e Y devido ao efeito da declinação magnetica
# lf_x, lf_y = decl_xyz(lf_x, lf_y, lf_z, f_ref.f_dcl_mag)
# return
return lf_x, lf_y
# ---------------------------------------------------------------------------------------------
def geo2xy_2(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
"""
    geographic coordinate conversion
    :param ff_lat_pto: latitude in degrees
    :param ff_lng_pto: longitude in degrees
    :param ff_lat_ref: latitude of the geographic reference
    :param ff_lng_ref: longitude of the geographic reference
    :returns: X and Y coordinates of the point
"""
# logger
M_LOG.info(">> geo2xy_2")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
assert -90. <= ff_lat_ref <= 90.
assert -180. <= ff_lng_ref <= 180.
# cálculo da distância e do azimute geográficos do ponto
l_vd = geo_dist(ff_lat_pto, ff_lng_pto, ff_lat_ref, ff_lng_ref)
l_vr = geo_azim(ff_lat_pto, ff_lng_pto, ff_lat_ref, ff_lng_ref)
# converte o azimute para ângulo em radianos
l_vr = math.radians(cnv.azm2ang(math.degrees(l_vr)))
# cálculo das coordenadas X & Y do ponto
lf_x = l_vd * math.cos(l_vr)
lf_y = l_vd * math.sin(l_vr)
# existe declinação magnética ?
# if 0. != f_ref.f_dcl_mag:
# correção das coordenadas X e Y devido ao efeito da declinação magnética
# decl_xy(f_ref.f_dcl_mag)
# return x & y
return lf_x, lf_y
# ---------------------------------------------------------------------------------------------
def geo2xyz_3(ff_lat_pto, ff_lng_pto, ff_alt=0.):
"""
geodetic coordinates(latitude, longitude, height) can be converted into XY
"""
# logger
M_LOG.info(">> geo2xyz_3")
# check input
assert -90. <= ff_lat_pto <= 90.
assert -180. <= ff_lng_pto <= 180.
# calcula x
lf_x = (ff_lng_pto - float(cdf.M_REF_LNG)) * cdf.D_CNV_GR2M
# calcula y
lf_y = (ff_lat_pto - float(cdf.M_REF_LAT)) * cdf.D_CNV_GR2M
# elevação
lf_z = ff_alt
# retorna as coordenadas xyz
return lf_x, lf_y, lf_z
# ---------------------------------------------------------------------------------------------
def pol2xyz(ff_azim, ff_dist):
"""
    convert polar coordinates into cartesian coordinates
    :param ff_azim: azimuth
    :param ff_dist: distance
    :returns: cartesian coordinates of the point
"""
# logger
M_LOG.info(">> pol2xyz")
# check input
assert 0. <= ff_azim <= 360.
# converte a distância para metros
lf_dst = ff_dist
# converte a radial para ângulo trigonométrico
lf_azim = math.radians(cnv.azm2ang(math.degrees(ff_azim)))
# converte a distância e ângulo em X e Y
lf_x = lf_dst * math.cos(lf_azim)
lf_y = lf_dst * math.sin(lf_azim)
# return
return lf_x, lf_y, 0.
# ---------------------------------------------------------------------------------------------
def xyz2geo_3(ff_x, ff_y, ff_z=0.):
"""
    geographic coordinate conversion
    geodetic coordinates (latitude, longitude, height) can be converted into xyz.
    :param ff_x: x coordinate of the point
    :param ff_y: y coordinate of the point
    :param ff_z: z coordinate of the point
"""
# logger
M_LOG.info(">> xyz2geo_3")
# calcula latitude
lf_lat = float(cdf.M_REF_LAT) + (ff_y / cdf.D_CNV_GR2M)
# calcula longitude
lf_lng = float(cdf.M_REF_LNG) + (ff_x / cdf.D_CNV_GR2M)
# calcula altitude
lf_alt = ff_z
# retorna as coordenadas lat/long
return lf_lat, lf_lng, lf_alt
# < the end >----------------------------------------------------------------------------------
|
StarcoderdataPython
|
8120033
|
from django.apps import AppConfig
class ServertestConfig(AppConfig):
name = 'servertest'
|
StarcoderdataPython
|
3310262
|
<reponame>eladshabi/banias<filename>src/test/Publish_events.py
from google.cloud import pubsub_v1
import json
import time
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path("elad-playground", "Autofleer_Topic")
f = open("/src/test/test_events.txt", "r")
event = f.readline()
json_file = json.loads(event)
for event_num in range (1, 3):
for action_id in range (0, 20):
json_file['Event']['type']['event_version'] = str(event_num)
json_file['Event']['payload']['fleet_id'] = action_id
publisher.publish(topic_path, data=str(json_file).encode("utf-8"))
time.sleep(90)
|
StarcoderdataPython
|
341116
|
<reponame>bcgov/CIT
from .test_opportunity import *
|
StarcoderdataPython
|
9676782
|
from django.db import models
from django.forms import model_to_dict
from . import choices
class DateTime(models.Model):
year = models.PositiveSmallIntegerField(
default=None,
blank=True,
null=True,
)
month = models.PositiveSmallIntegerField(
default=None,
blank=True,
null=True,
)
day = models.PositiveSmallIntegerField(
default=None,
blank=True,
null=True,
)
hour = models.PositiveSmallIntegerField(
default=None,
blank=True,
null=True,
)
minute = models.PositiveSmallIntegerField(
default=None,
blank=True,
null=True,
)
qualifiers = models.CharField(
max_length=4,
choices=choices.QUALIFIERS_CHOICES,
default='EXC',
)
calendar = models.CharField(
max_length=9,
choices=choices.CALENDAR_CHOICES,
default='Gregorian',
)
link = models.PositiveSmallIntegerField(
default=0,
)
# unique_together = ((
# 'year',
# 'month',
# 'day',
# 'hour',
# 'minute',
# 'qualifiers',
# 'calendar',
# ),)
# between = models.OneToOneField(
# 'DateTime',
# on_delete=models.CASCADE,
# related_name='datetime_between',
# blank=True,
# null=True,
# default=None,
# )
@property
def datetime(self):
return model_to_dict(
DateTime.objects.get(pk=self.pk),
['year', 'month', 'day', 'hour', 'minute'],
)
@datetime.setter
def datetime(self, kwargs):
check_is_data = None
if kwargs:
kwargs.update({key: None for key, value in kwargs.items() if value == ''})
check_is_data = tuple(value for key, value in kwargs.items() if value is not None)
if check_is_data:
self.year = kwargs.get('year')
self.month = kwargs.get('month')
self.day = kwargs.get('day')
self.hour = kwargs.get('hour')
self.minute = kwargs.get('minute')
# if self.pk:
# # datetime = DateTime.objects.get(pk=self.pk)
# self.objects.update(
# day=value.get('day'),
# month=value.get('month'),
# year=value.get('year'),
# hour=value.get('hour'),
# minute=value.get('minute'),
# )
# else:
# DateTime.objects.create(
# day=value.get('day'),
# month=value.get('month'),
# year=value.get('year'),
# hour=value.get('hour'),
# minute=value.get('minute'),
# )
# elif instance_pk:
# DateTime.objects.get(pk=instance_pk).delete()
#
# def __str__(self):
# if self.year and self.month and self.day:
# return '%s' % (date(self.year, self.month, self.day).strftime('%d %b %Y'))
# # year = month = day = None
#
# if self.year is None:
# year = '----'
# else:
# year = self.year
#
# if self.month is None:
# month = '--'
# else:
# month = self.month
#
# if self.day is None:
# day = '--'
# else:
# day = self.day
#
# return '%s %s %s' % (day, month, year)
|
StarcoderdataPython
|
11242818
|
<gh_stars>10-100
#! /usr/bin/env python
# Copyright 2014-2017 <NAME> <<EMAIL>>
# Licensed under the GNU General Public License version 3 or higher
# I don't use the ez_setup module because it causes us to automatically build
# and install a new setuptools module, which I'm not interested in doing.
from setuptools import setup
setup (
name = 'bibtools',
version = '0.3.0.99',
# This package actually *is* zip-safe, but I've run into issues with
# installing it as a Zip: in particular, the install sometimes fails with
# "bad local file header", and backtraces don't include source lines.
# These are annoying enough and I don't really care so we just install it
# as flat files.
zip_safe = False,
packages = ['bibtools', 'bibtools.hacked_bibtexparser'],
install_requires = [
'pwkit >= 0.8.0',
'six >= 1.10',
],
package_data = {
'bibtools': ['*.sql', 'apj-issnmap.txt', 'defaults.cfg'],
},
entry_points = {
'console_scripts': ['bib = bibtools.cli:commandline'],
},
author = '<NAME>',
author_email = '<EMAIL>',
description = 'Command-line bibliography manager',
license = 'GPLv3',
keywords = 'bibliography',
url = 'https://github.com/pkgw/bibtools/',
)
|
StarcoderdataPython
|
3267401
|
# -*- coding: utf-8 -*-
def test_all_contains_only_valid_names():
import pycamunda.filter
for name in pycamunda.filter.__all__:
getattr(pycamunda.filter, name)
|
StarcoderdataPython
|
6676171
|
<reponame>TommasoPino/oem
import numpy as np
import warnings
from oem.tools import epoch_span_overlap, epoch_span_contains, time_range
REFERENCE_FRAMES = {
"inertial": ["EME2000", "GCRF", "ICRF", "MCI", "TEME", "TOD"],
"rotating": ["GRC", "ITRF2000", "ITRF-93", "ITRF-97", "TDR"]
}
class EphemerisCompare(object):
"""Comparison of two OrbitEphemerisMessage.
Only overlapping segments with identical reference frame and central bodies
will be compared. All comparisons are calculated in the segment reference
frame. Rotating reference frames are not supported for velocity-based or
RIC comparisons.
Attributes:
is_empty (bool): Flag indicating overlap between compared ephemerides.
Set to True if there is no overlap.
segments (list): List of SegmentCompare for matching EphemerisSegment
with overlapping spans.
Examples:
A comparison of two ephemerides is simply achieved through either a
direct call to EphemerisCompare or the subtraction interface on
the OrbitEphemerisMessage class. In general, the subtraction interface
is preferred.
>>> ephemeris1 = OrbitEphemerisMessage.open(file_path1)
>>> ephemeris2 = OrbitEphemerisMessage.open(file_path2)
>>> compare = ephemeris2 - ephemeris1
The EphemerisCompare object supports most of the same basic interfaces
as OrbitEphemerisMessage. To evaluate the at a particular epoch:
>>> compare(epoch)
To iterate through the compare at a fixed interval, use the `.steps`
method:
>>> for state_compare in compare.steps(60):
... # Operate on StateCompare
... pass
For multi-segment ephemerides, EphemerisCompare is iterable:
>>> for segment_compare in compare:
... for state_compare in segment_compare:
... pass
"""
def __init__(self, origin, target):
"""Create an EphemerisCompare.
Args:
origin (OrbitEphemerisMessage): Ephemeris at the origin of the
compare frame.
target (OrbitEphemerisMessage): Ephemeris compared against origin.
"""
segments = []
for origin_segment in origin:
for target_segment in target:
try:
segments.append(target_segment - origin_segment)
except ValueError:
continue
self._segments = [entry for entry in segments if not entry.is_empty]
def __call__(self, epoch):
for segment in self:
if epoch in segment:
return segment(epoch)
else:
raise ValueError(
f"Epoch {epoch} not contained in EphemerisCompare."
)
def __iter__(self):
return iter(self._segments)
def __contains__(self, epoch):
return any(epoch in segment for segment in self._segments)
def __repr__(self):
return f"EphemerisCompare(segments: {len(self.segments)})"
def steps(self, step_size):
"""Sample EphemerisCompare at equal time intervals.
This method returns a generator producing state compares at equal time
intervals spanning the useable duration of the parent EphemerisCompare.
Args:
step_size (float): Sample step size in seconds.
Yields:
state_compare: Sampled StateCompare.
"""
for segment in self:
for state in segment.steps(step_size):
yield state
@property
def is_empty(self):
return len(self._segments) == 0
@property
def segments(self):
return self._segments
class SegmentCompare(object):
"""Comparison of two EphemerisSegment.
Input segments must have identical reference frames and central bodies.
All comparisons are calculated in the input segment reference frame.
Rotating reference frames are not supported for velocity-based or
RIC comparisons.
Attributes:
is_empty (bool): Flag indicating overlap between compared segments. Set
to True if there is no overlap.
"""
def __init__(self, origin, target):
"""Create a SegmentCompare.
Args:
origin (EphemerisSegment): Segment at the origin of the
compare frame.
target (EphemerisSegment): Segment to compare against the
origin state.
"""
if (origin.metadata["REF_FRAME"] == target.metadata["REF_FRAME"]
and origin.metadata["CENTER_NAME"]
== target.metadata["CENTER_NAME"]):
self._span = epoch_span_overlap(origin.span, target.span)
self._origin = origin
self._target = target
else:
raise ValueError(
"Incompatible states: frame or central body mismatch."
)
def __contains__(self, epoch):
return (
self._span is not None and epoch_span_contains(self._span, epoch)
)
def __call__(self, epoch):
if epoch not in self:
raise ValueError(f"Epoch {epoch} not contained in SegmentCompare.")
return self._target(epoch) - self._origin(epoch)
def __repr__(self):
return f"SegmentCompare({str(self._span[0])}, {str(self._span[1])})"
def steps(self, step_size):
"""Sample SegmentCompare at equal time intervals.
This method returns a generator producing state compares at equal time
intervals spanning the useable duration of the parent SegmentCompare.
Args:
step_size (float): Sample step size in seconds.
Yields:
state_compare: Sampled StateCompare.
"""
for epoch in time_range(*self._span, step_size):
yield self(epoch)
@property
def is_empty(self):
return self._span is None
class StateCompare(object):
"""Comparison of two Cartesian states.
Input states must have identical epochs, reference frames, and central
bodies. All comparisons are calculated in the input state reference frame.
Rotating reference frames are not supported for velocity-based or
RIC comparisons.
Attributes:
epoch (Time): Epoch of the state compare.
range (float): Absolute distance between the two states.
range_rate (float): Absolute velocity between the two states.
position (ndarray): Relative position vector in the input frame.
velocity (ndarray): Relative velocity vector in the input frame.
position_ric (ndarray): Relative position vector in the RIC frame.
velocity_ric (ndarray): Relative velocity vector in the RIC frame.
Examples:
To compare two states, `origin` and `target`, either call the
StateCompare initializer directly
>>> compare = StateCompare(origin, target)
or simply difference the two states
>>> compare = origin - target
"""
def __init__(self, origin, target):
"""Create a StateCompare.
Args:
origin (State): State at the origin of the compare frame.
target (State): State to compare against the origin state.
Raises:
ValueError: Incompatible states: epoch, frame, or central
body mismatch.
"""
if (origin.epoch == target.epoch
and origin.frame == target.frame
and origin.center == target.center):
self._origin = origin
self._target = target
if self._origin.frame.upper() in REFERENCE_FRAMES["inertial"]:
self._inertial = True
elif self._origin.frame.upper() in REFERENCE_FRAMES["rotating"]:
self._inertial = False
else:
warnings.warn(
f"Nonstandard frame: '{self._origin.frame}'. "
"Assuming intertial. Override with ._inertial=False",
UserWarning
)
self._inertial = True
else:
raise ValueError(
"Incompatible states: epoch, frame, or central body mismatch."
)
def __repr__(self):
return f"StateCompare({str(self.epoch)})"
def _require_inertial(self):
if not self._inertial:
raise NotImplementedError(
"Velocity compares not supported for non-inertial frames. "
"To override, set ._inertial=True."
)
def _to_ric(self, vector):
self._require_inertial()
cross_track = np.cross(self._origin.position, self._origin.velocity)
in_track = np.cross(cross_track, self._origin.position)
R = np.array([
self._origin.position/np.linalg.norm(self._origin.position),
in_track/np.linalg.norm(in_track),
cross_track/np.linalg.norm(cross_track)
])
return R.dot(vector)
@property
def epoch(self):
return self._origin.epoch
@property
def range(self):
return np.linalg.norm(self._target.position - self._origin.position)
@property
def range_rate(self):
self._require_inertial()
return np.linalg.norm(self._target.velocity - self._origin.velocity)
@property
def position(self):
return self._target.position - self._origin.position
@property
def velocity(self):
self._require_inertial()
return self._target.velocity - self._origin.velocity
@property
def position_ric(self):
return self._to_ric(self.position)
@property
def velocity_ric(self):
w = self._to_ric(
np.cross(self._origin.position, self._origin.velocity)
/ np.linalg.norm(self._origin.position)**2
)
return self._to_ric(self.velocity) - np.cross(w, self.position_ric)
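# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The file names are placeholders, and the OrbitEphemerisMessage
# import simply mirrors the docstring examples above.
if __name__ == "__main__":  # pragma: no cover
    from oem import OrbitEphemerisMessage

    origin = OrbitEphemerisMessage.open("origin.oem")    # placeholder path
    target = OrbitEphemerisMessage.open("target.oem")    # placeholder path
    compare = target - origin                            # EphemerisCompare
    if not compare.is_empty:
        for state in compare.steps(60.0):                # sample every 60 s
            print(state.epoch, state.range, state.position_ric)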
|
StarcoderdataPython
|
1774942
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from testrail_yak import Milestone
from testrail_yak.lib.testrail import APIClient
from tests import BASEURL, reqmock
client = APIClient(BASEURL)
m = Milestone(client)
def test_get(reqmock):
milestone_id = 1
reqmock.get(f"{BASEURL}/index.php?/api/v2/get_milestone/{milestone_id}",
status_code=200,
text='''{
"completed_on": 1389968184,
"description": "...",
"due_on": 1391968184,
"id": 1,
"is_completed": true,
"name": "Release 1.5",
"project_id": 1,
"url": "http:///testrail/index.php?/milestones/view/1"
}''')
res = m.get(milestone_id=milestone_id)
assert res is not None
assert type(res) == dict
assert "completed_on" in res.keys()
assert "description" in res.keys()
assert "due_on" in res.keys()
assert "id" in res.keys()
assert "is_completed" in res.keys()
assert "name" in res.keys()
assert "project_id" in res.keys()
assert "url" in res.keys()
def test_get_all(reqmock):
project_id = 1
reqmock.get(f"{BASEURL}/index.php?/api/v2/get_milestones/{project_id}",
status_code=200,
text='''[{
"completed_on": 1389968184,
"description": "...",
"due_on": 1391968184,
"id": 1,
"is_completed": true,
"name": "Release 1.5",
"project_id": 1,
"url": "http:///testrail/index.php?/milestones/view/1"
}]''')
res = m.get_all(project_id=project_id)
assert res is not None
assert type(res) == list
assert type(res[0]) == dict
def test_add(reqmock):
project_id = 1
reqmock.post(f"{BASEURL}/index.php?/api/v2/add_milestone/{project_id}",
status_code=200,
text='''{
"completed_on": 1389968184,
"description": "...",
"due_on": 1391968184,
"id": 1,
"is_completed": true,
"name": "Release 1.5",
"project_id": 1,
"url": "http:///testrail/index.php?/milestones/view/1"
}''')
data = {"name": "Release 1.5", "description": "...", "due_on": 1391968184}
res = m.add(project_id=project_id, data=data)
assert res is not None
def test_update(reqmock):
milestone_id = 1
reqmock.post(f"{BASEURL}/index.php?/api/v2/update_milestone/{milestone_id}",
status_code=200,
text='''{
"completed_on": 1389968184,
"description": "...",
"due_on": 1391968184,
"id": 1,
"is_completed": true,
"name": "Release 1.5",
"project_id": 1,
"url": "http:///testrail/index.php?/milestones/view/1"
}''')
data = {"name": "Release 1.5", "description": "...", "due_on": 1391968184}
res = m.update(milestone_id=milestone_id, data=data)
assert res is not None
def test_delete(reqmock):
milestone_id = 1
reqmock.post(f"{BASEURL}/index.php?/api/v2/delete_milestone/{milestone_id}",
status_code=200,
text='')
res = m.delete(milestone_id=milestone_id)
assert res is not None
|
StarcoderdataPython
|
4877771
|
<filename>genienlp/paraphrase/model_utils.py
import torch
import math
import os
import glob
import re
import logging
import shutil
import numpy as np
from .transformers_utils import SPIECE_UNDERLINE
from genienlp.metrics import computeBLEU
logger = logging.getLogger(__name__)
def sort_checkpoints(output_dir):
return list(sorted(glob.glob(os.path.join(output_dir, "checkpointepoch=*.ckpt"), recursive=True)))
def get_transformer_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, dimension):
num_warmup_steps = max(1, num_warmup_steps)
def lr_lambda(current_step):
current_step += 1
return 1. / math.sqrt(dimension) * min(1 / math.sqrt(current_step), current_step / (num_warmup_steps * math.sqrt(num_warmup_steps)))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
def _rotate_checkpoints(args, checkpoint_prefix, use_mtime=False):
if not args.save_total_limit:
return
if args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
glob_checkpoints = glob.glob(os.path.join(args.output_dir, '{}-*'.format(checkpoint_prefix)))
if len(glob_checkpoints) <= args.save_total_limit:
return
ordering_and_checkpoint_path = []
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match('.*{}-([0-9]+)'.format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def compute_metrics(generations, golds, reduction='average'):
"""
Inputs:
generations: a list of list of strings; generations[i] is a list of all generated outputs of the model for example i
golds: a list of strings; golds[i] is the gold answer for example i
reduction: how we should compute an example's metrics from its multiple generations
"""
total_bleu = 0.0
# all_bleu = []
total_exact_match = 0.0
count = 0.0
for idx, output in enumerate(generations):
bleu_score = 0.0
exact_match = 0.0
for sample in output:
if reduction == 'average':
bleu_score += computeBLEU([sample], [[golds[idx]]])
else:
bleu_score = max(bleu_score, computeBLEU([sample], [[golds[idx]]]))
if re.sub('\s+', '', sample).lower() == re.sub('\s+', '', golds[idx]).lower():
if reduction == 'average':
exact_match += 1
else:
exact_match = max(exact_match, 1)
if reduction == 'average':
bleu_score /= len(output)
exact_match /= len(output)
total_bleu += bleu_score
total_exact_match += exact_match
count += 1
return {'bleu': total_bleu/count, 'em': total_exact_match/count*100}
def compute_attention(sample_layer_attention, att_pooling):
sample_layer_attention_pooled = None
if att_pooling == 'mean':
sample_layer_attention_pooled = torch.mean(sample_layer_attention, dim=0, keepdim=False)
elif att_pooling == 'max':
sample_layer_attention_pooled = torch.max(sample_layer_attention, dim=0, keepdim=False)[0]
return sample_layer_attention_pooled
def replace_quoted_params(src_tokens, tgt_tokens, tokenizer, sample_layer_attention_pooled, model_type, tgt_lang):
# find positions of quotation marks in src and tgt
src2tgt_mapping = {}
src2tgt_mapping_index = {}
## FIXED: quotation marks are exclusively used to wrap parameters so just check if they are present in target token
# quote_wordpiece = tokenizer.tokenize('"')[0]
# quote_token = '"'
src_quotation_symbols = ['"']
tgt_quotation_symbols = ['"']
if tgt_lang == 'ru':
tgt_quotation_symbols.extend(['«', '»'])
src_spans_ind = [index for index, token in enumerate(src_tokens) if
any([symbol in token for symbol in src_quotation_symbols])]
tgt_spans_ind = [index for index, token in enumerate(tgt_tokens) if
any([symbol in token for symbol in tgt_quotation_symbols])]
if model_type == 'marian':
src_strings = tokenizer.spm_source.DecodePieces(src_tokens)
tgt_strings = tokenizer.spm_target.DecodePieces(tgt_tokens)
else:
src_strings = tokenizer.convert_tokens_to_string(src_tokens)
tgt_strings = tokenizer.convert_tokens_to_string(tgt_tokens)
if len(src_spans_ind) % 2 != 0:
logging.error('corrupted span in src string: [{}]'.format(src_strings))
return tgt_strings, False
if len(tgt_spans_ind) % 2 != 0:
logging.error('corrupted span in tgt string: [{}] with src string: [{}]\n'
'outputting example without reverting the parameter'.format(tgt_strings, src_strings))
return tgt_strings, False
# arrange spans and exclude quotation mark indices
src_spans = [(src_spans_ind[i] + 1, src_spans_ind[i + 1] - 1) for i in range(0, len(src_spans_ind), 2)]
tgt_spans = [(tgt_spans_ind[i] + 1, tgt_spans_ind[i + 1] - 1) for i in range(0, len(tgt_spans_ind), 2)]
if len(src_spans) != len(tgt_spans):
logging.error('numbers of spans in src and tgt strings do not match: [{}], [{}]\n'
'outputting example without reverting the parameter'.format(src_strings, tgt_strings))
return tgt_strings, False
tgt_span_success = set()
for src_idx, (beg, end) in enumerate(src_spans):
i = beg
tgt_span_idx = None
while i <= end:
max_tgt_att_idx = torch.argmax(sample_layer_attention_pooled[:, i]).item()
# find span in tgt that contains this index
for tgt_idx, (s1, s2) in enumerate(tgt_spans):
if s1 <= max_tgt_att_idx <= s2 and (s1, s2) not in tgt_span_success:
tgt_span_idx = tgt_idx
src2tgt_mapping[(beg, end)] = (s1, s2)
src2tgt_mapping_index[src_idx] = tgt_span_idx
tgt_span_success.add((s1, s2))
break
if tgt_span_idx is not None:
break
else:
# span could not be found; check the next wordpiece
i += 1
if tgt_span_idx is None:
logger.error(
'Could not find a corresponding span in tgt for ({}, {}) src span in src string: [{}]'.format(beg, end,
src_strings))
return tgt_strings, False
####
# replacing in word-piece space is not clean since Marian uses different spm models for src and tgt
####
# # replace property values (wrapped in quotation marks) in target text with source values
# tgt2src_mapping = {v: k for k, v in src2tgt_mapping.items()}
# tgt_begin2span = {k[0]: k for k, v in tgt2src_mapping.items()}
# all_tgt_begins = set(tgt_begin2span.keys())
#
# new_tgt_tokens = []
# i = 0
# while i < len(tgt_tokens):
# if i in all_tgt_begins:
# tgt_span = tgt_begin2span[i]
# src_span = tgt2src_mapping[tgt_span]
# new_tgt_tokens.extend(src_tokens[src_span[0]: src_span[1]+1])
# i += tgt_span[1] - tgt_span[0] + 1
# else:
# new_tgt_tokens.append(tgt_tokens[i])
# i += 1
# final_output = tokenizer.convert_tokens_to_ids(new_tgt_tokens)
src_quoted_pattern_maybe_space = re.compile(r'[{0}]\s?([^{0}]*?)\s?[{0}]'.format(''.join(src_quotation_symbols)))
tgt_quoted_pattern_maybe_space = re.compile(r'[{0}]\s?([^{0}]*?)\s?[{0}]'.format(''.join(tgt_quotation_symbols)))
src_matches = list(re.finditer(src_quoted_pattern_maybe_space, src_strings))
tgt_matches = list(re.finditer(tgt_quoted_pattern_maybe_space, tgt_strings))
tgt2src_mapping_index = {v: k for k, v in src2tgt_mapping_index.items()}
# move through characters
tokens = []
curr = 0
for pos, match in enumerate(tgt_matches):
start, end = match.span()
if start > curr:
tokens.append(tgt_strings[curr:start])
replace_match = src_matches[tgt2src_mapping_index[pos]]
tokens.append(replace_match.group(0))
curr = end
if curr < len(tgt_strings):
tokens.append(tgt_strings[curr:])
text = ' '.join(tokens)
return text, True
def force_replace_quoted_params(src_tokens, tgt_tokens, tokenizer, sample_layer_attention_pooled, model_type):
# find positions of quotation marks in src
src2tgt_mapping = {}
src_spans_ind = [index for index, token in enumerate(src_tokens) if '"' in token]
tgt_is_piece = [1 if token[0] == SPIECE_UNDERLINE else 0 for token in tgt_tokens]
tgt_piece2word_mapping = list(np.cumsum(tgt_is_piece) - 1)
if len(src_spans_ind) % 2 != 0:
logging.error('corrupted span in src string: [{}]'.format(tokenizer.spm_source.DecodePieces(src_tokens)))
# this almost never happens but if it does it is usually because quotation is missing from the end of src_tokens
# we temporary fix this by adding '"' to the end of src_tokens
src_tokens += tokenizer.tokenize('"')
src_spans_ind = [index for index, token in enumerate(src_tokens) if '"' in token]
if model_type == 'marian':
src_strings = tokenizer.spm_source.DecodePieces(src_tokens)
tgt_strings = tokenizer.spm_target.DecodePieces(tgt_tokens)
else:
src_strings = tokenizer.convert_tokens_to_string(src_tokens)
tgt_strings = tokenizer.convert_tokens_to_string(tgt_tokens)
# arrange spans and exclude quotation mark indices
src_spans = [(src_spans_ind[i] + 1, src_spans_ind[i + 1] - 1) for i in range(0, len(src_spans_ind), 2)]
for src_idx, (beg, end) in enumerate(src_spans):
s1 = torch.argmax(sample_layer_attention_pooled[:, beg]).item()
s2 = torch.argmax(sample_layer_attention_pooled[:, end]).item()
# clamp values to max tgt_tokens length
s1 = min(s1, len(tgt_tokens) - 1)
s2 = min(s2, len(tgt_tokens) - 1)
src2tgt_mapping[(beg, end)] = (s1, s2)
quoted_pattern_maybe_space = re.compile(r'\"\s?([^"]*?)\s?\"')
src_matches = list(re.finditer(quoted_pattern_maybe_space, src_strings))
# update src2tgt_mapping to map to word indices in response
for key, value in src2tgt_mapping.items():
s1, s2 = value
try:
src2tgt_mapping[key] = (
max(0, tgt_piece2word_mapping[s1] - 1), min(tgt_piece2word_mapping[s2] + 1, len(tgt_tokens)))
except:
raise ValueError('corrupted span in tgt string: [{}] with src string: [{}]\n'
'outputting example without reverting the parameter'.format(tgt_strings, src_strings))
# move through words
tgt_strings_words = tgt_strings.split(' ')
tokens = []
curr = 0
for i, (key, value) in enumerate(src2tgt_mapping.items()):
start, end = value
if start > curr:
tokens.extend(tgt_strings_words[curr:start])
replace_match = src_matches[i]
tokens.append(replace_match.group(0))
curr = end
if curr < len(tgt_strings_words):
tokens.extend(tgt_strings_words[curr:])
text = ' '.join(tokens)
return text
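# --- Illustrative sketch (added; not part of the original file): a minimal call
# to compute_metrics on toy data. The sentences below are placeholders; the
# generations/golds shapes follow the docstring of compute_metrics above.
if __name__ == "__main__":
    toy_generations = [["the cat sat on the mat", "a cat sat on a mat"],
                       ["hello world"]]
    toy_golds = ["the cat sat on the mat", "hello there world"]
    print(compute_metrics(toy_generations, toy_golds, reduction='average'))
    # any value other than 'average' falls through to the max-style reduction
    print(compute_metrics(toy_generations, toy_golds, reduction='max'))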
|
StarcoderdataPython
|
3302194
|
<filename>halotools/mock_observables/void_statistics/void_prob_func.py
"""
Module containing the `~halotools.mock_observables.void_prob_func`
and `~halotools.mock_observables.underdensity_prob_func` used to calculate void statistics.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from six.moves import xrange as range
from astropy.utils.misc import NumpyRNGContext
from ..pair_counters import npairs_per_object_3d
from ...utils.array_utils import array_is_monotonic
from ...custom_exceptions import HalotoolsError
__all__ = ('void_prob_func', )
__author__ = ['<NAME>', '<NAME>']
np.seterr(divide='ignore', invalid='ignore') # ignore divide by zero in e.g. DD/RR
def void_prob_func(sample1, rbins, n_ran=None, random_sphere_centers=None,
period=None, num_threads=1,
approx_cell1_size=None, approx_cellran_size=None, seed=None):
"""
Calculate the void probability function (VPF), :math:`P_0(r)`,
defined as the probability that a random
sphere of radius *r* contains zero points in the input sample.
See the :ref:`mock_obs_pos_formatting` documentation page for
instructions on how to transform your coordinate position arrays into the
format accepted by the ``sample1`` argument.
See also :ref:`galaxy_catalog_analysis_tutorial8`
Parameters
----------
sample1 : array_like
Npts1 x 3 numpy array containing 3-D positions of points.
See the :ref:`mock_obs_pos_formatting` documentation page, or the
Examples section below, for instructions on how to transform
your coordinate position arrays into the
format accepted by the ``sample1`` and ``sample2`` arguments.
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
rbins : float
size of spheres to search for neighbors
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
n_ran : int, optional
integer number of randoms to use to search for voids.
If ``n_ran`` is not passed, you must pass ``random_sphere_centers``.
random_sphere_centers : array_like, optional
Npts x 3 array of randomly selected positions to drop down spheres
to use to measure the `void_prob_func`. If ``random_sphere_centers``
is not passed, ``n_ran`` must be passed.
period : array_like, optional
Length-3 sequence defining the periodic boundary conditions
in each dimension. If you instead provide a single scalar, Lbox,
period is assumed to be the same in all Cartesian directions.
If set to None, PBCs are set to infinity. In this case, it is still necessary
to drop down randomly placed spheres in order to compute the VPF. To do so,
the spheres will be dropped inside a cubical box whose sides are defined by
the smallest/largest coordinate distance of the input ``sample1``.
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
num_threads : int, optional
Number of threads to use in calculation, where parallelization is performed
using the python ``multiprocessing`` module. Default is 1 for a purely serial
calculation, in which case a multiprocessing Pool object will
never be instantiated. A string 'max' may be used to indicate that
the pair counters should use all available cores on the machine.
approx_cell1_size : array_like, optional
        Length-3 array serving as a guess for the optimal manner in which points
        will be apportioned into subvolumes of the simulation box.
        The optimum choice unavoidably depends on the specs of your machine.
        Default choice is to use Lbox/10 in each dimension,
        which will give reasonable performance for most use-cases.
Performance can vary sensitively with this parameter, so it is highly
recommended that you experiment with this parameter when carrying out
performance-critical calculations.
approx_cellran_size : array_like, optional
Analogous to ``approx_cell1_size``, but for randoms. See comments for
``approx_cell1_size`` for details.
seed : int, optional
Random number seed used to randomly lay down spheres, if applicable.
Default is None, in which case results will be stochastic.
Returns
-------
vpf : numpy.array
*len(rbins)* length array containing the void probability function
:math:`P_0(r)` computed for each :math:`r` defined by input ``rbins``.
Notes
-----
This function requires the calculation of the number of pairs per randomly placed
sphere, and thus storage of an array of shape(n_ran,len(rbins)). This can be a
memory intensive process as this array becomes large.
Examples
--------
For demonstration purposes we create a randomly distributed set of points within a
periodic unit cube.
>>> Npts = 10000
>>> Lbox = 1.0
>>> period = np.array([Lbox,Lbox,Lbox])
>>> x = np.random.random(Npts)
>>> y = np.random.random(Npts)
>>> z = np.random.random(Npts)
We transform our *x, y, z* points into the array shape used by the pair-counter by
taking the transpose of the result of `numpy.vstack`. This boilerplate transformation
is used throughout the `~halotools.mock_observables` sub-package:
>>> coords = np.vstack((x,y,z)).T
>>> rbins = np.logspace(-2,-1,20)
>>> n_ran = 1000
>>> vpf = void_prob_func(coords, rbins, n_ran=n_ran, period=period)
See also
----------
:ref:`galaxy_catalog_analysis_tutorial8`
"""
(sample1, rbins, n_ran, random_sphere_centers,
period, num_threads, approx_cell1_size, approx_cellran_size) = (
_void_prob_func_process_args(sample1, rbins, n_ran, random_sphere_centers,
period, num_threads, approx_cell1_size, approx_cellran_size, seed))
result = npairs_per_object_3d(random_sphere_centers, sample1, rbins,
period=period, num_threads=num_threads,
approx_cell1_size=approx_cell1_size,
approx_cell2_size=approx_cellran_size)
num_empty_spheres = np.array(
[sum(result[:, i] == 0) for i in range(result.shape[1])])
return num_empty_spheres/n_ran
def _void_prob_func_process_args(sample1, rbins,
n_ran, random_sphere_centers, period, num_threads,
approx_cell1_size, approx_cellran_size, seed):
"""
"""
sample1 = np.atleast_1d(sample1)
rbins = np.atleast_1d(rbins)
try:
assert rbins.ndim == 1
assert len(rbins) > 1
assert np.min(rbins) > 0
if len(rbins) > 2:
assert array_is_monotonic(rbins, strict=True) == 1
except AssertionError:
msg = ("\n Input ``rbins`` must be a monotonically increasing \n"
"1-D array with at least two entries. All entries must be strictly positive.")
raise HalotoolsError(msg)
if period is None:
xmin, xmax = np.min(sample1), np.max(sample1)
ymin, ymax = np.min(sample1), np.max(sample1)
zmin, zmax = np.min(sample1), np.max(sample1)
else:
period = np.atleast_1d(period)
if len(period) == 1:
period = np.array([period, period, period])
elif len(period) == 3:
pass
else:
msg = ("\nInput ``period`` must either be a float or length-3 sequence")
raise HalotoolsError(msg)
xmin, xmax = 0., float(period[0])
ymin, ymax = 0., float(period[1])
zmin, zmax = 0., float(period[2])
if (n_ran is None):
if (random_sphere_centers is None):
msg = ("You must pass either ``n_ran`` or ``random_sphere_centers``")
raise HalotoolsError(msg)
else:
random_sphere_centers = np.atleast_1d(random_sphere_centers)
try:
assert random_sphere_centers.shape[1] == 3
except AssertionError:
msg = ("Your input ``random_sphere_centers`` must have shape (Nspheres, 3)")
raise HalotoolsError(msg)
n_ran = float(random_sphere_centers.shape[0])
else:
if random_sphere_centers is not None:
msg = ("If passing in ``random_sphere_centers``, do not also pass in ``n_ran``.")
raise HalotoolsError(msg)
else:
with NumpyRNGContext(seed):
xran = np.random.uniform(xmin, xmax, n_ran)
yran = np.random.uniform(ymin, ymax, n_ran)
zran = np.random.uniform(zmin, zmax, n_ran)
random_sphere_centers = np.vstack([xran, yran, zran]).T
return (sample1, rbins, n_ran, random_sphere_centers,
period, num_threads, approx_cell1_size, approx_cellran_size)
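# --- Illustrative sketch (added; not part of the original module): the same
# random-points workflow shown in the void_prob_func docstring, runnable as a
# script. Npts, Lbox, rbins, n_ran and the seed are arbitrary demo values.
if __name__ == "__main__":  # pragma: no cover
    Npts, Lbox = 10000, 1.0
    period = np.array([Lbox, Lbox, Lbox])
    coords = np.vstack((np.random.random(Npts),
                        np.random.random(Npts),
                        np.random.random(Npts))).T
    rbins = np.logspace(-2, -1, 20)
    vpf = void_prob_func(coords, rbins, n_ran=1000, period=period, seed=43)
    print(vpf)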
|
StarcoderdataPython
|
3548045
|
import numpy as np
import os
# data_dir = '/users/hzhang2/projects/Cavs/apps/lstm/sst'
data_dir = '/users/shizhenx/projects/Cavs/apps/lstm/data/sst'
# splits = ['train', 'test', 'dev']
splits = ['train']
class Vocab(object):
def __init__(self, path):
self.words = []
self.word2idx = {}
self.idx2word = {}
self.load(path)
def load(self, path):
with open(path, 'r') as f:
for line in f:
w = line.strip()
assert w not in self.words
self.words.append(w)
self.word2idx[w] = len(self.words) - 1
self.idx2word[self.word2idx[w]] = w
def encode(self, word):
return self.word2idx[word]
def decode(self, idx):
return self.idx2word[idx]
print('Build vocabulary...')
vocab_path = os.path.join(data_dir, 'vocab-cased.txt')
vocab = Vocab(vocab_path)
def transform(sentence):
sentence = [w for w in sentence.strip().split()]
indices = []
for w in sentence:
idx = vocab.encode(w)
assert(idx >= 0)
indices.append(idx)
assert(len(indices) == len(sentence))
return indices, len(sentence)
max_length = 0
max_graph_length = 0
print('Transform from sentences to indices...')
for split in splits:
word_path = os.path.join(data_dir, split, 'sents.txt')
label_path = os.path.join(data_dir, split, 'labels.txt')
graph_path = os.path.join(data_dir, split, 'parents.txt')
indices_path = os.path.join(data_dir, split, 'sents_idx.txt')
with open(word_path, 'r') as wordfile, open(indices_path, 'w') as indices_file:
while True:
sentence = wordfile.readline()
if not sentence:
break
indices, length = transform(sentence)
if length > max_length:
max_length = length
# write the indices to a new file
for i in range(len(indices)):
indices_file.write('%d' % (indices[i]))
if i < len(indices)-1 :
indices_file.write(' ')
else:
indices_file.write('\n')
with open(graph_path, 'r') as graphfile:
while True:
graph = graphfile.readline()
if not graph:
break
parents = [w for w in graph.strip().split()]
graph_len = len(parents)
if graph_len > max_graph_length:
max_graph_length = graph_len
print('Done...Max sentence length: %d' % max_length)
print('Done...Max graph length: %d' % max_graph_length)
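# --- Illustrative check (added; not part of the original script): the Vocab
# class is a bidirectional word <-> index mapping, so encode and decode are
# inverses for any word in vocab-cased.txt. The word used is just the first
# vocabulary entry.
if len(vocab.words) > 0:
    example_word = vocab.words[0]
    assert vocab.decode(vocab.encode(example_word)) == example_word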
|
StarcoderdataPython
|
6655668
|
<reponame>hidaruma/caty
# coding: utf-8
import os
from caty.jsontools import prettyprint, TaggedValue, TagOnly
import caty.jsontools.stdjson as stdjson
def initialize(conf):
    # Create the data directory at initialization time
if not os.path.exists(conf['data_dir']):
os.mkdir(conf['data_dir'])
def connect(conf):
return FileStorageConnection(conf['data_dir'])
class FileStorageConnection(object):
def __init__(self, data_dir):
self.data_dir = data_dir
self._data_map = {'apps': {}, 'global': {}}
def _load(self, app_name, collection_name):
if app_name:
if not app_name in self._data_map['apps']:
self._data_map['apps'][app_name] = {}
if collection_name in self._data_map['apps'][app_name]:
return
path = self.data_dir + '/' + app_name + '/' + collection_name + '.json'
if os.path.exists(path):
self._data_map['apps'][app_name][collection_name] = stdjson.loads(open(path).read())
else:
self._load('', collection_name)
else:
path = self.data_dir + '/' + collection_name + '.json'
if collection_name in self._data_map['global']:
return
if os.path.exists(path):
c = open(path).read()
if c:
self._data_map['global'][collection_name] = stdjson.loads(c)
def create_collection(self, app_name, collection_name, schema_name):
self._load(app_name, collection_name)
if app_name:
if collection_name in self._data_map['apps'][app_name]:
return
self._data_map['apps'][app_name][collection_name] = {'appName': app_name, 'schema': schema_name, 'collectionName': collection_name, 'data': []}
else:
if collection_name in self._data_map['global']:
return
self._data_map['global'][collection_name] = {'appName': app_name, 'schema': schema_name, 'collectionName': collection_name, 'data': []}
self.commit()
def drop(self, app_name, collection_name):
self.get_collection(app_name, collection_name)['delete'] = True
def load_collection(self, app_name, collection_name):
self._load(app_name, collection_name)
return self.get_collection(app_name, collection_name)
def get_collection(self, app_name, collection_name):
if app_name:
if collection_name in self._data_map['apps'][app_name]:
return self._data_map['apps'][app_name][collection_name]
else:
return self.get_collection('', collection_name)
elif collection_name in self._data_map['global']:
return self._data_map['global'][collection_name]
else:
throw_caty_exception(u'CollectionNotFound', u'Collection does not found: collection name=$colname, defined at $denined, called by $callee', colname=collection_name, defined=app_name, callee=app_name)
def insert(self, app_name, collection_name, obj):
self.get_collection(app_name, collection_name)['data'].append(obj)
def commit(self):
for tbl_name, tbl_data in self._data_map['global'].items():
path = self.data_dir + '/' + tbl_name + '.json'
if tbl_data.get('delete') and os.path.exists(path):
os.unlink(path)
else:
open(path, 'wb').write(stdjson.dumps(tbl_data, indent=4))
for app_name, tbl_map in self._data_map['apps'].items():
for tbl_name, tbl_data in tbl_map.items():
path = self.data_dir + '/' + app_name + '/' + tbl_name + '.json'
if tbl_data.get('delete'):
if os.path.exists(path):
os.unlink(path)
else:
if not os.path.exists(self.data_dir + '/' + app_name + '/'):
os.mkdir(self.data_dir + '/' + app_name + '/')
open(path, 'wb').write(prettyprint(tbl_data))
def rollback(self):
self._data_map = {'apps': {}, 'global': {}}
self.commit()
class CollectionFactory(object):
def __init__(self, conn, finder, collection_name, app_name=u'', current_app=None):
self._conn = conn
self._finder = finder
self._collection_name = collection_name
self._current_app_name = current_app.name if current_app else u''
self._app_name = app_name if app_name else self._current_app_name
def create(self, schema_name, global_collection=False):
u"""コレクション名とスキーマ名の対応表に新たな値を作り、
新規のコレクションを作成する。
"""
app_name = self._current_app_name if not global_collection else u''
self._conn.create_collection(app_name, self._collection_name, schema_name)
class CollectionManipulator(object):
def __init__(self, conn, finder, collection_name, app_name=u'', current_app=None):
self._conn = conn
self._finder = finder
self._app = current_app
self._collection_name = collection_name
self._current_app_name = current_app.name if current_app else u''
self._app_name = app_name if app_name else self._current_app_name
self._schema = self._load_schema()
assert self._schema, (repr(self._schema), collection_name)
def _load_schema(self):
r = self._conn.load_collection(self._app_name, self._collection_name)
try:
sn = r['schema']
ap = r['appName']
if not ap:
return self._finder.get_type(sn)
else:
return self._finder.get_type(ap + '::' + sn)
except:
import traceback
traceback.print_exc()
return None
@property
def schema(self):
return self._schema
def drop(self):
self._conn.drop(self._app_name, self._collection_name)
def insert(self, obj):
self._conn.insert(self._app_name, self._collection_name, obj)
def select(self, obj, limit=-1, offset=0, reverse=False):
data = self._conn.get_collection(self._app_name, self._collection_name)['data']
r = []
for d in data:
if self._match(d, obj):
if offset:
offset-=1
elif limit != -1 and len(r) >= limit:
break
else:
r.append(d)
if reverse:
r.reverse()
return iter(r)
def _match(self, obj, queries):
if not queries:
queries = {}
if not isinstance(queries, dict):
return self._process_query(obj, queries)
for k, q in queries.items():
if k not in obj:
return False
v = obj[k]
if not self._process_query(v, q):
return False
return True
def _process_query(self, val, query):
if isinstance(query, (TagOnly, TaggedValue)):
if query.tag == '_NOT_NULL':
return val is not None
elif query.tag == '_ANY':
return True
elif query.tag == '_LT':
return val < query.value
elif query.tag == '_LE':
return val <= query.value
elif query.tag == '_GT':
return val > query.value
elif query.tag == '_GE':
return val >= query.value
elif query.tag == '_CONTAINS':
return any(map(lambda v: self._process_query(v, query.value), val))
elif query.tag == '_EACH':
return all(map(lambda v: self._process_query(v, query.value), val))
elif query.tag == '_OR':
return any(map(lambda v: self._process_query(val, v), query.value))
elif query.tag == '_AND':
return all(map(lambda v: self._process_query(val, v), query.value))
elif query.tag == '_LIKE':
import re
ptn = re.compile(query.value
.replace('.', '\\.')
.replace('*', '.*')
.replace('%', '.*')
.replace('?', '.')
.replace('_', '.')
.replace('#', '[0-9]')
.replace('[!', '[^'))
return ptn.match(val)
elif query.tag == '_BETWEEN':
return query.value[0] <= val < query.value[1]
else:
return val == query
def select1(self, obj):
v = list(self.select(obj))
assert len(v) == 1
return v[0]
def delete(self, obj):
data = self._conn.get_collection(self._app_name, self._collection_name)['data']
for s in self.select(obj):
data.remove(s)
def update(self, oldobj, newobj):
data = self._conn.get_collection(self._app_name, self._collection_name)['data']
for s in self.select(oldobj):
pos = data.index(s)
data[pos].update(newobj)
def dump(self):
return self._conn.get_collection(self._app_name, self._collection_name)['data']
def restore(self, objects):
col = self._conn.get_collection(self._app_name, self._collection_name)
col.update(objects)
@property
def schema(self):
return self._schema
def collections(conn):
for r, d, f in os.walk(conn.data_dir):
for e in f:
if e.endswith('.json'):
o = stdjson.loads(open(os.path.join(r, e)).read())
yield {
'collectionName': o['collectionName'],
'schema': o['schema'],
'appName': o['appName']
}
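# --- Illustrative sketch (added; not part of the original module): a minimal
# round trip through the file-backed storage layer defined above. The
# 'data_dir' path and the app/collection/schema names are placeholders.
if __name__ == '__main__':  # pragma: no cover
    conf = {'data_dir': '/tmp/caty-demo-storage'}
    initialize(conf)
    conn = connect(conf)
    conn.create_collection(u'demo_app', u'people', u'Person')
    conn.insert(u'demo_app', u'people', {u'name': u'alice', u'age': 30})
    conn.commit()
    print(conn.get_collection(u'demo_app', u'people')['data'])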
|
StarcoderdataPython
|
8183462
|
<filename>app/utils/db/database.py
from typing import Optional
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import Session
from sqlmodel import SQLModel
from env_config import settings
from sqlalchemy.engine import create_engine, Engine
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import QueuePool
class SQLConnector:
engine: Optional[Engine] = None
@classmethod
def get_engine(cls) -> Engine:
engine = create_async_engine(
settings.DB_URL,
pool_size=settings.DB_POOL_SIZE,
poolclass=QueuePool,
echo=True,
future=True
)
return engine
@classmethod
async def init_db(cls):
        async with cls.get_engine().begin() as conn:
await conn.run_sync(SQLModel.metadata.create_all)
@classmethod
async def get_session(cls) -> AsyncSession:
async_session = sessionmaker(
cls.get_engine(), class_=AsyncSession, expire_on_commit=False
)
async with async_session() as session:
yield session
Base = declarative_base()
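# --- Illustrative sketch (added; not part of the original module): consuming the
# async-generator session from plain asyncio code. In a FastAPI app the same
# generator would typically be wired up via Depends(SQLConnector.get_session);
# that wiring, and a reachable settings.DB_URL, are assumptions of this demo.
import asyncio


async def _demo_session_usage() -> None:
    await SQLConnector.init_db()
    async for session in SQLConnector.get_session():
        # the AsyncSession is now ready for queries / commits
        print(type(session).__name__)


if __name__ == "__main__":  # pragma: no cover
    asyncio.run(_demo_session_usage())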
|
StarcoderdataPython
|
3537241
|
import json
import os
import shutil
from enum import Enum
from typing import List
COLOR_ESC = '\033['
COLOR_RESET = f'{COLOR_ESC}0m'
COLOR_GREEN = f'{COLOR_ESC}32m'
COLOR_RED = f'{COLOR_ESC}31m'
COLOR_CYAN = f'{COLOR_ESC}36m'
COLOR_GRAY = f'{COLOR_ESC}30;1m'
class Board(Enum):
SLIMEVR = "BOARD_SLIMEVR"
WROOM32 = "BOARD_WROOM32"
class DeviceConfiguration:
def __init__(self, platform: str, board: Board, platformio_board: str) -> None:
self.platform = platform
self.board = board
self.platformio_board = platformio_board
def get_platformio_section(self) -> str:
return f"""
[env:{self.platformio_board}]
platform = {self.platform}
board = {self.platformio_board}
"""
def filename(self) -> str:
return f"{self.platformio_board}.bin"
def build_header(self) -> str:
sda = ""
scl = ""
imu_int = ""
imu_int2 = ""
battery_level = ""
leds = True
if self.board == Board.SLIMEVR:
sda = "4"
scl = "5"
imu_int = "10"
imu_int2 = "13"
battery_level = "17"
elif self.board == Board.WROOM32:
sda = "21"
scl = "22"
imu_int = "23"
imu_int2 = "25"
battery_level = "36"
else:
raise Exception(f"Unknown board: {self.board.value}")
return f"""
#define IMU IMU_BNO085
#define SECOND_IMU IMU
#define BOARD {self.board.value}
#define BATTERY_MONITOR BAT_EXTERNAL
#define PIN_IMU_SDA {sda}
#define PIN_IMU_SCL {scl}
#define PIN_IMU_INT {imu_int}
#define PIN_IMU_INT_2 {imu_int2}
#define PIN_BATTERY_LEVEL {battery_level}
#define ENABLE_LEDS {leds.__str__().lower()}
#define BATTERY_SHIELD_RESISTANCE 180
#define IMU_ROTATION DEG_90
#define SECOND_IMU_ROTATION DEG_90
"""
def __str__(self) -> str:
return f"{self.platform}@{self.board.<EMAIL>}"
def get_matrix() -> List[DeviceConfiguration]:
matrix: List[DeviceConfiguration] = []
configFile = open("./ci/devices.json", "r")
config = json.load(configFile)
for deviceConfig in config:
matrix.append(DeviceConfiguration(
deviceConfig["platform"], Board[deviceConfig["board"]], deviceConfig["platformio_board"]))
return matrix
def prepare() -> None:
print(f"🡢 {COLOR_CYAN}Preparation{COLOR_RESET}")
print(f" 🡢 {COLOR_GRAY}Backing up src/defines.h{COLOR_RESET}")
shutil.copy("src/defines.h", "src/defines.h.bak")
print(f" 🡢 {COLOR_GRAY}Backing up platformio.ini{COLOR_RESET}")
shutil.copy("./platformio.ini", "platformio.ini.bak")
print(f" 🡢 {COLOR_GRAY}Copying over build/platformio.ini{COLOR_RESET}")
shutil.copy("./ci/platformio.ini", "platformio.ini")
if os.path.exists("./build"):
print(f" 🡢 {COLOR_GRAY}Removing existing build folder...{COLOR_RESET}")
shutil.rmtree("./build")
print(f" 🡢 {COLOR_GRAY}Creating build folder...{COLOR_RESET}")
os.mkdir("./build")
print(f" 🡢 {COLOR_GREEN}Success!{COLOR_RESET}")
def cleanup() -> None:
print(f"🡢 {COLOR_CYAN}Cleanup{COLOR_RESET}")
print(f" 🡢 {COLOR_GRAY}Restoring src/defines.h...{COLOR_RESET}")
shutil.copy("src/defines.h.bak", "src/defines.h")
print(f" 🡢 {COLOR_GRAY}Removing src/defines.h.bak...{COLOR_RESET}")
os.remove("src/defines.h.bak")
print(f" 🡢 {COLOR_GRAY}Restoring platformio.ini...{COLOR_RESET}")
shutil.copy("platformio.ini.bak", "platformio.ini")
print(f" 🡢 {COLOR_GRAY}Removing platformio.ini.bak...{COLOR_RESET}")
os.remove("platformio.ini.bak")
print(f" 🡢 {COLOR_GREEN}Success!{COLOR_RESET}")
def build() -> int:
print(f"🡢 {COLOR_CYAN}Build{COLOR_RESET}")
failed_builds: List[str] = []
code = 0
matrix = get_matrix()
with open("./platformio.ini", "a") as f1:
for device in matrix:
f1.write(device.get_platformio_section())
for device in matrix:
print(f" 🡢 {COLOR_CYAN}Building for {device.platform}{COLOR_RESET}")
status = build_for_device(device)
if status == False:
failed_builds.append(device.platformio_board)
if len(failed_builds) > 0:
print(f" 🡢 {COLOR_RED}Failed!{COLOR_RESET}")
for failed_build in failed_builds:
print(f" 🡢 {COLOR_RED}{failed_build}{COLOR_RESET}")
code = 1
else:
print(f" 🡢 {COLOR_GREEN}Success!{COLOR_RESET}")
return code
def build_for_device(device: DeviceConfiguration) -> bool:
success = True
print(f"::group::Build {device}")
with open("src/defines.h", "wt") as f:
f.write(device.build_header())
code = os.system(
f"platformio run -e {device.platformio_board}")
if code == 0:
shutil.copy(f".pio/build/{device.platformio_board}/firmware.bin",
f"build/{device.filename()}")
print(f" 🡢 {COLOR_GREEN}Success!{COLOR_RESET}")
else:
success = False
print(f" 🡢 {COLOR_RED}Failed!{COLOR_RESET}")
print(f"::endgroup::")
return success
def main() -> None:
prepare()
code = build()
cleanup()
os._exit(code)
if __name__ == "__main__":
main()
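# --- Illustrative note (added; not part of the original script): get_matrix()
# expects ./ci/devices.json to contain a list of objects with "platform",
# "board" (a Board enum name from above) and "platformio_board" keys, e.g.:
#
# [
#   {"platform": "espressif32", "board": "WROOM32", "platformio_board": "esp32dev"},
#   {"platform": "espressif8266", "board": "SLIMEVR", "platformio_board": "esp12e"}
# ]
#
# The platform/platformio_board values above are placeholders for illustration.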
|
StarcoderdataPython
|
6444578
|
# Size Settings
CHUNK_SIZE = 16
TILE_SIZE = 16
# Networking
HEADER_SIZE = 32
# Game Settings, should be configurable
CAMERA_SPEED = 4
LOAD_DISTANCE = 2
VIEWPORT_SIZE = (
round(CHUNK_SIZE*TILE_SIZE*1.5),
round(CHUNK_SIZE*TILE_SIZE)
)
# Layers
WORLD_LAYERS = [
"ground",
"player"
]
UI_LAYERS = [
]
# Tile Colours, will be moved into the mods system
# TODO: Add actual tile resources.
TILES = {
0: "#07259e", # Deep Water
1: "#2eb1cc", # Shallow Water
2: "#f4f484", # Sand
3: "#85e24f", # Grass
4: "#075405", # Forest
5: "#515151", # Mountain/Rock
6: "#ed5f0a", # Magma
}
|
StarcoderdataPython
|
11391443
|
import numpy as np
from typing import Dict, NamedTuple
from mlagents.torch_utils import torch, default_device
from mlagents.trainers.buffer import AgentBuffer
from mlagents.trainers.torch.components.reward_providers.base_reward_provider import (
BaseRewardProvider,
)
from mlagents.trainers.settings import CuriositySettings
from mlagents_envs.base_env import BehaviorSpec
from mlagents.trainers.torch.agent_action import AgentAction
from mlagents.trainers.torch.action_flattener import ActionFlattener
from mlagents.trainers.torch.utils import ModelUtils
from mlagents.trainers.torch.networks import NetworkBody
from mlagents.trainers.torch.layers import LinearEncoder, linear_layer
from mlagents.trainers.settings import NetworkSettings, EncoderType
from mlagents.trainers.trajectory import ObsUtil
class ActionPredictionTuple(NamedTuple):
continuous: torch.Tensor
discrete: torch.Tensor
class CuriosityRewardProvider(BaseRewardProvider):
beta = 0.2 # Forward vs Inverse loss weight
loss_multiplier = 10.0 # Loss multiplier
def __init__(self, specs: BehaviorSpec, settings: CuriositySettings) -> None:
super().__init__(specs, settings)
self._ignore_done = True
self._network = CuriosityNetwork(specs, settings)
self._network.to(default_device())
self.optimizer = torch.optim.Adam(
self._network.parameters(), lr=settings.learning_rate
)
self._has_updated_once = False
def evaluate(self, mini_batch: AgentBuffer) -> np.ndarray:
with torch.no_grad():
rewards = ModelUtils.to_numpy(self._network.compute_reward(mini_batch))
rewards = np.minimum(rewards, 1.0 / self.strength)
return rewards * self._has_updated_once
def update(self, mini_batch: AgentBuffer) -> Dict[str, np.ndarray]:
self._has_updated_once = True
forward_loss = self._network.compute_forward_loss(mini_batch)
inverse_loss = self._network.compute_inverse_loss(mini_batch)
loss = self.loss_multiplier * (
self.beta * forward_loss + (1.0 - self.beta) * inverse_loss
)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return {
"Losses/Curiosity Forward Loss": forward_loss.item(),
"Losses/Curiosity Inverse Loss": inverse_loss.item(),
}
def get_modules(self):
return {f"Module:{self.name}": self._network}
class CuriosityNetwork(torch.nn.Module):
EPSILON = 1e-10
def __init__(self, specs: BehaviorSpec, settings: CuriositySettings) -> None:
super().__init__()
self._action_spec = specs.action_spec
state_encoder_settings = NetworkSettings(
normalize=False,
hidden_units=settings.encoding_size,
num_layers=2,
vis_encode_type=EncoderType.SIMPLE,
memory=None,
)
self._state_encoder = NetworkBody(
specs.observation_shapes, state_encoder_settings
)
self._action_flattener = ActionFlattener(self._action_spec)
self.inverse_model_action_encoding = torch.nn.Sequential(
LinearEncoder(2 * settings.encoding_size, 1, 256)
)
if self._action_spec.continuous_size > 0:
self.continuous_action_prediction = linear_layer(
256, self._action_spec.continuous_size
)
if self._action_spec.discrete_size > 0:
self.discrete_action_prediction = linear_layer(
256, sum(self._action_spec.discrete_branches)
)
self.forward_model_next_state_prediction = torch.nn.Sequential(
LinearEncoder(
settings.encoding_size + self._action_flattener.flattened_size, 1, 256
),
linear_layer(256, settings.encoding_size),
)
def get_current_state(self, mini_batch: AgentBuffer) -> torch.Tensor:
"""
Extracts the current state embedding from a mini_batch.
"""
n_obs = len(self._state_encoder.processors)
np_obs = ObsUtil.from_buffer(mini_batch, n_obs)
# Convert to tensors
tensor_obs = [ModelUtils.list_to_tensor(obs) for obs in np_obs]
hidden, _ = self._state_encoder.forward(tensor_obs)
return hidden
def get_next_state(self, mini_batch: AgentBuffer) -> torch.Tensor:
"""
Extracts the next state embedding from a mini_batch.
"""
n_obs = len(self._state_encoder.processors)
np_obs = ObsUtil.from_buffer_next(mini_batch, n_obs)
# Convert to tensors
tensor_obs = [ModelUtils.list_to_tensor(obs) for obs in np_obs]
hidden, _ = self._state_encoder.forward(tensor_obs)
return hidden
def predict_action(self, mini_batch: AgentBuffer) -> ActionPredictionTuple:
"""
In the continuous case, returns the predicted action.
In the discrete case, returns the logits.
"""
inverse_model_input = torch.cat(
(self.get_current_state(mini_batch), self.get_next_state(mini_batch)), dim=1
)
continuous_pred = None
discrete_pred = None
hidden = self.inverse_model_action_encoding(inverse_model_input)
if self._action_spec.continuous_size > 0:
continuous_pred = self.continuous_action_prediction(hidden)
if self._action_spec.discrete_size > 0:
raw_discrete_pred = self.discrete_action_prediction(hidden)
branches = ModelUtils.break_into_branches(
raw_discrete_pred, self._action_spec.discrete_branches
)
branches = [torch.softmax(b, dim=1) for b in branches]
discrete_pred = torch.cat(branches, dim=1)
return ActionPredictionTuple(continuous_pred, discrete_pred)
def predict_next_state(self, mini_batch: AgentBuffer) -> torch.Tensor:
"""
Uses the current state embedding and the action of the mini_batch to predict
the next state embedding.
"""
actions = AgentAction.from_dict(mini_batch)
flattened_action = self._action_flattener.forward(actions)
forward_model_input = torch.cat(
(self.get_current_state(mini_batch), flattened_action), dim=1
)
return self.forward_model_next_state_prediction(forward_model_input)
def compute_inverse_loss(self, mini_batch: AgentBuffer) -> torch.Tensor:
"""
Computes the inverse loss for a mini_batch. Corresponds to the error on the
action prediction (given the current and next state).
"""
predicted_action = self.predict_action(mini_batch)
actions = AgentAction.from_dict(mini_batch)
_inverse_loss = 0
if self._action_spec.continuous_size > 0:
sq_difference = (
actions.continuous_tensor - predicted_action.continuous
) ** 2
sq_difference = torch.sum(sq_difference, dim=1)
_inverse_loss += torch.mean(
ModelUtils.dynamic_partition(
sq_difference,
ModelUtils.list_to_tensor(mini_batch["masks"], dtype=torch.float),
2,
)[1]
)
if self._action_spec.discrete_size > 0:
true_action = torch.cat(
ModelUtils.actions_to_onehot(
actions.discrete_tensor, self._action_spec.discrete_branches
),
dim=1,
)
cross_entropy = torch.sum(
-torch.log(predicted_action.discrete + self.EPSILON) * true_action,
dim=1,
)
_inverse_loss += torch.mean(
ModelUtils.dynamic_partition(
cross_entropy,
ModelUtils.list_to_tensor(
mini_batch["masks"], dtype=torch.float
), # use masks not action_masks
2,
)[1]
)
return _inverse_loss
def compute_reward(self, mini_batch: AgentBuffer) -> torch.Tensor:
"""
Calculates the curiosity reward for the mini_batch. Corresponds to the error
between the predicted and actual next state.
"""
predicted_next_state = self.predict_next_state(mini_batch)
target = self.get_next_state(mini_batch)
sq_difference = 0.5 * (target - predicted_next_state) ** 2
sq_difference = torch.sum(sq_difference, dim=1)
return sq_difference
def compute_forward_loss(self, mini_batch: AgentBuffer) -> torch.Tensor:
"""
Computes the loss for the next state prediction
"""
return torch.mean(
ModelUtils.dynamic_partition(
self.compute_reward(mini_batch),
ModelUtils.list_to_tensor(mini_batch["masks"], dtype=torch.float),
2,
)[1]
)
|
StarcoderdataPython
|
11383444
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import src.functions as fns
import src.sparse_grids as spg
from src.xtDG_projection import ProjectionXtDG
import mesh_generator.io_mesh as meshio
# projection of exact solution, full-grid 2d
# noinspection PyUnusedLocal
def projectionFG_2d(cfg, dir_mesh, test_case, integrator, lx, ne_tMesh):
xMesh_file = dir_mesh + 'mesh_l%d.h5' % lx
print('\n Read mesh file: ', xMesh_file)
print(' Uniform time series, number of intervals: ', ne_tMesh)
xMesh = meshio.read_mesh_h5(xMesh_file)
tMesh = fns.get_uniform_time_series(test_case.T, ne_tMesh)
xtDGproj = ProjectionXtDG()
xtDGproj.set(cfg, test_case, xMesh)
u, ndof = xtDGproj.eval(tMesh)
return tMesh, xMesh, u, ndof
# time integrator + spatial_discretisation solver, full-grid 2d
def schemeFG_2d(cfg, dir_mesh, test_case, integrator, lx, ne_tMesh):
xMesh_file = dir_mesh + 'mesh_l%d.h5' % lx
print('\n Read mesh file: ', xMesh_file)
print(' Uniform time series, number of intervals: ', ne_tMesh)
xMesh = meshio.read_mesh_h5(xMesh_file)
tMesh = fns.get_uniform_time_series(test_case.T, ne_tMesh)
cfg.update({'mesh directory': dir_mesh, 'mesh level': lx})
integrator.set(cfg, test_case, xMesh)
u, ndof = integrator.run(tMesh)
return tMesh, xMesh, u, ndof
# time integrator + xdG solver, sparse-grid 2d
def SG_2d(cfg, dir_mesh, test_case, solverFG, integrator, L0x, Lx, L0t, Lt):
sparse_grids = spg.SparseGrids()
sparse_grids.set_init(cfg, dir_mesh, test_case, L0x, Lx, L0t, Lt)
print('\n Sparse space-time levels:\n', sparse_grids.levels)
nSG = sparse_grids.levels.shape[0]
u_solutions = []
tMeshes = []
xMeshes = []
ndof = np.zeros(nSG, dtype=np.int32)
for k in range(0, nSG):
lx = sparse_grids.levels[k, 0] + 1
lt = sparse_grids.levels[k, 1] + 1
ne_tMesh = 2 ** lt
tMesh, xMesh, u, ndof[k] = solverFG(cfg, dir_mesh, test_case, integrator, lx, ne_tMesh)
u_solutions.append(u)
if k <= Lx - L0x:
xMeshes.append(xMesh)
tMeshes.append(tMesh)
return u_solutions, tMeshes, xMeshes, sparse_grids, np.sum(ndof)
# End of file
|
StarcoderdataPython
|
1772613
|
import unittest
from pokeman import BasicConfig
class BasicConfigTests(unittest.TestCase):
def setUp(self):
self.basic_config = BasicConfig(
connection_attempts=5,
heartbeat=7200,
retry_delay=2
)
def test_base_name(self):
self.assertEqual(self.basic_config.__class__.__name__, 'BasicConfig')
def test_base_initialization(self):
self.assertEqual(self.basic_config.CONNECTION_ATTEMPTS, 5)
self.assertEqual(self.basic_config.HEARTBEAT, 7200)
self.assertEqual(self.basic_config.RETRY_DELAY, 2)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11314489
|
<gh_stars>1-10
import sys
from datetime import datetime, timedelta
import logging
import argparse
import boto3
from elasticsearch import helpers
from elasticsearch import Elasticsearch
from elasticsearch.client.utils import _make_path
from elasticsearch.exceptions import NotFoundError
from iris.service.content.file.document import StorageType, File
logger = logging.getLogger('cleanup_s3')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
logger.addHandler(ch)
class S3Cleaner(object):
"""Cleanup utility for unused files.
This class iterates over all locally known S3 files with a given minimum
age and checks if each file is used as a file relation. If a file is not
referenced anywhere it will be deleted on S3. If it has been deleted
successfully on S3 it will be deleted in Crate as well.
"""
def __init__(self,
hosts,
min_age,
bucket,
aws_access_key_id,
aws_secret_access_key,
region_name,
dry_run=False):
self.min_age = min_age
self.bucket_name = bucket
self.dry_run = dry_run
self.init_boto(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region_name
)
self.init_es(hosts)
def init_boto(self, **kwargs):
self.s3 = boto3.resource('s3', **kwargs)
def init_es(self, hosts):
self.es = Elasticsearch(hosts=hosts)
File.ES = self.es
def _in_use(self, file_id):
"""Determine if given file_id is referenced anywhere.
NOTE: Usage is checked on all indices but only on fields called
'relations.images.id'. How to determine new relations to Files?
"""
try:
exists_query = {
"query": {"term": {"relations.images.id": file_id}}
}
path = _make_path('_all', 'default', '_search', 'exists')
self.es.transport.perform_request(
'GET',
path,
params=None,
body=exists_query
)
return True
except NotFoundError:
return False
except:
return True
def _delete(self, file_id):
"""Delete given file.
Args:
file_id: the id (or hash) of the file to delete
If not dry_run was set delete the file on S3. If successful delete the
file in elasticsearch. If one file couldn't have been deleted on S3
an exception will be raised.
"""
logger.debug("Deleting file %s", file_id)
if not self.dry_run:
obj = self.s3.Object(self.bucket_name, file_id)
obj.delete()
# this call will raise an exception if the file couldn't have been
# deleted on S3
obj.wait_until_not_exists()
f = File.get(file_id)
if f:
f.delete()
def _s3_files(self):
"""Get all locally known S3 files older than MIN_AGE
Return a generator over all S3 files
"""
min_age = datetime.now() - timedelta(days=self.min_age)
s3_files_query = {
"fields": [],
"query": {
"bool": {
"must": [
{"term": {"storage_type": StorageType.S3}},
{"range": {"dc.created": {"lte": min_age}}}
]
}
}
}
return helpers.scan(
self.es,
query=s3_files_query,
index=File.INDEX
)
def clean(self):
"""Remove all stale files on S3 and in Crate.
"""
deleted, kept = 0, 0
for f in self._s3_files():
f_id = f['_id']
if not self._in_use(f_id):
deleted += 1
self._delete(f_id)
else:
kept += 1
logger.debug("Keep file %s", f_id)
print "Deleted:\t", deleted, "\tKept:\t", kept, "\r",
sys.stdout.flush()
logger.info("Deleted:\t%i\tKept:\t%i", deleted, kept)
def main():
parser = argparse.ArgumentParser(description='Cleanup unused files.')
parser.add_argument(
'--es-hosts',
required=True,
dest='hosts',
help='Comma seperated list of es hosts'
)
parser.add_argument(
'--aws-key',
dest='aws_access_key_id',
help='AWS access key'
)
parser.add_argument(
'--aws-secret',
dest='aws_secret_access_key',
help='AWS secret key'
)
parser.add_argument(
'--s3-bucket',
dest='bucket',
help='The S3 bucket containing files'
)
parser.add_argument(
'--aws-region',
dest='region_name',
default='eu-central-1',
help='AWS region (optional)'
)
parser.add_argument(
'--min-age',
dest='min_age',
default=1,
type=int,
help='Minimum age of files to delete. Value indicates days'
)
parser.add_argument(
'--dry-run',
dest='dry_run',
action='store_true',
default=False,
help='Do a dry run'
)
args = parser.parse_args()
if args.dry_run:
logger.setLevel(logging.DEBUG)
args.hosts = args.hosts.split(',')
cleaner = S3Cleaner(**dict(args._get_kwargs()))
cleaner.clean()
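# --- Added for completeness (not in the original file): main() is defined above
# but never invoked, so this standard guard makes the script directly runnable,
# e.g.  python cleanup_s3.py --es-hosts localhost:9200 --s3-bucket my-bucket --dry-run
# (host and bucket values are placeholders).
if __name__ == '__main__':
    main()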
|
StarcoderdataPython
|
9640294
|
<filename>python/Math/polar coordintes.py
import cmath
num=input()
comp_num=complex(num)
result=cmath.phase(comp_num)
print((comp_num.real**2+comp_num.imag**2)**0.5)
print(result)
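# --- Illustrative note (added; not part of the original snippet): cmath.polar
# returns the same (modulus, phase) pair in a single call, so the two prints
# above are equivalent to:
#
# r, phi = cmath.polar(comp_num)
# print(r)
# print(phi)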
|
StarcoderdataPython
|
4921797
|
import os
from typing import Callable
import vlc
vlc_instance = None
player = None
playback_end_callback = lambda: NotImplemented
current_mrl = None
def init(callback: Callable[[], None]) -> None:
global vlc_instance
global player
global playback_end_callback
vlc_instance = vlc.Instance("--no-xlib")
player = vlc_instance.media_player_new()
playback_end_callback = callback
player.event_manager().event_attach(
vlc.EventType.MediaPlayerEndReached,
callback
)
def set_mrl(mrl: str) -> None:
global current_mrl
current_mrl = mrl
player.set_mrl(mrl)
def play() -> None:
player.play()
if not player.will_play():
player.stop()
playback_end_callback()
def stop() -> None:
player.stop()
def pause() -> None:
player.pause()
def available() -> bool:
    # Guard against being called before set_mrl(); os.path.exists(None) raises TypeError.
    return current_mrl is not None and os.path.exists(current_mrl)
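# Hedged usage sketch (not part of the original module): "example.mp3" is a
# hypothetical media path and the callback simply reports the end of playback.
# Requires python-vlc and a real file at that path to actually play anything.
if __name__ == "__main__":
    init(lambda *args: print("playback finished"))
    set_mrl("example.mp3")
    if available():
        play()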
|
StarcoderdataPython
|
4818508
|
<filename>exploringShipLogbooks/classification.py
import collections
import exploringShipLogbooks
import rpy2
import warnings
import numpy as np
import pandas as pd
import os.path as op
from .basic_utils import extract_logbook_data
from .basic_utils import isolate_columns
from .basic_utils import isolate_training_data
from .basic_utils import clean_data
from .basic_utils import encode_data_df
from .config import *
from .fuzz_replacement import fuzzy_wuzzy_classification
from .wordcount import count_key_words
from rpy2.robjects import pandas2ri
from sklearn import preprocessing
from sklearn.naive_bayes import MultinomialNB
from sklearn import tree
pandas2ri.activate()
class LogbookClassifier:
"""
Handles loading, cleaning, and classification of ship logbook data.
"""
def __init__(self, classification_algorithm='Decision Tree'):
# initialize classifier based on desired algorithm
if classification_algorithm == "Decision Tree":
self.classifier = tree.DecisionTreeClassifier()
elif classification_algorithm == "Naive Bayes":
self.classifier = MultinomialNB(alpha=1.0, class_prior=None,
fit_prior=True)
else:
raise KeyError("Please enter a valid classification type",
" (Decision Trees or Naive Bayes)")
self.classification_algorithm = classification_algorithm
def load_data(self, data_sets=['slave_voyages', 'cliwoc'], data_type='sav'):
"""
Load data. All data is stored in the sub-directory "data".
slave_voyage_logs is data from www.slavevoyages.org.
- stored in pickle format, because website .csv file is corrupted
and the .sav file format requires r package to read.
cliwoc_data is data from kaggle website, collected for a NOAA project.
- extracted from zip file
"""
if 'slave_voyages' in data_sets:
if data_type == 'pickles':
data_path = op.join(exploringShipLogbooks.__path__[0], 'data')
file_name = data_path + '/tastdb-exp-2010'
self.slave_voyage_logs = pd.read_pickle(file_name)
elif data_type == 'sav':
data_path = op.join(exploringShipLogbooks.__path__[0], 'data')
filename = data_path + '/tastdb-exp-2010.sav'
self.slave_voyage_logs = rpy2.robjects.r('foreign::read.spss("%s",to.data.frame=TRUE)'% filename)
if 'cliwoc' in data_sets:
self.cliwoc_data = extract_logbook_data('CLIWOC15.csv')
if 'slave_voyages' not in data_sets and 'cliwoc' not in data_sets:
            warnings.warn('No data loaded. Currently data extraction is only '
                          'implemented for cliwoc15 data (cliwoc) and slave '
                          'voyages logs (slave_voyages).')
def find_logs_that_mention_slaves(self):
"""
Use word count function to find all logs in cliwoc_data that explicitly
mention slaves in logbook text. This will later be used as a validation
data set for classification.
"""
self.slave_mask = count_key_words(self.cliwoc_data, text_columns, slave_words)
print('Found ', len(self.slave_mask[self.slave_mask]),
' logs that mention slaves')
def find_training_data(self, criteria={'ShipName': non_slave_ships}):
"""
Isolate training data from cliwoc_data. This training data will be used
as negative (non-slave-trade) training data. Default is to isolate by
ship name for ships that have been proven to be non-slave ships by
historical research.
Criteria is given as a dictionary with key as column name,
and a list of desired values.
"""
self.training_mask = isolate_training_data(self.cliwoc_data, criteria)
def encode_ship_IDs(self):
"""
Convert ship ID for each voyage in CLIWOC data to numerical values.
"""
label_encoding = preprocessing.LabelEncoder().fit(self.cliwoc_data['LogbookIdent']).classes_
self.cliwoc_data['LogbookIdent'] = preprocessing.LabelEncoder().fit_transform(self.cliwoc_data['LogbookIdent'])
def clean_and_sort_data(self):
"""
Cleans data sets before joining slave_voyage_logs and cliwoc_data.
Performs the following operations:
- adds "slave_logs" column which contains a numerical ID indicating
- converts cliwoc_data from all logs to one entry per voyage (voyages
determined by LogbookIdent). If any logs in a voyage mention slaves,
the voyage is considered a slave ship (ID 2, see below)
- the data classification. IDs are as follows:
- 0 = unclassified data
- 1 = negative training data (from cliwoc_data)
- 2 = positive training/validadtion data (from cliwoc_data)
- 3 = slave_voyages_data (positive training data)
- Drops undesired columns from all data
- Changes column names in slave_voyage_logs to match cliwoc data
- Re-indexes slave_voyage_logs to start indexes after end of cliwoc_data.
This will prevent duplicate indices after joining data sets.
"""
# set slave logs column to 0 for cliwoc data
self.cliwoc_data['slave_logs'] = np.zeros(len(self.cliwoc_data))
# searches all values in a voyage to determine
# if it contains slave mentions
slave_log_locations = self.cliwoc_data['LogbookIdent'].isin(list(self.cliwoc_data['LogbookIdent']
[self.slave_mask].unique()))
# set value of slave log columns for training and validation data
self.cliwoc_data.loc[self.training_mask, 'slave_logs'] = 1
self.cliwoc_data.loc[slave_log_locations, 'slave_logs'] = 2
# sort by logbookIdent and set as index
self.cliwoc_data = self.cliwoc_data.sort_values('LogbookIdent', ascending=True)
self.cliwoc_data_all = self.cliwoc_data.set_index('LogbookIdent', drop=False).copy()
self.cliwoc_data = self.cliwoc_data.set_index('LogbookIdent', drop=False)
self.cliwoc_data = self.cliwoc_data.drop_duplicates('LogbookIdent')
# isolate desired columns from cliwoc data
self.cliwoc_data = isolate_columns(self.cliwoc_data, desired_columns)
# drop slave_voyage_logs with empty year column
year_ind = ~(self.slave_voyage_logs['yeardep'].isnull())
self.slave_voyage_logs = self.slave_voyage_logs[year_ind]
        # drop cliwoc data before 1750 (only one instance)
self.cliwoc_data = self.cliwoc_data[self.cliwoc_data['Year'] > 1750]
# drop slave_voyage data from before beginning of cliwoc data
ind = (self.slave_voyage_logs['yeardep'] > self.cliwoc_data['Year'].min()) \
& (self.slave_voyage_logs['yeardep'] < self.cliwoc_data['Year'].max())
self.slave_voyage_logs = self.slave_voyage_logs[ind]
# clean slave_voyage logs to have columns that match cliwoc
slave_voyage_desired_cols = list(slave_voyage_conversions.keys())
self.slave_voyage_logs = isolate_columns(self.slave_voyage_logs, slave_voyage_desired_cols)
self.slave_voyage_logs.rename(columns=slave_voyage_conversions, inplace=True)
self.slave_voyage_logs['slave_logs'] = 3
        self.slave_voyage_indices = (np.arange(len(self.slave_voyage_logs)) + (self.cliwoc_data.tail(1).index[0] + 1))
self.slave_voyage_logs = self.slave_voyage_logs.set_index(self.slave_voyage_indices)
def join_data(self):
"""
Join cliwoc and slave_voyage_logs data sets and clean data by converting
all strings to lower case.
This operation should be performed after cleaning the data.
"""
self.all_data = pd.concat([self.cliwoc_data, self.slave_voyage_logs],
ignore_index=True)
self.all_data = clean_data(self.all_data)
del self.cliwoc_data, self.slave_voyage_logs
def match_similar_words(self):
"""
Uses fuzzy string comparison to match similar values in the data.
This operation is optional, but can help to match cognates in different
languages and eliminate typos in data transcription.
For example, frigate (English) and fregate (Spanish) would be converted
to the same value before classification.
"""
fuzz_columns = ['Nationality', 'ShipType', 'VoyageFrom', 'VoyageTo']
for col in fuzz_columns:
self.all_data = fuzzy_wuzzy_classification(self.all_data, col)
def encode_data(self):
"""
Encode categorical values before classification.
For Decision Trees classification, label encoding is used and all unique
string values in the data are converted to unique numerical values.
For Naive Bayes Classification, label encoding is performed followed by
one-hot-encoding, which creates a column of boolean values for each unique
category in the data set.
        See scikit-learn preprocessing documentation for further description of
encoding algorithms.
"""
# encode data
self.all_data = encode_data_df(self.all_data, self.classification_algorithm)
# drop NaNs from one hot encoded data
if self.classification_algorithm == 'Naive Bayes':
self.all_data['no_data'] = self.all_data['nan'].apply(lambda x: x.any(), axis=1).astype(int)
self.all_data = self.all_data.drop('nan', axis=1)
def extract_data_sets(self, multiplier=True):
"""
After encoding and cleaning data, extract training and validation data sets.
"""
# extract logs to classify later
self.unclassified_logs = self.all_data[self.all_data['slave_logs'] == 0]
# extract first validation data set
self.validation_set_1 = self.all_data[self.all_data['slave_logs'] == 2]
# reserve first 20% of slave_voyage_logs as validation set 2
validation_set_2_indices = range(self.slave_voyage_indices.min(),
self.slave_voyage_indices.min() + round(len(self.slave_voyage_indices) * .2))
self.validation_set_2 = self.all_data.iloc[validation_set_2_indices]
# extract training data for positive and negative
training_logs_pos = self.all_data.drop(validation_set_2_indices)
training_logs_pos = training_logs_pos[training_logs_pos['slave_logs'] == 3]
training_logs_neg = self.all_data[self.all_data['slave_logs'] == 1]
# calculate multiplier to make data sets equal size
if multiplier:
repeat_multiplier = round(len(training_logs_pos) / len(training_logs_neg))
else:
# set multiplier to one if no multipler is desired
repeat_multiplier = 1
# create list of classes for training data
# (0 is for non-slave, 1 is for slave)
# index matches training_data
training_classes = np.zeros(len(training_logs_neg)).repeat(repeat_multiplier)
self.training_classes = np.append(training_classes,
np.ones(len(training_logs_pos)))
# join training data
neg_rep = pd.concat([training_logs_neg] * repeat_multiplier)
self.training_data = pd.concat([neg_rep, training_logs_pos],
ignore_index=True)
del self.all_data
def fit_classifier(self):
"""
Fit classifier with training data.
"""
columns = list(self.training_data.columns)
columns.remove('slave_logs')
self.classifier.fit(self.training_data[columns], self.training_classes)
def validate_classifier(self):
"""
Determine predicted classes of validation data sets, and print results.
For the current configuration, all validation data sets are expected to
be positively identified as slave ships.
"""
validation_sets = [self.validation_set_1, self.validation_set_2]
for i, validation_set in enumerate(validation_sets):
columns = list(validation_set.columns)
columns.remove('slave_logs')
predictions = self.classifier.predict(validation_set[columns])
counts = collections.Counter(predictions)
print('validation set', i, ' results: ', counts)
def classify(self):
"""
Classify remaining unclassified data and print results.
"""
# predict class of data (for all columns except for slave_logs, which
# will hold the classification result)
columns = list(self.unclassified_logs.columns)
columns.remove('slave_logs')
predictions = self.classifier.predict(self.unclassified_logs[columns])
# revalue slave_log ID column to indicate classification
self.unclassified_logs['slave_logs'] = predictions + 4
        # print statistics
counts = collections.Counter(predictions)
for key in counts:
percent = (counts[key] / (len(predictions)) * 100)
print(round(percent, 2), 'of data was classified as ', key)
def export_data(self, save_filename='classifier_results.csv'):
"""
Export results to be plotted in Fusion Tables google app.
"""
# assign the classifier results to the cliwoc data frame
for val in self.unclassified_logs['slave_logs'].unique():
ind = self.unclassified_logs[self.unclassified_logs['slave_logs'] == val].index
self.cliwoc_data_all.loc[ind, 'slave_logs'] = val
# isolate the columns that we would like to save
columns = ['ShipName', 'ShipType', 'slave_logs',
'Nationality', 'Year', 'Lat3', 'Lon3']
self.cliwoc_data_all = isolate_columns(self.cliwoc_data_all, columns)
# save the altered cliwoc dataframe to a csv file
self.cliwoc_data_all.to_csv(save_filename)
return
def load_clean_and_classify(self, fuzz=False, export_csv=True):
"""
Perform all functions, and print status updates.
Input: fuzz = boolean value, default is false. Fuzzy string matching will
only be performed if fuzz = True.
"""
print("Loading data...")
self.load_data()
self.encode_ship_IDs()
print("Finding ship logs that mention slaves...")
self.find_logs_that_mention_slaves()
print("Finding training data...")
self.find_training_data({'ShipName': non_slave_ships})
print("Cleaning data...")
self.clean_and_sort_data()
print("Joining data sets...")
self.join_data()
if fuzz:
print("Matching similar string values with fuzzy wuzzy...")
self.match_similar_words()
print("Encoding data...")
self.encode_data()
print("Extracting training and validation data...")
self.extract_data_sets()
print("Fitting classifier...")
self.fit_classifier()
print("Validating classifier...")
print()
self.validate_classifier()
print("Classifying unknown data...")
print()
self.classify()
if export_csv:
print("Exporting data...")
self.export_data()
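# Hedged usage sketch (not part of the original module): running the whole
# pipeline end to end with the default data sets shipped with the package.
if __name__ == '__main__':
    classifier = LogbookClassifier(classification_algorithm='Decision Tree')
    classifier.load_clean_and_classify(fuzz=False, export_csv=True)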
|
StarcoderdataPython
|
6492236
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .. import bar
import base
import urllib
import urllib2
import gobject
import threading
try:
import json
except ImportError:
import simplejson as json
class BitcoinTicker(base._TextBox):
''' A bitcoin ticker widget, data provided by the MtGox API
Format options:
buy, sell
'''
QUERY_URL = "http://data.mtgox.com/api/1/BTC%s/ticker_fast"
currency_code = {'dollar': 'USD', 'euro': 'EUR'}
defaults = [
## One of (location, woeid) must be set.
(
'currency',
'dollar',
'The currency the value of bitcoin is displayed in'
),
('format', 'BTC Buy: {buy}, Sell: {sell}', 'Display format'),
('update_interval', 600, 'Update interval in seconds')
]
def __init__(self, **config):
base._TextBox.__init__(self, 'N/A', width=bar.CALCULATED, **config)
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
self.add_defaults(BitcoinTicker.defaults)
self.timeout_add(self.update_interval, self.wx_updater)
def button_press(self, x, y, button):
self.update(self.fetch_data())
def wx_updater(self):
self.log.info('adding WX widget timer')
def worker():
data = self.fetch_data()
gobject.idle_add(self.update, data)
threading.Thread(target=worker).start()
return True
def fetch_data(self):
res = urllib2.urlopen(
self.QUERY_URL % self.currency_code[self.currency]
)
raw = json.loads(res.read())
data = {
'sell': raw['return']['sell']['display'],
'buy': raw['return']['buy']['display']
}
return data
def update(self, data):
if data:
self.text = self.format.format(**data)
else:
self.text = 'N/A'
self.bar.draw()
return False
|
StarcoderdataPython
|
3248140
|
# -*- coding: utf-8 -*-
## \package globals.threads
#
# WARNING: Standard Python module 'threads' cannot be imported here
# MIT licensing
# See: docs/LICENSE.txt
import threading
from dbr.log import Logger
thr = threading
## Standard thread class with renamed methods
class Thread(thr.Thread):
def __init__(self, function, *args):
thr.Thread.__init__(self, target=function, args=args)
self.Active = False
# TODO: Retrieve target exit value
self.ExitVal = None
def __del__(self):
Logger.Debug(__name__, u'Destroying Thread instance; Thread is active: {}'.format(self.IsActive()))
## Exits the thread & sets inactive
#
# Alias of globals.threads.Thread.Join
def Exit(self):
return self.Join()
## Retrieves the thread identifier
def GetId(self):
return self.ident
## Tests if thread is active
def IsActive(self):
return self.Active
## Exits the thread & sets inactive
def join(self):
if self.IsActive():
Logger.Debug(__name__, u'Joining thread ...')
thr.Thread.join(self)
self.Active = False
## Exits the thread & sets inactive
#
# Alias of globals.threads.Thread.join
def Join(self):
return self.join()
## Executes target under new thread
def start(self):
try:
thr.Thread.start(self)
self.Active = True
# Do not try to restart thread if already started
except RuntimeError:
Logger.Debug(__name__, u'ThreadStart: Thread is active, cannot restart')
# In case active state has been changed
self.Active = True
pass
return self.IsActive()
## Alias for start method
def Start(self):
return self.start()
active_threads = []
## Creates a new thread for processing
#
# \return
# \b \e Integer thread ID if successfully activated
def CreateThread(function, *args):
    global active_threads
    new_thread = Thread(function, *args)
    # Start the thread so it becomes active and receives an identifier
    new_thread.Start()
    thread_id = new_thread.GetId()
    if new_thread.IsActive() and thread_id not in active_threads:
        active_threads.append(thread_id)
        return thread_id
    return None
## Ends an active thread
#
# TODO: Define
# \param thread_id
# \b \e Integer ID of the thread to kill
# \return
# \b \e True if thread was successfully killed
def KillThread(thread_id):
global active_threads
if thread_id not in active_threads:
return False
# REMOVEME:
return False
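## Hedged usage sketch (not part of the original module): spawning a worker
## through CreateThread. The worker function below is illustrative only and
## relies on the module's own Logger import.
if __name__ == '__main__':
    def _example_worker(name):
        Logger.Debug(__name__, u'Worker {} running'.format(name))
    CreateThread(_example_worker, u'example')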
|
StarcoderdataPython
|
18498
|
# code jam: Qualification Round 2017: Problem C. Bathroom Stalls
def read_int():
return int(raw_input())
def read_int_n():
return map(int, raw_input().split())
def get_y_z(n, k):
if k == 1:
if n & 1 == 0:
# Even Number
return n >> 1, (n >> 1) - 1
else:
# Odd Number
return n >> 1, n >> 1
else:
if n & 1 == 0:
# Even Number
if k & 1 == 0:
# Even Number
return get_y_z(n >> 1, k >> 1)
else:
# Odd Number
return get_y_z((n >> 1) - 1, k >> 1)
else:
# Odd Number
return get_y_z(n >> 1, k >> 1)
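# Worked example (illustration, not from the original file): for N=5, K=2 the
# first person splits 5 stalls into gaps of 2 and 2; the second person takes a
# gap of 2, leaving sides of 1 and 0, so the output line is "Case #x: 1 0".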
T = read_int()
x = 1
while x <= T:
N, K = read_int_n()
y, z = get_y_z(N, K)
print 'Case #{}: {} {}'.format(x, y, z)
x += 1
|
StarcoderdataPython
|
3302001
|
"""Tests for interacting with Postgres database"""
from typing import Dict, Any
import json
from driver.collector.collector_factory import get_postgres_version, connect_postgres
from driver.database import collect_data_from_database
from driver.collector.postgres_collector import PostgresCollector
# pylint: disable=missing-function-docstring
def _get_conf(
pg_user: str, pg_password: str, pg_host: str, pg_port: str, pg_database: str
) -> Dict[str, str]:
conf = {
"user": pg_user,
"password": <PASSWORD>,
"host": pg_host,
"port": pg_port,
"dbname": pg_database,
}
return conf
def _get_driver_conf(
db_type: str,
pg_user: str,
pg_password: str,
pg_host: str,
pg_port: str,
pg_database: str,
) -> Dict[str, str]:
# pylint: disable=too-many-arguments
conf = {
"db_user": pg_user,
"db_password": <PASSWORD>,
"db_host": pg_host,
"db_port": pg_port,
"db_name": pg_database,
"db_type": db_type,
"db_provider": "on_premise",
"db_key": "test_key",
"organization_id": "test_organization",
}
return conf
def test_postgres_collector_version(
pg_user: str, pg_password: str, pg_host: str, pg_port: str, pg_database: str
) -> None:
conf = _get_conf(pg_user, pg_password, pg_host, pg_port, pg_database)
conn = connect_postgres(conf)
version = get_postgres_version(conn)
collector = PostgresCollector(conn, version)
conn.close()
assert collector.get_version() == version
def test_postgres_collector_permission(
pg_user: str, pg_password: str, pg_host: str, pg_port: str, pg_database: str
) -> None:
conf = _get_conf(pg_user, pg_password, pg_host, pg_port, pg_database)
conn = connect_postgres(conf)
version = get_postgres_version(conn)
collector = PostgresCollector(conn, version)
perm_res = collector.check_permission()
conn.close()
assert perm_res[1] == []
assert perm_res[0] is True
def _verify_postgres_knobs(knobs: Dict[str, Any]) -> None:
assert int(knobs["global"]["global"]["shared_buffers"]) >= 0
assert knobs["local"] is None
def test_postgres_collector_knobs(
pg_user: str, pg_password: str, pg_host: str, pg_port: str, pg_database: str
) -> None:
conf = _get_conf(pg_user, pg_password, pg_host, pg_port, pg_database)
conn = connect_postgres(conf)
version = get_postgres_version(conn)
collector = PostgresCollector(conn, version)
knobs = collector.collect_knobs()
conn.close()
# the knob json should not contain any field that cannot be converted to a string,
# like decimal type and datetime type
json.dumps(knobs)
_verify_postgres_knobs(knobs)
def _verify_postgres_metrics(metrics: Dict[str, Any]) -> None:
assert metrics["global"]["pg_stat_archiver"]["archived_count"] >= 0
assert metrics["global"]["pg_stat_bgwriter"]["checkpoints_req"] >= 0
assert metrics["local"]["database"]["pg_stat_database"][1]["datname"] == "template1"
assert (
metrics["local"]["database"]["pg_stat_database_conflicts"][1]["datname"]
== "template1"
)
assert metrics["local"]["table"]["pg_stat_user_tables"] is not None
assert metrics["local"]["table"]["pg_statio_user_tables"] is not None
assert metrics["local"]["index"]["pg_stat_user_indexes"] is not None
assert metrics["local"]["index"]["pg_statio_user_indexes"] is not None
def test_postgres_collector_metrics(
pg_user: str, pg_password: str, pg_host: str, pg_port: str, pg_database: str
) -> None:
conf = _get_conf(pg_user, pg_password, pg_host, pg_port, pg_database)
conn = connect_postgres(conf)
version = get_postgres_version(conn)
collector = PostgresCollector(conn, version)
metrics = collector.collect_metrics()
conn.close()
# the metric json should not contain any field that cannot be converted to a string,
# like decimal type and datetime type
json.dumps(metrics)
_verify_postgres_metrics(metrics)
def test_collect_data_from_database(
db_type: str,
pg_user: str,
pg_password: str,
pg_host: str,
pg_port: str,
pg_database: str,
) -> None:
# pylint: disable=too-many-arguments
driver_conf = _get_driver_conf(
db_type, pg_user, pg_password, pg_host, pg_port, pg_database
)
observation = collect_data_from_database(driver_conf)
knobs = observation["knobs_data"]
metrics = observation["metrics_data"]
summary = observation["summary"]
version_str = summary["version"]
_verify_postgres_knobs(knobs)
_verify_postgres_metrics(metrics)
assert summary["observation_time"] > 0
assert len(version_str) > 0
|
StarcoderdataPython
|
11342357
|
class Solution(object):
def evalRPN(self, tokens):
"""
:type tokens: List[str]
:rtype: int
"""
stck=[]
for i in xrange(len(tokens)):
# deal with negative
if tokens[i].isdigit() or (tokens[i][0]=='-' and tokens[i][1:].isdigit()):
stck.append(int(tokens[i]))
elif tokens[i]=='+':
num1=stck.pop()
num2=stck.pop()
stck.append(num2+num1)
elif tokens[i]=='*':
num1=stck.pop()
num2=stck.pop()
stck.append(num2*num1)
elif tokens[i]=='-':
num1=stck.pop()
num2=stck.pop()
stck.append(num2-num1)
elif tokens[i]=='/':
num1=stck.pop()
num2=stck.pop()
# python 2 division problem
stck.append(num2/num1 if num2*num1>0 else -(abs(num2)/abs(num1)))
return stck[0] if stck else 0
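# Hedged usage sketch (not part of the original solution): evaluating the
# classic RPN expression (2 + 1) * 3.
if __name__ == '__main__':
    print Solution().evalRPN(["2", "1", "+", "3", "*"])  # expected output: 9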
|
StarcoderdataPython
|
6491953
|
<gh_stars>1-10
"""
Functions for interacting with the database server.
"""
from server_common.utilities import dehex_and_decompress, print_and_log
from server_common.channel_access import ChannelAccess
from server_common.pv_names import DatabasePVNames
import json
import traceback
def get_iocs(prefix):
"""
Get the list of available IOCs from DatabaseServer.
Args:
prefix : The PV prefix for this instrument.
Returns:
A list of the names of available IOCs.
"""
#
try:
rawjson = dehex_and_decompress(bytes(ChannelAccess.caget(prefix + DatabasePVNames.IOCS, as_string=True),
encoding="utf-8")).decode("utf-8")
return json.loads(rawjson).keys()
except Exception:
print_and_log(f"Could not retrieve IOC list: {traceback.format_exc()}", "MAJOR")
return []
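# Hedged usage sketch (not part of the original module): "IN:DEMO:" is a
# hypothetical instrument PV prefix; a reachable DatabaseServer is assumed.
if __name__ == "__main__":
    for ioc in get_iocs("IN:DEMO:"):
        print(ioc)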
|
StarcoderdataPython
|
385818
|
#Naemazam(github:@naemazam)
#R-PAss (Remember Password)
from tkinter import *
from tkinter import messagebox
import json
import pyperclip
# password generator
from password_generator import password_generator
# color
WINDOW_BG = "#020203"
FIELD_COLORS = "#272b2b"
FIELD_FONT_COLOR = "#07d6fa"
LABEL_COLOR = "#10cf02"
FONT = ("Courier", 12, "normal")
# Password Genator
def get_password():
password = password_generator()
pyperclip.copy(password)
password_entry.delete(0, END)
password_entry.insert(END, password)
# save password
def database_manager(new_user_entry):
try:
with open("data.json", mode="r") as old_password_file:
password_data = json.load(old_password_file)
except (FileNotFoundError, json.decoder.JSONDecodeError):
with open("data.json", mode="w") as new_password_file:
json.dump(new_user_entry, new_password_file, indent=4)
else:
password_data.update(new_user_entry)
with open("data.json", mode="w") as old_password_file:
json.dump(password_data, old_password_file, indent=4)
finally:
website_entry.delete(0, END)
password_entry.delete(0, END)
def save_password():
website = website_entry.get()
email = email_entry.get()
password = password_entry.get()
if len(website) == 0 or len(password) == 0:
messagebox.showinfo(title="warning", message="No!! fields can empty")
else:
is_ok = messagebox.askokcancel(title="Confirm Again", message=f"These are the Information you entered\n"
f"Email: {email}"
f"\nPassword: {password}\nIs it okay to save ?")
if is_ok:
pyperclip.copy(password)
new_entry_in_json = {
website:
{
"Email": email,
"Password": password
}
}
database_manager(new_entry_in_json)
#save password
def search_password():
website = website_entry.get()
if len(website) == 0:
messagebox.showinfo(title="Warning", message="Enter a website to search")
else:
try:
with open("data.json", mode="r") as old_password_file:
password_data = json.load(old_password_file)
except (FileNotFoundError, json.decoder.JSONDecodeError):
messagebox.showinfo(title="No!!!", message="Sorry, No!! Data")
else:
if website in password_data:
email = password_data[website]["Email"]
password = password_data[website]["Password"]
is_clipboard = messagebox.askokcancel(title=website, message=f"Email: {email}\nPassword: {password}"
f"\n\nCopied to clipboard ?")
if is_clipboard:
pyperclip.copy(password)
messagebox.showinfo(title="Copied to clipboard", message="Password has been Copied")
else:
messagebox.showinfo(title=" not Data for this website", message=f"The password for {website}\n"
f"has not been stored")
# Call UI
window = Tk()
window.title("Remember Passwords")
window.config(padx=20, pady=20, bg=WINDOW_BG)
PASS_IMG = PhotoImage(file="logo.png")
canvas = Canvas(width=200, height=200, bg=WINDOW_BG, highlightthickness=0)
canvas.config()
canvas.create_image(100, 100, image=PASS_IMG)
canvas.grid(column=1, row=0)
website_label = Label(text="Website", bg=WINDOW_BG, padx=20, font=FONT, fg=LABEL_COLOR)
website_label.grid(column=0, row=1, sticky=W)
email_label = Label(text="Email/Username", bg=WINDOW_BG, padx=20, font=FONT, fg=LABEL_COLOR)
email_label.grid(column=0, row=2, sticky=W)
password_label = Label(text="Password", bg=WINDOW_BG, padx=20, font=FONT, fg=LABEL_COLOR)
password_label.grid(column=0, row=3,sticky=W)
window.grid_columnconfigure(1, weight=1)
website_entry = Entry(width=30, bg=FIELD_COLORS, fg=FIELD_FONT_COLOR, font=FONT)
website_entry.insert(END, string="")
website_entry.grid(column=1, row=1)
website_entry.focus()
email_entry = Entry(width=30, bg=FIELD_COLORS, fg=FIELD_FONT_COLOR, font=FONT)
email_entry.insert(END, string="")
email_entry.grid(column=1, row=2)
email_entry.insert(0, "<EMAIL>")
password_entry = Entry(width=30, bg=FIELD_COLORS, fg=FIELD_FONT_COLOR, font=FONT)
password_entry.insert(END, string="")
password_entry.grid(column=1, row=3)
search_button = Button(text="Search", padx=95, font=FONT, command=search_password)
search_button.grid(column=3, row=1)
generate_button = Button(text="Generate Password", command=get_password, font=FONT)
generate_button.grid(column=3, row=3)
add_button = Button(text="Save", width=36, command=save_password, font=FONT)
add_button.grid(column=1, row=5, columnspan=2, sticky=W)
dummy_label = Label(bg=WINDOW_BG)
dummy_label.grid(column=0, row=4, sticky=W)
window.mainloop()
|
StarcoderdataPython
|
3295030
|
<filename>create_tf_records.py
#!/usr/bin/env python
import glob
import random
import tensorflow as tf
import os
import cv2
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def create_tf_example(example, MIN_WIDTH=2, MAX_WIDTH=200, MIN_HEIGHT=2, MAX_HEIGHT=400):
ss = example.split(' ')
if len(ss) < 2:
raise ValueError('Invalid example:' + example)
path = ss[0]
img = cv2.imread(path)
if img is None:
raise ValueError('Missed image:' + path)
height, width, depth = img.shape
with tf.gfile.GFile(path, 'rb') as fid:
encoded_image = fid.read()
format = path.split('.')[-1]
image_format = format.encode()
enc_path = path.encode()
xmins = [] # List of normalized left x coordinates in bounding box (1 per box)
xmaxs = [] # List of normalized right x coordinates in bounding box
# (1 per box)
ymins = [] # List of normalized top y coordinates in bounding box (1 per box)
ymaxs = [] # List of normalized bottom y coordinates in bounding box
# (1 per box)
classes_text = [] # List of string class name of bounding box (1 per box)
classes = [] # List of integer class id of bounding box (1 per box)
num_boxes = int(ss[1])
for i in range(0, num_boxes):
n = i * 4 + 2
x_min = int(ss[n])
y_min = int(ss[n + 1])
x_max = int(ss[n + 2])
y_max = int(ss[n + 3])
if x_min >= x_max:
x_min, x_max = x_max, x_min
if y_min >= y_max:
y_min, y_max = y_max, y_min
        # Use separate names for the box size so the image width/height
        # (taken from img.shape above) are not shadowed.
        box_width = x_max - x_min
        box_height = y_max - y_min
        if x_max >= width:
            x_max = width - 1
        if y_max > height:
            y_max = height - 1
        if box_width < MIN_WIDTH:
            raise ValueError('Box width smaller than min (' + str(box_width) + 'x' + str(box_height) + ') at ' + example)
        if box_width > MAX_WIDTH:
            raise ValueError('Box width bigger than max (' + str(box_width) + 'x' + str(box_height) + ') at ' + example)
        if box_height < MIN_HEIGHT:
            raise ValueError('Box height smaller than min (' + str(box_width) + 'x' + str(box_height) + ') at ' + example)
        if box_height > MAX_HEIGHT:
            raise ValueError('Box height bigger than max (' + str(box_width) + 'x' + str(box_height) + ') at ' + example)
        xmins.append(float(x_min) / width)
        xmaxs.append(float(x_max) / width)
        ymins.append(float(y_min) / height)
        ymaxs.append(float(y_max) / height)
classes_text.append('tl'.encode())
classes.append(int(1))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': int64_feature(height),
'image/width': int64_feature(width),
'image/filename': bytes_feature(enc_path),
'image/source_id': bytes_feature(enc_path),
'image/encoded': bytes_feature(encoded_image),
'image/format': bytes_feature(image_format),
'image/object/bbox/xmin': float_list_feature(xmins),
'image/object/bbox/xmax': float_list_feature(xmaxs),
'image/object/bbox/ymin': float_list_feature(ymins),
'image/object/bbox/ymax': float_list_feature(ymaxs),
'image/object/class/text': bytes_list_feature(classes_text),
'image/object/class/label': int64_list_feature(classes),
}))
return tf_example
def get_all_labels(annotation='*/train*.txt'):
lines = []
for file in glob.glob(annotation):
with open(file) as f:
lines.extend(f.read().splitlines())
return lines
def create_tf_record(record_filename, examples):
writer = tf.python_io.TFRecordWriter(record_filename)
counter = 1
len_examples = len(examples)
step = len_examples / 100
i = 1
for example in examples:
try:
tf_example = create_tf_example(example)
writer.write(tf_example.SerializeToString())
except ValueError as err:
print(err.args)
if counter > step:
print("Percent done", i)
i += 1
counter = 0
else:
counter += 1
writer.close()
def main(_):
dir = 'tf_records_'+FLAGS.set
if not os.path.exists(dir):
os.makedirs(dir)
train_output_path = os.path.join(dir, 'train.record')
val_output_path = os.path.join(dir, 'val.record')
path = '/data/traffic_lights/{}/train*.txt'.format(FLAGS.set)
    print('using annotations from', path)
examples = get_all_labels(path)
#examples = examples[:10] # for testing
len_examples = len(examples)
print("Loaded ", len(examples), "examples")
# Test images are not included in the downloaded data set, so we shall perform
# our own split.
random.seed(42)
random.shuffle(examples)
num_train = int(0.7 * len_examples)
train_examples = examples[:num_train]
val_examples = examples[num_train:]
    print('%d training and %d validation examples.' % (len(train_examples), len(val_examples)))
print('Creating training record...')
create_tf_record(train_output_path, train_examples)
print('Creating validation record...')
create_tf_record(val_output_path, val_examples)
if __name__ == '__main__':
flags = tf.app.flags
flags.DEFINE_string('set','','')
# flags.DEFINE_string('input_examples', '/data/traffic_lights/*/train*.txt', 'Path to examples')
# flags.DEFINE_string('output_dir', './tf_records', 'Path to output TFRecord')
FLAGS = flags.FLAGS
with tf.device('/device:GPU:0'):
tf.app.run()
|
StarcoderdataPython
|
6554716
|
<reponame>pointcloudAI/libDepthEye
###
# PointCloud Python Sample : ShowDepthNoGUI.
#
# Copyright (c) 2018 PointCloud.AI Inc.
#
# Author : Adam.Han
#
# Functional description:
# Show simple usage with python to get depth information from DepthEye Camera
#
# Exit shortcut keys:
# Input [Enter] Key
###
import sys
import os
def getDefaultSdkPath():
path = ''
if sys.platform == 'win32':
import win32api
path = win32api.GetLongPathName(os.path.dirname(os.path.realpath(sys.argv[0]))+"\\..\\..\\..")
print('path',path)
path = path + os.sep + "libs"+os.sep +"windows"
elif sys.platform == 'darwin':
path = sys.path[0]+ "/../../.." + os.sep + "libs"+os.sep +"macos"
else :
path = sys.path[0]+ "/../../.." + os.sep + "libs"+os.sep +"ubuntu"
print('path',path)
if os.path.isdir(path):
return path
else:
print('Failed to get default PointCloud SDK path')
return None
sdkPath = getDefaultSdkPath()
libPath = sdkPath + os.sep + "lib"
pythonPath = libPath + os.sep + "python3"
print("pythonPath ",pythonPath)
sys.path.append(libPath)
sys.path.append(pythonPath)
print("***** SDK path:",sdkPath," libPath:",libPath)
import PointCloud
import numpy as np
import sys
def createWindow():
global window
if window == None:
window = MainWindow(cameraSystem)
return
class MainWindow():
def __init__(self, cameraSystem):
print("MainWindow init")
self.depthCamera = cameraSystem.connect(devices[0])
self.data = {}
if self.depthCamera:
res = self.depthCamera.Init(0)
if not res :
print(" Init fail")
else:
print(" Init ok")
self.depthCamera.clearAllCallbacks()
self.depthCamera.registerCallback(PointCloud.DepthCamera.FRAME_RAW_FRAME_PROCESSED, self.processPhaseAmpFrame)
#self.depthCamera.registerCallback(PointCloud.DepthCamera.FRAME_DEPTH_FRAME, self.processDepthFrame)
#self.depthCamera.registerCallback(PointCloud.DepthCamera.FRAME_XYZI_POINT_CLOUD_FRAME, self.processPointCloudFrame)
# CameraType camType = TOF_CAMERA;
if not self.depthCamera.start():
print(" start fail")
else:
print(" start ok")
def processPhaseAmpFrame(self, depthCamera, frame, type):
if frame is None:
return
tofFrame = PointCloud.PhaAmpFrame.typeCast(frame)
if not tofFrame:
return
size = tofFrame.size
print("frame size",size.width, size.height)
# self.phaseArray = np.array(tofFrame.pha_amp, copy = True)
# a = np.array(tofFrame.pha_amp[size.height*size.width:])
p = np.array(tofFrame.pha_amp[:size.height*size.width], copy = True)
phaseArray = np.transpose(p.reshape((size.height, size.width)))
print(" conter point phase :", phaseArray[size.width/2,size.height/2])
def processDepthFrame(self, depthCamera, frame, type):
#frame = self.depthQueue.get(timeout=0.25)
if frame is None:
return
depthFrame = PointCloud.DepthFrame.typeCast(frame)
if not depthFrame:
return
d = np.array(depthFrame.depth)
d1 = np.transpose(d.reshape((depthFrame.size.height, depthFrame.size.width)))
print("point(x:40,y:30)'s distance is %s meter." %d1[40][30])
def processPointCloudFrame(self, depthCamera, frame, type):
if frame is None:
return
pointCloudFrame = PointCloud.XYZIPointCloudFrame.typeCast(frame)
if not pointCloudFrame:
return
pcf = np.array(pointCloudFrame, copy=True)
print("pointCloudFrame.size: %s" %pointCloudFrame.size())
### point.i is intensity, which come from amplitude
### point.z is distance, which come from depth
# for index in range(pointCloudFrame.size()):
# point = pointCloudFrame.points[index]
# print("current point : index %s [ x : %s , y: %s ,z : %s ,i : %s]" %(index, point.x,point.y,point.z,point.i))
def stop(self):
if self.depthCamera:
print("depthCamera getrefcount 1: " ,sys.getrefcount(self.depthCamera))
self.depthCamera.stop()
self.depthCamera.clearAllCallbacks()
print("depthCamera getrefcount 2: " ,sys.getrefcount(self.depthCamera))
print(" before cameraSystem. disconnect")
cameraSystem.disconnect(self.depthCamera, True)
print(" after cameraSystem. disconnect")
print("depthCamera getrefcount 3: " ,sys.getrefcount(self.depthCamera))
del self.depthCamera
print(" after del self.depthCamera")
# print("depthCamera getrefcount 4: " ,sys.getrefcount(self.depthCamera))
self.depthCamera = None
print(" after self.depthCamera is None")
# print("depthCamera getrefcount 5: " ,sys.getrefcount(self.depthCamera))
cameraSystem = PointCloud.CameraSystem()
devices = cameraSystem.scan()
if len(devices) == 1:
print(" Find one device.")
window = MainWindow(cameraSystem)
key = input("Input enter key to quit.")
print(" Quit now.")
window.stop()
else:
print(" No device found.")
print(" before del cameraSystem.")
del cameraSystem
print(" after del cameraSystem.")
cameraSystem = None
|
StarcoderdataPython
|
12857322
|
from django.contrib import admin
from django.db.models import Model
__all__ = ["register_all"]
def register_all(models, admin_class=admin.ModelAdmin):
"""
Easily register Models to Django admin site.
::
from yourapp import models
from django_boost.admin.sites import register_all
register_all(models)
Register all models defined in `models.py` in Django admin site.
Custom admin classes are also available.
::
from your_app import models
from your_app import admin
from django_boost.admin.sites import register_all
register_all(models, admin_class=admin.CustomAdmin)
"""
for attr in dir(models):
attr = getattr(models, attr, None)
if isinstance(attr, type):
if issubclass(attr, Model) and not attr._meta.abstract:
try:
admin.site.register(attr, admin_class)
except admin.sites.AlreadyRegistered:
pass
|
StarcoderdataPython
|
9792337
|
<gh_stars>0
from flask import render_template, url_for, flash, redirect, request, g, current_app, Markup
import os
from flask_login import login_user, current_user, logout_user, login_required
from .. import db, bcrypt
import secrets
from werkzeug.urls import url_parse
from flask_dance.consumer.backend.sqla import SQLAlchemyBackend
from . import bp, batman_example, googlex
from flask_dance.contrib.google import google
from ..decorators import admin_required, permission_required
from .email import send_reset_email
from datetime import datetime
from sqlalchemy import and_
from ..models import User, Role, Permission, OAuth, Rewards
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_dance.consumer import oauth_authorized
from .forms import (SignUpForm, LoginForm, RequestResetForm, WelcomeForm, ResetPasswordForm, ProtagonistCatForm, ProtagonistSubCatASportsForm,
ProtagonistSubCatSportsForm, ProtagonistSubCatMusicForm, WTKForm)
from app.main.forms import SearchForm
from sqlalchemy.orm.exc import NoResultFound
batman_example.backend = SQLAlchemyBackend(
OAuth, db.session, user=current_user, user_required=False)
googlex.backend = SQLAlchemyBackend(
OAuth, db.session, user=current_user, user_required=False)
@bp.route("/register", methods=['GET', 'POST'])
def register():
form = SignUpForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(
form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data,
password=<PASSWORD>, tandc=form.tandc.data)
db.session.add(user)
db.session.commit()
message = Markup(
'''Welcome! to THE TRIBAL BOX the one stop Platform please update your profile and choose your <a href="/role">role</a>''')
flash(message, 'success')
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user)
re = Rewards.query.filter_by(user_id=user.id).first()
try:
if re.action != "sign up":
random_hex = secrets.token_hex(8)
rewards = Rewards(points=10, action="sign up",address=random_hex, Protagonist=user)
db.session.add(rewards)
db.session.commit()
except:
if re == None:
random_hex = secrets.token_hex(8)
rewards = Rewards(points=10, action="sign up",address=random_hex, Protagonist=user)
db.session.add(rewards)
db.session.commit()
return redirect(url_for('main.home'))
flash('Login Unsuccessful. Please check email and password', 'danger')
return render_template('auth/register.html', title='Register', form=form)
@bp.route('/facebook')
def facebook():
if not batman_example.session.authorized:
return redirect(url_for("batman-example.login"))
resp = batman_example.session.get("me")
assert resp.ok
return resp.text
@oauth_authorized.connect_via(batman_example)
def facebook_logged_in(blueprint, token):
account_info = blueprint.session.get('me')
email_a = blueprint.session.get('me?fields=email')
if email_a.ok:
email_a_json = email_a.json()
email = email_a_json["email"]
if account_info.ok:
account_info_json = account_info.json()
username = account_info_json["name"]
password = account_info_json["id"]
query = User.query.filter_by(username=username)
try:
user = query.one()
except NoResultFound:
user = User(username=username,
password=password,
email=email
)
db.session.add(user)
db.session.commit()
flash('Welcome to THE TRIBAL BOX the one stop Platform please update your profile and choose your role in our platform', 'success')
login_user(user)
return redirect(url_for('main.home'))
@bp.route('/googlelogin')
def googlelogin():
if not google.authorized:
return redirect(url_for("google.login"))
resp = google.get("/oauth2/v2/userinfo")
assert resp.ok
return resp.text
@oauth_authorized.connect_via(googlex)
def google_logged_in(blueprint, token):
account_info = blueprint.session.get('/oauth2/v2/userinfo')
if account_info.ok:
account_info_json = account_info.json()
username = account_info_json["name"]
password = <PASSWORD>["id"]
email = account_info_json["email"]
query = User.query.filter_by(username=username)
try:
user = query.one()
except NoResultFound:
user = User(username=username,
password=password,
email=email
)
db.session.add(user)
db.session.commit()
flash('Welcome to THE TRIBAL BOX the one stop Platform please update your profile and choose your role in our platform', 'success')
login_user(user)
return redirect(url_for('main.home'))
# Now we are going to use the Blueprint we have created in the __init__.py file in auth directory
@bp.before_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
g.search_form = SearchForm()
db.session.commit()
@bp.route('/admin')
@login_required
@admin_required
def for_admins_only():
return "For administrators!"
@bp.route('/moderate')
@login_required
@permission_required(Permission.MODERATE)
def for_moderators_only():
return "For comment moderators!"
@bp.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('donation.donation_view'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            # Query rewards only after the user lookup succeeded, so a missing
            # account does not raise AttributeError on user.id.
            re = Rewards.query.filter_by(user_id=user.id).first()
            login_user(user, remember=form.remember.data)
try:
if re.action!="login" :
random_hex = secrets.token_hex(8)
rewards = Rewards(points=4, action="login", address=random_hex, Protagonist=user)
db.session.add(rewards)
db.session.commit()
except:
if re == None:
random_hex = secrets.token_hex(8)
rewards = Rewards(points=4, action="login",address=random_hex, Protagonist=user)
db.session.add(rewards)
db.session.commit()
if user.is_role == "Creators":
return redirect(url_for('donation.donation_view'))
if user.is_role == "Brand":
return redirect(url_for('donation.donation_view'))
if user.is_role == "Mentor":
return redirect(url_for('donation.donation_view'))
else:
return redirect(url_for('donation.donation_view'))
return render_template('auth/login.html', title='Sign In', form=form)
@bp.route("/logout")
def logout():
logout_user()
return redirect(url_for('main.landing'))
# This part of the code is related to resetting of password and other user authentication
@bp.route("/reset_password", methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('main.home'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
send_reset_email(user)
flash('An email has been sent with instructions to reset your password.', 'info')
return redirect(url_for('auth.login'))
return render_template('auth/reset_request.html', title='Reset Password', form=form)
@bp.route("/reset_password/<token>", methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('main.home'))
user = User.verify_reset_token(token)
if user is None:
flash('That is an invalid or expired token', 'warning')
return redirect(url_for('auth.reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = <PASSWORD>.generate_password_hash(form.password.data).decode('utf-8')
user.password = <PASSWORD>
db.session.commit()
flash('Your password has been updated! You are now able to log in', 'success')
return redirect(url_for('auth.login'))
return render_template('auth/reset_token.html', title='Reset Password', form=form)
|
StarcoderdataPython
|
6643973
|
from pyrtcdc import ffi, lib
from time import sleep
from threading import Thread
from base64 import b64encode, b64decode
RTCDC_CHANNEL_STATE_CLOSED = 0
RTCDC_CHANNEL_STATE_CONNECTING = 1
RTCDC_CHANNEL_STATE_CONNECTED = 2
RTCDC_DATATYPE_STRING = 0
RTCDC_DATATYPE_BINARY = 1
@ffi.def_extern()
def onopen_cb(channel, userdata):
ffi.from_handle(userdata)._onOpen(channel)
@ffi.def_extern()
def onmessage_cb(channel, datatype, data, length, userdata):
if datatype == RTCDC_DATATYPE_STRING:
message = ffi.cast("char *", data)
message = ffi.string(message)
message = message[:length].decode("UTF-8")
if datatype == RTCDC_DATATYPE_BINARY:
message = ffi.cast("char *", data)
message = ffi.buffer(message, length)[:]
if userdata:
ffi.from_handle(userdata)._onMessage(message)
@ffi.def_extern()
def onclose_cb(channel, userdata):
ffi.from_handle(userdata)._onClose(channel)
@ffi.def_extern()
def onchannel_cb(peer, dc, userdata):
dc.on_message = lib.onmessage_cb
dc.user_data = userdata
ffi.from_handle(userdata)._onChannel(peer, dc)
@ffi.def_extern()
def oncandidate_cb(peer, candidate, userdata):
candidate = ffi.string(candidate)
ffi.from_handle(userdata)._onCandidate(peer, candidate)
@ffi.def_extern()
def onconnect_cb(peer, userdata):
ffi.from_handle(userdata)._onConnect(peer, userdata)
class DataChannel():
def _onOpen(self, channel):
self.dc_open = True
self.onOpen(channel)
def _onMessage(self, message):
self.onMessage(message)
def _onClose(self, channel):
self.dc_open = False
self.onClose(channel)
def _onChannel(self, peer, channel):
self.dc_open = True
self.onChannel(peer, channel)
def _onCandidate(self, peer, candidate):
self.onCandidate(peer, candidate)
def _onConnect(self, peer, userdata):
lib.rtcdc_create_data_channel(peer, self.dcName, self.protocol, lib.onopen_cb, lib.onmessage_cb, lib.onclose_cb, userdata)
self.onConnect(peer)
def onOpen(self, channel):
pass
def onMessage(self, message):
pass
def onClose(self, channel):
pass
def onChannel(self, peer, channel):
pass
def onCandidate(self, peer, candidate):
pass
def onConnect(self, peer):
pass
def __init__(self, dcName="test-dc", stunServer="stun.services.mozilla.com", port=3418, protocol=""):
self._handle = ffi.new_handle(self)
self.dc_open = False
self.dcName = bytes(dcName, "UTF-8")
self.protocol = bytes(protocol, "UTF-8")
port = int(port)
self.peer = lib.rtcdc_create_peer_connection(lib.onchannel_cb, lib.oncandidate_cb, lib.onconnect_cb, bytes(stunServer, "UTF-8"), port, self._handle)
Thread(target=lib.rtcdc_loop, args=(self.peer, ),).start()
def generate_offer_sdp(self):
offerSDP = lib.rtcdc_generate_offer_sdp(self.peer)
offerSDP = ffi.string(offerSDP)
return b64encode(offerSDP)
def generate_local_candidate(self):
candidateSDP = lib.rtcdc_generate_local_candidate_sdp(self.peer)
candidateSDP = ffi.string(candidateSDP)
return b64encode(candidateSDP)
def parse_offer_sdp(self, offerSDP):
try:
remoteSDP = b64decode(offerSDP)
        except (TypeError, ValueError):
            # binascii.Error (a ValueError subclass) is raised for bad base64 on Python 3
            print("Invalid base64!")
            return None
parse_offer = lib.rtcdc_parse_offer_sdp(self.peer, remoteSDP)
if parse_offer >= 0:
return self.generate_offer_sdp()
else:
print("Error in parsing offer SDP")
return None
def parse_candidates(self, candidate):
try:
remoteCand = b64decode(candidate)
        except (TypeError, ValueError):
            # binascii.Error (a ValueError subclass) is raised for bad base64 on Python 3
            print("Invalid base64!")
            return False
parse_cand = lib.rtcdc_parse_candidate_sdp(self.peer, remoteCand)
return (parse_cand > 0)
def send_message(self, message):
length_msg = len(message)
if type(message) is str:
datatype = RTCDC_DATATYPE_STRING
message = bytes(message, "UTF-8")
elif type(message) is bytes:
datatype = RTCDC_DATATYPE_BINARY
if (self.peer[0].initialized > 0):
if (self.dc_open == True and self.peer[0].channels[0].state > RTCDC_CHANNEL_STATE_CLOSED):
channel = self.peer[0].channels[0]
return (lib.rtcdc_send_message(channel, datatype, message, length_msg) == 0)
else:
return False
else:
return False
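# Hedged usage sketch (not part of the original module): create a channel and
# print base64 SDP blobs for manual signalling. Requires the native librtcdc
# bindings and a reachable STUN server; the rtcdc loop thread keeps running in
# the background.
if __name__ == "__main__":
    dc = DataChannel(dcName="test-dc")
    print(dc.generate_offer_sdp())
    print(dc.generate_local_candidate())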
|
StarcoderdataPython
|
11389449
|
from BuildFrame import BuildDataFrame
from ArbitrageHunter import ArbitrageHunter
import time
import yfinance
#Test 1
while True:
start_time = time.time()
DD20210205call ,DD20210205put = BuildDataFrame('DD','2021-02-05')
DD20210212call ,DD20210212put = BuildDataFrame('DD','2021-02-12')
DD20210219call ,DD20210219put = BuildDataFrame('DD','2021-02-19')
DD20210226call ,DD20210226put = BuildDataFrame('DD','2021-02-26')
DD20210305call ,DD20210305put = BuildDataFrame('DD','2021-03-05')
DD20210312call ,DD20210312put = BuildDataFrame('DD','2021-03-12')
DD20210319call ,DD20210319put = BuildDataFrame('DD','2021-03-19')
DD20210416call ,DD20210416put = BuildDataFrame('DD','2021-04-16')
DD20210716call ,DD20210716put = BuildDataFrame('DD','2021-07-16')
DD20220121call ,DD20220121put = BuildDataFrame('DD','2022-01-21')
DD20230120call ,DD20230120put = BuildDataFrame('DD','2023-01-20')
ArbitrageHunter([DD20210205call, DD20210212call, DD20210219call, DD20210226call, DD20210305call, DD20210312call, DD20210319call, DD20210416call, DD20210716call, DD20220121call, DD20230120call])
ArbitrageHunter([DD20210205put, DD20210212put, DD20210219put, DD20210226put, DD20210305put, DD20210312put, DD20210319put, DD20210416put, DD20210716put, DD20220121put, DD20230120put])
time.sleep(90)
print("Operation took %s seconds ---" % (time.time() - start_time))
AMC20210205call ,AMC20210205put = BuildDataFrame('AMC','2021-02-05')
AMC20210212call ,AMC20210212put = BuildDataFrame('AMC','2021-02-12')
AMC20210219call ,AMC20210219put = BuildDataFrame('AMC','2021-02-19')
AMC20210226call ,AMC20210226put = BuildDataFrame('AMC','2021-02-26')
AMC20210305call ,AMC20210305put = BuildDataFrame('AMC','2021-03-05')
AMC20210312call ,AMC20210312put = BuildDataFrame('AMC','2021-03-12')
AMC20210319call ,AMC20210319put = BuildDataFrame('AMC','2021-03-19')
AMC20210618call ,AMC20210618put = BuildDataFrame('AMC','2021-06-18')
AMC20210917call ,AMC20210917put = BuildDataFrame('AMC','2021-09-17')
AMC20220121call ,AMC20220121put = BuildDataFrame('AMC','2022-01-21')
AMC20230120call ,AMC20230120put = BuildDataFrame('AMC','2023-01-20')
ArbitrageHunter([AMC20210205call, AMC20210212call, AMC20210219call, AMC20210226call, AMC20210305call, AMC20210312call, AMC20210319call, AMC20210618call, AMC20210917call, AMC20220121call, AMC20230120call])
ArbitrageHunter([AMC20210205put, AMC20210212put, AMC20210219put, AMC20210226put, AMC20210305put, AMC20210312put, AMC20210319put, AMC20210618put, AMC20210917put, AMC20220121put, AMC20230120put])
time.sleep(90)
print("Operation took %s seconds ---" % (time.time() - start_time))
NOK20210205call ,NOK20210205put = BuildDataFrame('NOK','2021-02-05')
NOK20210212call ,NOK20210212put = BuildDataFrame('NOK','2021-02-12')
NOK20210219call ,NOK20210219put = BuildDataFrame('NOK','2021-02-19')
NOK20210226call ,NOK20210226put = BuildDataFrame('NOK','2021-02-26')
NOK20210305call ,NOK20210305put = BuildDataFrame('NOK','2021-03-05')
NOK20210312call ,NOK20210312put = BuildDataFrame('NOK','2021-03-12')
NOK20210319call ,NOK20210319put = BuildDataFrame('NOK','2021-03-19')
NOK20210416call ,NOK20210416put = BuildDataFrame('NOK','2021-04-16')
NOK20210618call ,NOK20210618put = BuildDataFrame('NOK','2021-06-18')
NOK20210716call ,NOK20210716put = BuildDataFrame('NOK','2021-07-16')
NOK20220121call ,NOK20220121put = BuildDataFrame('NOK','2022-01-21')
NOK20230120call ,NOK20230120put = BuildDataFrame('NOK','2023-01-20')
ArbitrageHunter([NOK20210205call, NOK20210212call, NOK20210219call, NOK20210226call, NOK20210305call, NOK20210312call, NOK20210319call, NOK20210416call, NOK20210618call, NOK20210716call, NOK20220121call, NOK20230120call])
ArbitrageHunter([NOK20210205put, NOK20210212put, NOK20210219put, NOK20210226put, NOK20210305put, NOK20210312put, NOK20210319put, NOK20210416put, NOK20210618put, NOK20210716put, NOK20220121put, NOK20230120put])
time.sleep(90)
print("Operation took %s seconds ---" % (time.time() - start_time))
CCL20210205call ,CCL20210205put = BuildDataFrame('CCL','2021-02-05')
CCL20210212call ,CCL20210212put = BuildDataFrame('CCL','2021-02-12')
CCL20210219call ,CCL20210219put = BuildDataFrame('CCL','2021-02-19')
CCL20210226call ,CCL20210226put = BuildDataFrame('CCL','2021-02-26')
CCL20210305call ,CCL20210305put = BuildDataFrame('CCL','2021-03-05')
CCL20210312call ,CCL20210312put = BuildDataFrame('CCL','2021-03-12')
CCL20210319call ,CCL20210319put = BuildDataFrame('CCL','2021-03-19')
CCL20210416call ,CCL20210416put = BuildDataFrame('CCL','2021-04-16')
CCL20210716call ,CCL20210716put = BuildDataFrame('CCL','2021-07-16')
CCL20220121call ,CCL20220121put = BuildDataFrame('CCL','2022-01-21')
CCL20230120call ,CCL20230120put = BuildDataFrame('CCL','2023-01-20')
ArbitrageHunter([CCL20210205call, CCL20210212call, CCL20210219call, CCL20210226call, CCL20210305call, CCL20210312call, CCL20210319call, CCL20210416call, CCL20210716call, CCL20220121call, CCL20230120call])
ArbitrageHunter([CCL20210205put, CCL20210212put, CCL20210219put, CCL20210226put, CCL20210305put, CCL20210312put, CCL20210319put, CCL20210416put, CCL20210716put, CCL20220121put, CCL20230120put])
time.sleep(90)
print("Operation took %s seconds ---" % (time.time() - start_time))
SLV20210212call ,SLV20210212put = BuildDataFrame('SLV','2021-02-12')
SLV20210219call ,SLV20210219put = BuildDataFrame('SLV','2021-02-19')
SLV20210226call ,SLV20210226put = BuildDataFrame('SLV','2021-02-26')
SLV20210305call ,SLV20210305put = BuildDataFrame('SLV','2021-03-05')
SLV20210416call ,SLV20210416put = BuildDataFrame('SLV','2021-04-16')
SLV20210630call ,SLV20210630put = BuildDataFrame('SLV','2021-06-30')
SLV20210716call ,SLV20210716put = BuildDataFrame('SLV','2021-07-16')
SLV20210930call ,SLV20210930put = BuildDataFrame('SLV','2021-09-30')
SLV20211231call ,SLV20211231put = BuildDataFrame('SLV','2021-12-31')
SLV20220121call ,SLV20220121put = BuildDataFrame('SLV','2022-01-21')
SLV20220617call ,SLV20220617put = BuildDataFrame('SLV','2022-06-17')
SLV20230120call ,SLV20230120put = BuildDataFrame('SLV','2023-01-20')
ArbitrageHunter([SLV20210212call, SLV20210219call, SLV20210226call, SLV20210305call,SLV20210416call, SLV20210630call, SLV20210716call, SLV20210930call, SLV20211231call,
SLV20220121call, SLV20220617call, SLV20230120call])
ArbitrageHunter([SLV20210212put, SLV20210219put, SLV20210226put, SLV20210305put,SLV20210416put, SLV20210630put, SLV20210716put, SLV20210930put, SLV20211231put, SLV20220121put, SLV20220617put, SLV20230120put])
time.sleep(90)
print("Operation took %s seconds ---" % (time.time() - start_time))
BB20210205call ,BB20210205put = BuildDataFrame('BB','2021-02-05')
BB20210212call ,BB20210212put = BuildDataFrame('BB','2021-02-12')
BB20210219call ,BB20210219put = BuildDataFrame('BB','2021-02-19')
BB20210226call ,BB20210226put = BuildDataFrame('BB','2021-02-26')
BB20210305call ,BB20210305put = BuildDataFrame('BB','2021-03-05')
BB20210312call ,BB20210312put = BuildDataFrame('BB','2021-03-12')
BB20210319call ,BB20210319put = BuildDataFrame('BB','2021-03-19')
BB20210618call ,BB20210618put = BuildDataFrame('BB','2021-06-18')
BB20210917call ,BB20210917put = BuildDataFrame('BB','2021-09-17')
BB20220121call ,BB20220121put = BuildDataFrame('BB','2022-01-21')
BB20230120call ,BB20230120put = BuildDataFrame('BB','2023-01-20')
ArbitrageHunter([BB20210205call, BB20210212call, BB20210219call, BB20210226call, BB20210305call, BB20210312call, BB20210319call, BB20210618call, BB20210917call, BB20220121call, BB20230120call])
ArbitrageHunter([BB20210205put, BB20210212put, BB20210219put, BB20210226put, BB20210305put, BB20210312put, BB20210319put, BB20210618put, BB20210917put, BB20220121put, BB20230120put])
time.sleep(90)
print("Operation took %s seconds ---" % (time.time() - start_time))
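
# Sketch (not part of the original script): the repeated per-ticker blocks above can
# be driven from a ticker -> expirations mapping. BuildDataFrame and ArbitrageHunter
# are assumed to behave exactly as used above; the function is only defined here and
# never called, so it does not trigger any extra downloads.
def run_arbitrage_scan(chains, pause_seconds=90):
    """chains: dict mapping a ticker symbol to a list of 'YYYY-MM-DD' expirations."""
    for ticker, expirations in chains.items():
        calls, puts = [], []
        for expiration in expirations:
            call_df, put_df = BuildDataFrame(ticker, expiration)
            calls.append(call_df)
            puts.append(put_df)
        ArbitrageHunter(calls)   # scan the call chains across expirations
        ArbitrageHunter(puts)    # scan the put chains across expirations
        time.sleep(pause_seconds)
        print("Operation took %s seconds ---" % (time.time() - start_time))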
|
StarcoderdataPython
|
218116
|
#!/usr/bin/env python3
import os
import re
from setuptools import setup, find_packages
version = None
def find(haystack, *needles):
regexes = [(index, re.compile(r'^{}\s*=\s*[\'"]([^\'"]*)[\'"]$'.format(needle))) for index, needle in enumerate(needles)]
values = ['' for needle in needles]
for line in haystack:
if len(regexes) == 0:
break
for rindex, (vindex, regex) in enumerate(regexes):
match = regex.match(line)
if match:
values[vindex] = match.groups()[0]
del regexes[rindex]
break
if len(needles) == 1:
return values[0]
else:
return values
with open(os.path.join(os.path.dirname(__file__), 'domainhook', '__init__.py'), 'r') as domainhook:
version = find(domainhook, '__version__')
setup(
name='domainhook',
version=version,
description='a webhook service for domain management automation',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
install_requires=['fooster-web', 'fooster-db', 'httpx'],
packages=find_packages(),
entry_points={'console_scripts': ['domainhook = domainhook.__main__:main']},
)
|
StarcoderdataPython
|
6595998
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
# noinspection PyUnresolvedReferences,PyProtectedMember
from ._lib import _perl as perl5
from .vendor_perl import PERL_PACKAGE
__version__ = 1.0
class Loader(perl5.Loader):
PACKAGE = "PyPerl5::Loader"
class Proxy(perl5.Proxy):
pass
class CodeRefProxy(perl5.CodeRefProxy):
pass
class TypeMapper(perl5.TypeMapper):
BOOLEAN_PACKAGE = "PyPerl5::Boolean"
object_proxy = Proxy
coderef_proxy = CodeRefProxy
def __init__(self, vm):
super(TypeMapper, self).__init__(vm)
vm.use(self.BOOLEAN_PACKAGE, lazy=True)
self.date_time_package = None
def __load_datetime_package(self):
try:
self.vm.use("DateTime")
self.date_time_package = "DateTime"
return
except ImportError:
pass
try:
self.vm.use("Time::Piece")
self.date_time_package = "Time::Piece"
return
except ImportError:
pass
def map_from_python(self, ctx, obj):
if isinstance(obj, bool):
return self.vm.new(self.BOOLEAN_PACKAGE, method=("true" if obj else "false"))
elif isinstance(obj, datetime.datetime):
if self.date_time_package is None:
self.__load_datetime_package()
if self.date_time_package == "DateTime":
args = {
"year": obj.year, "month": obj.month, "day": obj.day,
"hour": obj.hour, "minute": obj.minute, "second": obj.second,
"nanosecond": obj.microsecond * 1000,
}
if obj.tzname():
args["time_zone"] = obj.tzname()
ret = self.vm.new("DateTime", args)
return ret
elif self.date_time_package == "Time::Piece":
st = obj.timetuple()
args = [st.tm_sec, st.tm_min, st.tm_hour, st.tm_mday, st.tm_mon - 1, st.tm_year - 1900,
st.tm_wday + 1, st.tm_yday - 1, 1 if obj.dst() else 0]
return self.vm.new("Time::Piece", (args,))
else:
obj = obj.isoformat()
return super(TypeMapper, self).map_from_python(ctx, obj)
def map_to_python(self, ctx, ref):
if ref.can("is_bool") and ref.is_bool():
return True if ref.bool() else False
if ref.isa("DateTime"):
return datetime.datetime.fromtimestamp(ref.epoch() + (ref.microsecond() / 10 ** 6))
if ref.isa("Time::Piece"):
return datetime.datetime.fromtimestamp(ref.epoch())
return super(TypeMapper, self).map_to_python(ctx, ref)
class VM(perl5.VM):
def __init__(self, loader_cls=Loader, type_mapper_cls=TypeMapper, include_directory=PERL_PACKAGE):
super(VM, self).__init__(loader_cls, type_mapper_cls, include_directory)
def call(self, subroutine, *args, **kwargs):
if args:
ret = self._call(None, subroutine, args)
elif kwargs:
ret = self._call(None, subroutine, kwargs)
else:
ret = self._call(None, subroutine, None)
return ret
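
# Minimal usage sketch (not part of the original module). It assumes an embedded Perl
# interpreter and the bundled PyPerl5 support packages are available, and that a Perl
# subroutine named "my_sub" has already been loaded -- the name is hypothetical.
# Guarded so nothing executes on import.
if __name__ == "__main__":
    vm = VM()
    # positional arguments pass through TypeMapper.map_from_python
    # (bools become PyPerl5::Boolean objects, datetimes become DateTime/Time::Piece)
    result = vm.call("my_sub", True, datetime.datetime.now())
    print(result)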
|
StarcoderdataPython
|
1600326
|
<filename>service_api/services/__init__.py<gh_stars>0
from dataclasses import dataclass
from typing import Optional
from urllib.parse import urlencode
import aiohttp
import aioredis
import ujson
from aioredis.commands import Redis
from sanic.log import logger
from service_api.domain.decorators import asyncio_task
@dataclass
class ResponseWrapper:
request_url: str
headers: dict
status: int
data: dict
@property
def ok(self):
        return 200 <= self.status < 400  # 2xx and 3xx responses count as OK
class RedisCacheManager:
conn: Redis = None
@classmethod
async def get_conn(cls, redis_url: str) -> None:
if not cls.conn:
cls.conn = await aioredis.create_redis(redis_url)
@classmethod
async def close_conn(cls):
if cls.conn:
cls.conn.close()
await cls.conn.wait_closed()
@classmethod
async def check_cache(cls, url: str, headers: Optional[dict] = None) -> Optional[bytes]:
return await cls.conn.get(cls.__create_key(url, headers))
@classmethod
@asyncio_task
async def cache_data(cls, url: str, response: ResponseWrapper, request_headers: Optional[dict],
ttl: int = 180) -> int:
key = cls.__create_key(url, request_headers)
hash_data = {
'status': response.status,
'headers': response.headers,
'data': response.data
}
json = ujson.dumps(hash_data, ensure_ascii=False)
return await cls.conn.set(key, json, expire=ttl)
@staticmethod
def __create_key(url: str, headers: Optional[dict] = None) -> int:
        data: dict = dict(headers) if headers else {}  # copy so the caller's headers dict is not mutated
        data['url'] = url
        # hash the key/value pairs, not just the keys, so distinct URLs and header
        # values produce distinct cache keys
        return hash(frozenset(data.items()))
class BaseRestClient:
# 0 means that aiohttp will never interrupt connection by itself
REQUEST_TIMEOUT = 0
api_url = ''
__cache_manager = RedisCacheManager
@classmethod
async def get(cls, url: str, headers: Optional[dict] = None, **kwargs) -> ResponseWrapper:
params = urlencode(kwargs, True)
request_url = f'{cls.api_url}/{url}?{params}'
cache = await cls.__cache_manager.check_cache(request_url, headers)
if cache:
cache_data = ujson.loads(cache)
return ResponseWrapper(request_url=request_url, headers=cache_data['headers'], status=cache_data['status'],
data=cache_data['data'])
else:
response = await cls.__make_http_request('GET', url, headers, params=params)
if response.ok:
await cls.__cache_manager.cache_data(request_url, response, request_headers=headers)
return response
@classmethod
async def post(cls, url: str, headers: Optional[dict] = None, data: Optional[dict] = None) -> ResponseWrapper:
return await cls.__make_http_request('POST', url, headers, data=data)
@classmethod
async def put(cls, url: str, headers: Optional[dict] = None, data: Optional[dict] = None) -> ResponseWrapper:
return await cls.__make_http_request('PUT', url, headers, data=data)
@classmethod
async def patch(cls, url: str, headers: Optional[dict] = None, data: Optional[dict] = None) -> ResponseWrapper:
return await cls.__make_http_request('PATCH', url, headers, data=data)
@classmethod
async def delete(cls, url: str, headers: Optional[dict] = None, **kwargs) -> ResponseWrapper:
params = urlencode(kwargs, True)
return await cls.__make_http_request('DELETE', url, headers, params=params)
@classmethod
async def __make_http_request(cls, method: str, url: str, headers: Optional[dict] = None,
params: Optional[str] = None, data: Optional[dict] = None) -> ResponseWrapper:
request_url = f'{cls.api_url}/{url}?{params}'
async with aiohttp.ClientSession() as session:
logger.debug(f'Sending {method} request to {url}, headers: {headers}')
async with session.request(method=method, url=request_url, data=data, headers=headers,
timeout=cls.REQUEST_TIMEOUT) as response:
logger.debug(f'Got response from {request_url}, status {response.status}')
try:
                    resp_data = await response.json()  # type: ignore
except aiohttp.ContentTypeError:
return ResponseWrapper(request_url=request_url, headers=dict(response.headers),
status=response.status, data=dict(error=response.text))
else:
return ResponseWrapper(request_url=request_url, headers=dict(response.headers),
status=response.status,
data=resp_data)
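
# Usage sketch (not part of the original module): a hypothetical subclass and a request
# against it. The base URL, endpoint and Redis address below are made up; the cache
# manager connection must be opened before the first GET.
class ExampleClient(BaseRestClient):
    api_url = 'https://api.example.com/v1'  # hypothetical base URL


async def fetch_users():
    # GET responses are served from Redis when a cached entry exists,
    # otherwise fetched over HTTP and cached for 180 seconds by cache_data().
    await RedisCacheManager.get_conn('redis://localhost:6379')
    response = await ExampleClient.get('users', headers={'Accept': 'application/json'}, page=1)
    return response.data if response.ok else None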
|
StarcoderdataPython
|
1815154
|
# This script shows how to filter an existing target as a pose
# This is useful for a robot that has been calibrated and we need to get the filtered pose
# Important: It is assumed that the robot will reach the pose with the calculated configuration
from robolink import * # API to communicate with RoboDK
from robodk import * # basic matrix operations
def XYZWPR_2_Pose(xyzwpr):
# Convert X,Y,Z,A,B,C to a pose
return KUKA_2_Pose(xyzwpr)
def Pose_2_XYZWPR(pose):
# Convert a pose to X,Y,Z,A,B,C
return Pose_2_KUKA(pose)
# Start the RoboDK API and retrieve the robot:
RDK = Robolink()
robot = RDK.Item('', ITEM_TYPE_ROBOT)
if not robot.Valid():
raise Exception("Robot not available")
# Define the TCP
pose_tcp = XYZWPR_2_Pose([0, 0, 200, 0, 0, 0])
# Define the reference frame
pose_ref = XYZWPR_2_Pose([400, 0, 0, 0, 0, 0])
# Update the robot TCP and reference frame
robot.setTool(pose_tcp)
robot.setFrame(pose_ref)
# Accuracy can be ON or OFF:
# Very important for SolveFK and SolveIK (Forward/Inverse kinematics)
robot.setAccuracyActive(False)
# Define a nominal target in the joint space:
joints = [0, 0, 90, 0, 90, 0]
# Calculate the nominal robot position for the joint target:
# robot flange with respect to the robot base (4x4 pose)
pose_rob = robot.SolveFK(joints)
# Calculate pose_target: the TCP with respect to the reference frame
# (same value as shown in the Cartesian jog of the robot)
pose_target = invH(pose_ref)*pose_rob*pose_tcp
# The same pose target can be retrieved by calling robot.Pose() when the robot is at the target
# Example:
# robot.setJoints(joints)
# pose_target_2 = robot.Pose()
print('Target not filtered:')
print(joints)
print(Pose_2_XYZWPR(pose_target))
print('')
# Filter target: automatically filters a pose target according to calibrated kinematics
# IMPORTANT: Set the TCP and reference frame first
joints_approx = joints # joints_approx must be within 20 deg
pose_target_filtered, real_joints = robot.FilterTarget(pose_target, joints_approx)
print('Target filtered:')
print(real_joints.tolist())
print(Pose_2_XYZWPR(pose_target_filtered))
##########################################
# The following procedure shows how the filtering mechanism works behind the scenes
# This procedure is equivalent to FilterTarget() and does not need to be used
def FilterTarget(target, ref, tcp):
"""Target: pose of the TCP (tcp) with respect to the reference frame (ref)
jnts_ref: preferred joints for inverse kinematics calculation"""
# First: we need to calculate the accurate inverse kinematics to calculate the accurate joint data for the desired target
# Note: SolveIK and SolveFK take the robot into account (from the robot base frame to the robot flange)
robot.setAccuracyActive(True)
pose_rob = ref*target*invH(tcp)
robot_joints = robot.SolveIK(pose_rob)
# Second: Calculate the nominal forward kinematics as this is the calculation that the robot performs
robot.setAccuracyActive(False)
pose_rob_fixed = robot.SolveFK(robot_joints)
target_filtered = invH(ref)*pose_rob_fixed*tcp
return target_filtered
# We should get the same result by running the custom made filter:
#pose_target_filtered_2 = FilterTarget(pose_target, pose_ref, pose_tcp)
#print(Pose_2_XYZWPR(pose_target_filtered_2))
|
StarcoderdataPython
|
12857805
|
<reponame>Novartis/yap
#!/usr/bin/env python
"""
Copyright 2014 Novartis Institutes for Biomedical Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
class yap_conflict_check:
"""
Provides methods to perform file-file, file-sample, file-group and
sample-group comparisons and find conflicts.
"""
def __init__(self, input_files):
self.input_files = map(self.translate_path, input_files)
self.filename_dict = \
self.generate_filename_dict(self.input_files)
def translate_path(self, path):
"""
Given a path,
Returns a path after expanding environment and user variables and
relative paths to absolute path
"""
path = os.path.expandvars(path) # expand environment variables
path = os.path.expanduser(path) # expand user's home directory
# don't convert to absolute if just filename
if len(os.path.dirname(path)) == 0 and (path not in ['.', ".."]):
return path
path = os.path.abspath(path) # convert relative path to absolute
return path # return output
def translate_paired_end_paths(self, paired_end_files):
'''
Given a list of paired end files
Returns a new list of paired end files with each file translated
using translate path function
'''
if len(paired_end_files) <= 0:
return [] # return empty o/p
paired_end_files_out = [] # output variable
        for paired_list in paired_end_files:  # translate each path
            paired_list_out = map(self.translate_path, paired_list)
            paired_end_files_out.append(paired_list_out)  # append translated pair to o/p
return paired_end_files_out # return output
def get_paths(self, name):
'''
Given a name,
        Returns the list of paths stored under keys that exactly match or
        start with the given name
'''
if len(name) <= 0:
return None # return null for empty input
# return if an exact match is found
if name in self.filename_dict:
return self.filename_dict[name]
# return all values for a partial match
matches = []
for key in self.filename_dict:
if key.find(name) == 0:
new_paths = self.find_new_items(matches,
self.filename_dict[key])
# extend only if a unique match is found
if len(new_paths) > 0:
matches.extend(new_paths)
if len(matches) == 0:
return None # return null if no matches
else:
return matches # return output
def find_new_items(self, current_list, new_list):
'''
Given two lists,
Returns items which are not available in current lists,
Return empty list if no such items are found
'''
if len(current_list) == 0:
return new_list # all paths are new
        # select the items that are not in current_list and return them as a list
return filter((lambda item: item not in current_list),
new_list)
def validate_names_and_find_duplicates(self, names):
'''
Given list of filenames,
Calls validate_names_and_find_duplicates_with_finder with
get_paths as finder and returns the result
'''
return self.validate_names_and_find_duplicates_with_finder(
names,
self.get_paths)
def validate_names_and_find_duplicates_with_finder(self, filenames,
finder):
"""
        Input:
        --filenames: a list of filenames that occur in the contaminant file
        Checks that every filename exists among the input file names and
        that no filename is duplicated.
        Return values:
        --match_list: filenames resolved to existing input file paths
        --error_list: all filenames that do not exist among the input file names
        --duplicate_dict: [key:value]
            -key: filename for which duplicates occur
            -value: all paths where this filename occurs
"""
match_list = []
error_list = []
duplicate_dict = {}
# translate all filenames paths to complete paths
filenames = map(self.strip_space_tab_newline, filenames)
filenames = map(self.translate_path, filenames)
for fn in filenames:
if fn in self.input_files:
# filename exist in self.input_files
match_list.append(fn)
else:
# treat fn as basename
paths = finder(fn)
if paths is not None:
# basename exists
if len(paths) > 1:
# duplicate happens
duplicate_dict[fn] = paths
else:
# no duplicate
match_list.extend(paths)
else:
# basename not exists
error_list.append(fn)
return match_list, error_list, duplicate_dict
def generate_filename_dict(self, paths):
"""
Given a list of complete filepaths,
Returns a dictionary, with keys as filenames and values as list of
all paths that contain the corresponding key
Invariant: Paths contain filenames complete with extension.
"""
output = {} # output variable
if len(paths) <= 0:
return output # return empty output for empty input
for path in paths:
output[path] = [path] # add each path as key also.
basename = os.path.basename(path) # get filename from path
if len(basename) <= 0:
continue # skip if no filename in path
# get name without extension
basename_no_ext = os.path.splitext(basename)[0]
# create a new entry if it does not exist, append otherwise
if basename in output:
output[basename].append(path)
else:
output[basename] = [path]
# include a name with filename without extension also
if len(basename_no_ext) <= 0:
                continue  # skip if name is empty
if basename_no_ext != basename: # add an entry for just filename
if basename_no_ext in output:
output[basename_no_ext].append(path)
else:
output[basename_no_ext] = [path]
return output # return dict
def find_duplicates_in_list(self, input):
"""
Given a list,
Returns a dictionary of all duplicates in the list,
Return empty dictionary if no duplicate entries are found.
"""
output = {} # output variable
if len(input) <= 0:
return output # return empty output for empty input
for item in input:
if item not in output: # check only if item not seen earlier
item_count = input.count(item) # count items
# add to output if item occurs more than once in list
if item_count > 1:
output[item] = item_count
return output
def list_to_sentence(self, list):
"""
Translate the given list to a string.
"""
sentence = ""
for i in range(0, len(list)):
if i == len(list) - 1:
sentence += "'" + list[i] + "'"
else:
sentence += "'" + list[i] + "' and "
return sentence
def strip_space_tab_newline(self, input):
'''
Given a string,
Returns a string after removing starting and trailing spaces,
tabs and new line character
'''
if len(input) <= 0:
return '' # empty o/p for empty i/p
input = input.strip()
input = input.strip('\n')
input = input.strip('\t')
return input
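
# Illustrative usage (not part of the original module; the paths are hypothetical).
# The module targets Python 2, where map() returns a list as the code above expects.
# Guarded so nothing runs on import.
if __name__ == "__main__":
    checker = yap_conflict_check(["/data/run1/sample_a.fastq",
                                  "/data/run2/sample_a.fastq",
                                  "/data/run1/sample_b.fastq"])
    # "sample_a" resolves to two paths (reported as a duplicate), "sample_b" resolves
    # cleanly, and "sample_c" matches nothing (reported as an error).
    matches, errors, duplicates = checker.validate_names_and_find_duplicates(
        ["sample_a", "sample_b", "sample_c"])
    print(matches, errors, duplicates)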
|
StarcoderdataPython
|
9665455
|
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import gui.widgets as tkw
from PIL import ImageTk, Image
class FrameTagPictures(tk.Frame):
def __init__(self,
parent,
controller=None,
prop_frame={},
**kwargs):
self.controller = controller
self.prop_frame = {}
self.prop_frame.update(prop_frame)
self.grid_frame = {'row': 0,
'column': 0,
'sticky': 'nsew',
'padx': 5,
'pady': 5}
self.grid_frame.update(kwargs)
tk.Frame.__init__(self, parent, **self.prop_frame)
self.grid(**self.grid_frame)
self.image_from = None
self.image_to = None
self._set_frame()
self.bind("<Visibility>", self.update_frame)
self.update_frame()
def _set_frame(self):
layout = dict(padx=5,
pady=5,
sticky='nsew')
self.columns = ['namn']
data = self.controller.get_tree_dict()
self.tree_widget = tkw.FileTreeviewWidget(self, columns=self.columns, data=data,
callback=self._on_select_files, rowspan=2, **layout)
self.frame_from = tk.LabelFrame(self, text='Fran')
self.frame_from.grid(row=0, column=1, **layout)
self.frame_to = tk.LabelFrame(self, text='Till')
self.frame_to.grid(row=1, column=1, **layout)
self.frame_tag = tk.LabelFrame(self, text='Tagga')
self.frame_tag.grid(row=0, column=2, rowspan=2, **layout)
tkw.grid_configure(self, nr_rows=2, nr_columns=3)
self._set_image_frames()
self._set_tag_frame()
def _set_image_frames(self):
self.image_widget_from = tkw.ImageWidget(self.frame_from, image_size=0.1)
tkw.grid_configure(self.frame_from)
self.image_widget_to = tkw.ImageWidget(self.frame_to, image_size=0.1)
tkw.grid_configure(self.frame_to)
def _set_tag_frame(self):
layout = dict(padx=5,
pady=5,
sticky='nsew')
self.tag_widget = tkw.TagWidget(self.frame_tag, show_untagged=False, columnspan=2, **layout)
self.button_set_tags = tk.Button(self.frame_tag, text='Satt taggar', command=self._set_tags)
self.button_set_tags.grid(row=1, column=0, **layout)
self.button_remove_tags = tk.Button(self.frame_tag, text='Ta bort taggar', command=self._remove_tags)
self.button_remove_tags.grid(row=1, column=1, **layout)
tkw.grid_configure(self.frame_tag, nr_columns=2, nr_rows=2)
def update_frame(self, event=None):
tag_types_names = self.controller.get_tag_names_in_tag_types()
self.tag_widget.update_frame(tag_types_names)
def _on_select_files(self, selected_file_names):
if not selected_file_names:
return
if selected_file_names[0] != self.image_from:
self.image_from = selected_file_names[0]
image_path = self.controller.get_file_path(self.image_from)
self.image_widget_from.show_image(image_path)
if selected_file_names[-1] != self.image_to:
self.image_to = selected_file_names[-1]
image_path = self.controller.get_file_path(self.image_to)
self.image_widget_to.show_image(image_path)
def _set_tags(self):
tag_list = self.tag_widget.get_checked()
file_name_list = self.tree_widget.get_selected()
for file_name in file_name_list:
for tag in tag_list:
self.controller.add_tag_to_file(tag_name=tag, file_name=file_name)
self.tag_widget.uncheck_all()
def _remove_tags(self):
tag_list = self.tag_widget.get_checked()
file_name_list = self.tree_widget.get_selected()
for file_name in file_name_list:
for tag in tag_list:
self.controller.remove_tag_from_file(tag_name=tag, file_name=file_name)
self.tag_widget.uncheck_all()
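
# Wiring sketch (not part of the original module): the controller interface is inferred
# from the calls above (get_tree_dict, get_tag_names_in_tag_types, get_file_path,
# add_tag_to_file, remove_tag_from_file); the stub and its return values are
# hypothetical. Guarded so no window opens on import.
if __name__ == "__main__":
    class _StubController:
        def get_tree_dict(self):
            return {}
        def get_tag_names_in_tag_types(self):
            return {}
        def get_file_path(self, file_name):
            return file_name
        def add_tag_to_file(self, tag_name, file_name):
            print('tag', tag_name, '->', file_name)
        def remove_tag_from_file(self, tag_name, file_name):
            print('untag', tag_name, '->', file_name)

    root = tk.Tk()
    FrameTagPictures(root, controller=_StubController())
    root.mainloop()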
|
StarcoderdataPython
|
4889716
|
<gh_stars>0
import os
import logging
from telegram import ParseMode
from telegram.ext import CommandHandler
from trello import TrelloClient
from reventlov.bot_plugins import get_list_from_environment
from reventlov.bot_plugin import BotPlugin
version = '0.0.1'
logger = logging.getLogger(__name__)
logger.info(f'Trello module v{version} loaded')
class TrelloPlugin(BotPlugin):
'''
I can manage Trello boards for you
'''
def __init__(self, dispatcher):
self.client = TrelloClient(
api_key=os.getenv('TRELLO_API_KEY'),
api_secret=os.getenv('TRELLO_API_SECRET'),
token=os.getenv('TRELLO_API_TOKEN'),
)
self.admins = get_list_from_environment('TRELLO_ADMINS')
self.load_orgs()
self.__boards = []
self.handlers = [
CommandHandler(
'list',
self.list_objects,
pass_args=True,
)
]
self.add_handlers(dispatcher)
self.version = '0.0.1'
logger.info(f'Trello plugin v{version} enabled')
@property
def organization(self):
if len(self.__orgs) == 1:
return self.__orgs[0]
else:
return os.getenv('TRELLO_DEFAULT_ORGANIZATION')
def load_orgs(self):
logger.info('Getting organizations')
self.__orgs = self.client.list_organizations()
def load_boards(self):
logger.info('Getting boards')
self.__boards = self.organization.get_boards('open')
@property
def orgs(self):
if len(self.__orgs) == 0:
self.load_orgs()
return self.__orgs
@property
def boards(self):
if len(self.__boards) == 0:
self.load_boards()
return self.__boards
@property
def org_names(self):
return [org.name for org in self.orgs]
@property
def board_names(self):
return [board.name for board in self.boards]
def get_board(self, board_name):
board_found = None
for board in self.boards:
if board.name == board_name:
board_found = board
break
return board_found
def list_orgs(self):
msg = '\n'.join([
f'- *{org_name}*' if org_name == self.organization.name
else f'- {org_name}'
for org_name in self.org_names
])
return msg, ParseMode.MARKDOWN
def list_boards(self):
msg = '\n'.join([
f'- {board_name}'
for board_name in self.board_names
])
return msg, ParseMode.MARKDOWN
def list_column_cards(self, column):
msg = '\n'.join([
f' + {card.name}'
for card in column.list_cards()
])
return msg
def list_board_columns(self, board_name):
msg = f'No such board `{board_name}`'
board = self.get_board(board_name)
if board is not None:
msg = '\n'.join([
f'- {column.name} ({column.cardsCnt()} cards) '
f'\n{self.list_column_cards(column)}'.strip()
for column in board.open_lists()
])
return msg, ParseMode.HTML
def list_objects(self, bot, update, args):
'''
List Trello objects visible to me.
Object type depends on first argument:
- `orgs`: List organizations.
- `boards`: List boards.
- `board_name`: List cards in `board_name`.
By default it lists organizations.
'''
msg = ''
parse_mode = None
if update.message.from_user.username in self.admins:
if len(args) < 1 or args[0] == 'orgs':
msg, parse_mode = self.list_orgs()
elif args[0] == 'boards':
msg, parse_mode = self.list_boards()
elif args[0] in self.board_names:
msg, parse_mode = self.list_board_columns(args[0])
else:
msg = f'No such board `{args[0]}`'
parse_mode = ParseMode.MARKDOWN
msg += '\n\nYou can specify either one of: `orgs`, `boards`, ' \
'or use a `board_name` to list its cards'
else:
msg = 'You must be admin to list Trello objects'
bot.send_message(
chat_id=update.message.chat_id,
text=msg,
parse_mode=parse_mode,
)
|
StarcoderdataPython
|
6504553
|
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import fields
import polyaxon_sdk
from polyaxon.polyflow.params import ParamSchema
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
class TunerSchema(BaseCamelSchema):
hub_ref = fields.Str(required=True)
queue = RefOrObject(fields.Str(allow_none=True))
presets = RefOrObject(fields.List(fields.Str(allow_none=True)))
params = fields.Dict(
keys=fields.Str(), values=fields.Nested(ParamSchema), allow_none=True
)
@staticmethod
def schema_config():
return V1Tuner
class V1Tuner(BaseConfig, polyaxon_sdk.V1Tuner):
"""You can configure Polyaxon to use a custom tuner to customize the built-in optimizers.
The tuner allows you to customize the behavior of the operations that
generate new suggestions based on the previous observations.
You can provide a queue or provide presets to override
the default configuration of the component.
You can resolve any context information from the main operation inside a tuner,
like params, globals, ...
To override the complete behavior users can provide their own component.
Args:
hub_ref: str
        queue: str, optional
presets: List[str], optional
params: Dict[str, [V1Param](/docs/core/specification/params/)], optional
## YAML usage
```yaml
>>> tuner:
>>> hubRef: acme/custom-tuner
```
## Python usage
```python
>>> from polyaxon.lifecycle import V1Statuses
>>> from polyaxon.polyflow import V1Tuner
>>> tuner = V1Tuner(
>>> hub_ref="acme/custom-tuner",
>>> queue="agent-name/queue-name",
    >>> presets=["preset1", "preset2"],
>>> )
```
## Fields
### hubRef
    For several algorithms, Polyaxon provides built-in tuners. These tuners
    are hosted on the public component hub. Users can customize them or
    build a different service to generate new suggestions.
To provide a custom component hosted on Polyaxon Component Hub, you can use `hubRef`
```yaml
>>> tuner:
>>> hubRef: acme/optimizer-logic:v1
...
```
### queue
The [queue](/docs/core/scheduling-strategies/queues/) to use.
If not provided, the default queue will be used.
```yaml
>>> tuner:
>>> queue: agent-name/queue-name
```
If the agent name is not specified, Polyaxon will resolve the name of the queue
based on the default agent.
```yaml
>>> hook:
>>> queue: queue-name
```
### presets
The [presets](/docs/management/organizations/presets/) to use for the tuner operation,
if provided, it will override the component's presets otherwise
the presets of the component will be used if available.
```yaml
>>> tuner:
>>> presets: [test]
```
### params
The [params](/docs/core/specification/params/) to pass if the handler requires extra params,
they will be validated against the inputs/outputs.
If a parameter is passed and the component does not define a corresponding inputs/outputs,
a validation error will be raised unless the param has the `contextOnly` flag enabled.
```yaml
>>> tuner:
>>> params:
>>> param1: {value: 1.1}
>>> param2: {value: test}
>>> ...
```
"""
IDENTIFIER = "tuner"
SCHEMA = TunerSchema
REDUCED_ATTRIBUTES = [
"hubRef",
"params",
"queue",
"presets",
]
|
StarcoderdataPython
|
5003942
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""The parser mediator object."""
import datetime
import logging
import os
from dfvfs.lib import definitions as dfvfs_definitions
# TODO: disabled as long nothing is listening on the parse error queue.
# from plaso.lib import event
from plaso.lib import py2to3
from plaso.lib import timelib
class ParserMediator(object):
"""Class that implements the parser mediator."""
def __init__(
self, event_queue_producer, parse_error_queue_producer, knowledge_base):
"""Initializes a parser mediator object.
Args:
event_queue_producer: the event object queue producer (instance of
ItemQueueProducer).
parse_error_queue_producer: the parse error queue producer (instance of
ItemQueueProducer).
knowledge_base: A knowledge base object (instance of KnowledgeBase),
which contains information from the source data needed
for parsing.
"""
super(ParserMediator, self).__init__()
self._abort = False
self._event_queue_producer = event_queue_producer
self._extra_event_attributes = {}
self._file_entry = None
self._filter_object = None
self._knowledge_base = knowledge_base
self._mount_path = None
self._parse_error_queue_producer = parse_error_queue_producer
self._parser_chain_components = []
self._text_prepend = None
self.number_of_events = 0
self.number_of_parse_errors = 0
@property
def abort(self):
"""Read-only value to indicate the parsing should be aborted."""
return self._abort
@property
def codepage(self):
"""The codepage."""
return self._knowledge_base.codepage
@property
def hostname(self):
"""The hostname."""
return self._knowledge_base.hostname
@property
def knowledge_base(self):
"""The knowledge base."""
return self._knowledge_base
@property
def platform(self):
"""The platform."""
return self._knowledge_base.platform
@property
def timezone(self):
"""The timezone object."""
return self._knowledge_base.timezone
@property
def year(self):
"""The year."""
return self._knowledge_base.year
def _GetInode(self, inode_value):
"""Retrieves the inode from the inode value.
Note that the inode value in TSK can be a string, e.g. '27-128-1'.
Args:
inode_value: a string or an integer containing the inode.
Returns:
An integer containing the inode or -1 on error if the inode value
cannot be converted to an integer.
"""
if isinstance(inode_value, py2to3.INTEGER_TYPES):
return inode_value
if isinstance(inode_value, float):
return int(inode_value)
if not isinstance(inode_value, basestring):
return -1
if b'-' in inode_value:
inode_value, _, _ = inode_value.partition(b'-')
try:
return int(inode_value, 10)
except ValueError:
return -1
def _GetRelativePath(self, path_spec):
"""Retrieves the relative path.
Args:
path_spec: a path specification (instance of dfvfs.PathSpec).
Returns:
A string containing the relative path or None.
"""
if not path_spec:
return
# TODO: Solve this differently, quite possibly inside dfVFS using mount
# path spec.
location = getattr(path_spec, u'location', None)
if not location and path_spec.HasParent():
location = getattr(path_spec.parent, u'location', None)
if not location:
return
data_stream = getattr(path_spec, u'data_stream', None)
if data_stream:
location = u'{0:s}:{1:s}'.format(location, data_stream)
if path_spec.type_indicator != dfvfs_definitions.TYPE_INDICATOR_OS:
return location
# If we are parsing a mount point we don't want to include the full
# path to file's location here, we are only interested in the path
# relative to the mount point.
if self._mount_path:
_, _, location = location.partition(self._mount_path)
return location
def _GetYearFromFileEntry(self):
"""Retrieves the year from the file entry date and time values.
This function uses the creation time if available otherwise the change
time (metadata last modification time) is used.
Returns:
An integer containing the year of the file entry or None.
"""
file_entry = self.GetFileEntry()
stat_object = file_entry.GetStat()
posix_time = getattr(stat_object, u'crtime', None)
if posix_time is None:
posix_time = getattr(stat_object, u'ctime', None)
if posix_time is None:
logging.warning(
u'Unable to determine creation year from file stat information.')
return
try:
datetime_object = datetime.datetime.fromtimestamp(
posix_time, self._knowledge_base.timezone)
except ValueError as exception:
logging.error((
u'Unable to determine creation year from file stat information '
u'with error: {0:s}').format(exception))
return
return datetime_object.year
def AddEventAttribute(self, attribute_name, attribute_value):
"""Add an attribute that will be set on all events produced.
Setting attributes using this method will cause events produced via this
mediator to have an attribute with the provided name set with the
provided value.
Args:
attribute_name: The name of the attribute to set.
attribute_value: The value of the attribute to set.
Raises:
KeyError: If an attribute with the given name is already set.
"""
if hasattr(self._extra_event_attributes, attribute_name):
raise KeyError(u'Value already set for attribute {0:s}'.format(
attribute_name))
self._extra_event_attributes[attribute_name] = attribute_value
def AppendToParserChain(self, plugin_or_parser):
"""Add a parser or plugin to the chain to the chain."""
self._parser_chain_components.append(plugin_or_parser.NAME)
def ClearEventAttributes(self):
"""Clear out attributes that should be added to all events."""
self._extra_event_attributes = {}
def ClearParserChain(self):
"""Clears the parser chain."""
self._parser_chain_components = []
def GetDisplayName(self, file_entry=None):
"""Retrieves the display name for a file entry.
Args:
file_entry: optional file entry object (instance of dfvfs.FileEntry).
If none is provided, the display name of self._file_entry
will be returned.
Returns:
A human readable string that describes the path to the file entry.
Raises:
ValueError: if the file entry is missing.
"""
if file_entry is None:
file_entry = self._file_entry
if file_entry is None:
raise ValueError(u'Missing file entry')
path_spec = getattr(file_entry, u'path_spec', None)
relative_path = self._GetRelativePath(path_spec)
if not relative_path:
relative_path = file_entry.name
if not relative_path:
return file_entry.path_spec.type_indicator
if self._text_prepend:
relative_path = u'{0:s}{1:s}'.format(self._text_prepend, relative_path)
parent_path_spec = path_spec.parent
if parent_path_spec and path_spec.type_indicator in [
dfvfs_definitions.TYPE_INDICATOR_BZIP2,
dfvfs_definitions.TYPE_INDICATOR_GZIP]:
parent_path_spec = parent_path_spec.parent
if parent_path_spec and parent_path_spec.type_indicator in [
dfvfs_definitions.TYPE_INDICATOR_VSHADOW]:
store_index = getattr(path_spec.parent, u'store_index', None)
if store_index is not None:
return u'VSS{0:d}:{1:s}:{2:s}'.format(
store_index + 1, file_entry.path_spec.type_indicator, relative_path)
return u'{0:s}:{1:s}'.format(
file_entry.path_spec.type_indicator, relative_path)
def GetEstimatedYear(self):
"""Retrieves an estimate of the year.
This function first looks to see if the knowledge base defines a year, if
not it tries to determine the year based on the file entry metadata, if
that fails the current year is used.
Returns:
An integer containing the year of the file entry or None.
"""
# TODO: improve this method to get a more reliable estimate.
# Preserve the year-less date and sort this out in the psort phase.
year = self._knowledge_base.year
if not year:
# TODO: Find a decent way to actually calculate the correct year
# instead of relying on stats object.
year = self._GetYearFromFileEntry()
if not year:
year = timelib.GetCurrentYear()
return year
def GetFileEntry(self):
"""Retrieves the active file entry.
Returns:
A file entry (instance of dfvfs.FileEntry).
"""
return self._file_entry
def GetFilename(self):
"""Retrieves the name of the active file entry.
Returns:
A string containing the name of the active file entry or None.
"""
if not self._file_entry:
return
data_stream = getattr(self._file_entry.path_spec, u'data_stream', None)
if data_stream:
return u'{0:s}:{1:s}'.format(self._file_entry.name, data_stream)
return self._file_entry.name
def GetParserChain(self):
"""The parser chain up to this point."""
return u'/'.join(self._parser_chain_components)
def MatchesFilter(self, event_object):
"""Checks if the event object matches the filter.
Args:
event_object: the event object (instance of EventObject).
Returns:
A boolean value indicating if the event object matches the filter.
"""
return self._filter_object and self._filter_object.Matches(event_object)
def PopFromParserChain(self):
"""Remove the last added parser or plugin from the chain."""
self._parser_chain_components.pop()
def ProcessEvent(
self, event_object, parser_chain=None, file_entry=None, query=None):
"""Processes an event before it is emitted to the event queue.
Args:
event_object: the event object (instance of EventObject).
parser_chain: Optional string containing the parsing chain up to this
point.
file_entry: Optional file entry object (instance of dfvfs.FileEntry).
The default is None, which will default to the current
file entry set in the mediator.
query: Optional query string.
"""
# TODO: rename this to event_object.parser_chain or equivalent.
if not getattr(event_object, u'parser', None) and parser_chain:
event_object.parser = parser_chain
# TODO: deprecate text_prepend in favor of an event tag.
if not getattr(event_object, u'text_prepend', None) and self._text_prepend:
event_object.text_prepend = self._text_prepend
if file_entry is None:
file_entry = self._file_entry
display_name = None
if file_entry:
event_object.pathspec = file_entry.path_spec
if not getattr(event_object, u'filename', None):
path_spec = getattr(file_entry, u'path_spec', None)
event_object.filename = self._GetRelativePath(path_spec)
if not display_name:
# TODO: dfVFS refactor: move display name to output since the path
# specification contains the full information.
display_name = self.GetDisplayName(file_entry)
stat_object = file_entry.GetStat()
inode_value = getattr(stat_object, u'ino', None)
if not hasattr(event_object, u'inode') and inode_value:
event_object.inode = self._GetInode(inode_value)
if not getattr(event_object, u'display_name', None) and display_name:
event_object.display_name = display_name
if not getattr(event_object, u'hostname', None) and self.hostname:
event_object.hostname = self.hostname
if not getattr(event_object, u'username', None):
user_sid = getattr(event_object, u'user_sid', None)
username = self._knowledge_base.GetUsernameByIdentifier(user_sid)
if username:
event_object.username = username
if not getattr(event_object, u'query', None) and query:
event_object.query = query
for attribute, value in self._extra_event_attributes.iteritems():
if hasattr(event_object, attribute):
raise KeyError(u'Event already has a value for {0:s}'.format(attribute))
setattr(event_object, attribute, value)
def ProduceEvent(self, event_object, query=None):
"""Produces an event onto the queue.
Args:
event_object: the event object (instance of EventObject).
query: Optional query string.
"""
self.ProcessEvent(
event_object, parser_chain=self.GetParserChain(),
file_entry=self._file_entry, query=query)
if self.MatchesFilter(event_object):
return
self._event_queue_producer.ProduceItem(event_object)
self.number_of_events += 1
def ProduceEvents(self, event_objects, query=None):
"""Produces events onto the queue.
Args:
event_objects: a list or generator of event objects (instances of
EventObject).
query: Optional query string.
"""
for event_object in event_objects:
self.ProduceEvent(event_object, query=query)
def ProduceParseError(self, message):
"""Produces a parse error.
Args:
message: The message of the error.
"""
self.number_of_parse_errors += 1
# TODO: Remove call to logging when parser error queue is fully functional.
logging.error(
u'[{0:s}] unable to parse file: {1:s} with error: {2:s}'.format(
self.GetParserChain(), self.GetDisplayName(), message))
# TODO: disabled as long nothing is listening on the parse error queue.
# if self._parse_error_queue_producer:
# path_spec = self._file_entry.path_spec
# parser_chain = self.GetParserChain()
# parse_error = event.ParseError(
# parser_chain, message, path_spec=path_spec)
# self._parse_error_queue_producer.ProduceItem(parse_error)
# self.number_of_parse_errors += 1
def ResetCounters(self):
"""Resets the counters."""
self.number_of_events = 0
self.number_of_parse_errors = 0
def ResetFileEntry(self):
"""Resets the active file entry."""
self._file_entry = None
def SetFileEntry(self, file_entry):
"""Sets the active file entry.
Args:
file_entry: the file entry (instance of dfvfs.FileEntry).
"""
self._file_entry = file_entry
def SetFilterObject(self, filter_object):
"""Sets the filter object.
Args:
filter_object: the filter object (instance of objectfilter.Filter).
"""
self._filter_object = filter_object
def SetMountPath(self, mount_path):
"""Sets the mount path.
Args:
mount_path: string containing the mount path.
"""
# Remove a trailing path separator from the mount path so the relative
# paths will start with a path separator.
if mount_path and mount_path.endswith(os.sep):
mount_path = mount_path[:-1]
self._mount_path = mount_path
def SetTextPrepend(self, text_prepend):
"""Sets the text prepend.
Args:
text_prepend: string that contains the text to prepend to every event.
"""
self._text_prepend = text_prepend
def SignalAbort(self):
"""Signals the parsers to abort."""
self._abort = True
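
# Bare-bones illustration (not part of Plaso): it exercises only the parser-chain
# bookkeeping, which needs no real queue producers or knowledge base, so simple
# None stand-ins are passed. The fake parser/plugin classes are hypothetical.
if __name__ == u'__main__':
  class _FakeParser(object):
    NAME = u'winreg'

  class _FakePlugin(object):
    NAME = u'userassist'

  mediator = ParserMediator(None, None, None)
  mediator.AppendToParserChain(_FakeParser())
  mediator.AppendToParserChain(_FakePlugin())
  print(mediator.GetParserChain())   # winreg/userassist
  mediator.PopFromParserChain()
  print(mediator.GetParserChain())   # winreg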
|
StarcoderdataPython
|
11220841
|
import numpy as np
from sklearn.utils import check_random_state
from mne.io import BaseRaw
from joblib import Memory
from .core_smica import SMICA
from .mne import ICA, transfer_to_mne
from .utils import fourier_sampling
location = './cachedir'
memory = Memory(location, verbose=0)
def _transform_set(M, D):
'''Moves the matrices D using the matrix M
Parameters
----------
M : array-like, shape (N, N)
Movement
D : array-like, shape (K, N, N)
Array of current estimated matrices. K is the number of matrices
Returns
-------
op : array-like, shape (K, N, N)
Array of the moved matrices. op[i] = M.D[i].M.T
'''
K, _, _ = D.shape
N, _ = M.shape
op = np.zeros((K, N, N))
for i, d in enumerate(D):
op[i] = M.dot(d.dot(M.T))
return op
def _move(epsilon, D):
'''Moves the matrices D by a perturbation epsilon
Parameters
----------
epsilon : array-like, shape (N, N)
Perturbation
D : array-like, shape (K, N, N)
Array of current estimated matrices. K is the number of matrices
Returns
-------
M : array-like, shape (N, N)
Displacement matrix
op : array-like, shape (K, N, N)
Array of the moved matrices. op[i] = M.D[i].M.T
'''
_, N, _ = D.shape
M = np.eye(N) + epsilon
return M, _transform_set(M, D)
def _loss(B_list):
op = 0.
for B in B_list:
Br = B.ravel()
Bd = np.diag(B)
op += Br.dot(Br) - Bd.dot(Bd)
return op
def _joint_diag(C, max_iter, tol=1e-7, theta=0.5, max_ls_tries=20,
verbose=False):
if verbose:
print(C.shape)
K, N, _ = C.shape
D = C.copy()
W = np.eye(N)
old_loss = _loss(D)
step = 1.
for n in range(max_iter):
# Isolate the diagonals
diagonals = np.diagonal(D, axis1=1, axis2=2)
# Compute the z_ij
z = np.dot(diagonals.T, diagonals)
# Compute the y_ij
y = np.sum(D * diagonals[:, None, :], axis=0)
# Compute the new W
z_diag = np.diagonal(z)
det = (z_diag[:, None] * z_diag[None, :] - z ** 2) + np.eye(N)
eps = (z * y.T - z_diag[:, None] * y) / det
# np.fill_diagonal(W, 0.)
# Stopping criterion
norm = np.sqrt(np.mean(eps ** 2))
if verbose:
print(n, norm)
if norm < tol:
break
# Scale
if norm > theta:
eps *= theta / norm
# Move
for ls in range(max_ls_tries):
M, D_new = _move(step * eps, D)
new_loss = _loss(D_new)
if new_loss < old_loss:
step = min(1, 2 * step)
break
else:
step = max(step / 2, 1e-5)
old_loss = new_loss
D = D_new
W = M @ W
return W
@memory.cache(ignore=['verbose'])
def sobi(X, lags, n_components=None,
tol=1e-7, max_iter=1000, verbose=False):
"""
Use sobi for source estimation
X : data matrix
    lags : sequence of time lags (in samples) to use
"""
n_sensors, n_samples = X.shape
p = len(lags)
if n_components is None:
n_components = n_sensors
u, d, _ = np.linalg.svd(X, full_matrices=False)
del _
whitener = (u / d).T[:n_components]
del u, d
Y = whitener.dot(X)
C = np.zeros((p, n_components, n_components))
for i, lag in enumerate(lags):
t = n_samples - lag
C[i] = np.dot(Y[:, -t:], Y[:, lag:].T) / t
W = _joint_diag(C, max_iter=max_iter, tol=tol, verbose=verbose)
return W.dot(whitener)
class SOBI(SMICA):
def __init__(self, p, n_components,
freqs, sfreq, avg_noise=False, rng=None):
'''
n_components : number of sources
freqs : the frequency intervals
sfreq : sampling frequency
'''
self.p = p
self.lags = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 12, 14, 16, 18, 20,
25, 30, 35, 40, 45, 50,
55, 60, 65, 70, 80, 90, 95, 100,
120, 140, 160, 180, 200, 220,
240, 260, 280, 300]) * sfreq / 1000
self.lags = self.lags.astype(int)
self.n_components = n_components
self.freqs = freqs
self.sfreq = sfreq
self.avg_noise = avg_noise
self.f_scale = 0.5 * (freqs[1:] + freqs[:-1])
self.rng = check_random_state(rng)
self.filtering_method = 'pinv'
def fit(self, X, y=None, **kwargs):
'''
Fits sobi to data X (p x n matrix sampled at fs)
'''
self.X = X
C, ft, freq_idx = fourier_sampling(X, self.sfreq, self.freqs)
n_mat, n_sensors, _ = C.shape
self.C_ = C
self.ft_ = ft
self.freq_idx_ = freq_idx
W = sobi(X, self.lags, self.n_components, **kwargs)
self.A_ = np.linalg.pinv(W)
self.powers_ = np.zeros((n_mat, self.n_components))
for i in range(n_mat):
self.powers_[i] = np.diag(W.dot(C[i]).dot(W.T))
scale = np.mean(self.powers_, axis=0, keepdims=True)
self.A_ = self.A_ * np.sqrt(scale)
self.powers_ = self.powers_ / scale
self.sigmas_ = np.zeros((C.shape[0], X.shape[0]))
return self
def compute_sources(self, X=None, method='pinv'):
if method == 'wiener':
raise ValueError('Only method=pinv is implemented for SOBI')
return super().compute_sources(X=X, method=method)
class SOBI_mne(ICA):
def __init__(self, p, n_components, freqs, rng=None):
self.p = p
self.n_components = n_components
self.freqs = freqs
self.f_scale = 0.5 * (freqs[1:] + freqs[:-1])
self.rng = check_random_state(rng)
def fit(self, inst, picks=None, avg_noise=False, **kwargs):
'''
Fits smica to inst (either raw or epochs)
'''
self.inst = inst
self.info = inst.info
self.sfreq = inst.info['sfreq']
self.picks = picks
self.avg_noise = avg_noise
if isinstance(inst, BaseRaw):
self.inst_type = 'raw'
X = inst.get_data(picks=picks)
else:
self.inst_type = 'epoch'
X = inst.get_data(picks=picks)
n_epochs, _, _ = X.shape
X = np.hstack(X)
self.X = X
X /= np.std(X)
smica = SOBI(self.p, self.n_components, self.freqs, self.sfreq,
self.avg_noise)
smica.fit(X, **kwargs)
self.powers = smica.powers_
self.A = smica.A_
self.sigmas = smica.sigmas_
self.smica = smica
self.ica_mne = transfer_to_mne(self.A, self.inst, self.picks)
return self
def compute_sources(self, X=None, method='pinv'):
return self.smica.compute_sources(X, method=method)
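
# Synthetic-data sketch (not part of the original module): demixes a random
# instantaneous mixture with the standalone `sobi` function defined above.
# Source model, sensor count and lag choices are illustrative only.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    n_samples = 5000
    # two temporally correlated sources (integrated white noise), zero-mean
    sources = np.cumsum(rng.randn(2, n_samples), axis=1)
    sources -= sources.mean(axis=1, keepdims=True)
    mixing = rng.randn(3, 2)            # 3 sensors, 2 sources
    X = mixing @ sources
    W = sobi(X, lags=np.arange(1, 20), n_components=2)
    estimated_sources = W @ X           # unmixed signals, shape (2, n_samples)
    print(estimated_sources.shape)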
|
StarcoderdataPython
|
11224662
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from utils import conv2d_flipkernel, adjecent_matrix, adjecent_sparse
def dot(x, y, sparse=False):
if sparse:
return tf.sparse_tensor_dense_matmul(x, y)
else:
return tf.matmul(x, y)
def kernel_net_coord(coord, weight, train, config):
x = tf.reshape(coord, [-1, 8])
x = tf.concat([x, tf.reshape(weight, [-1,1])], axis=1)
x = slim.fully_connected(x, 32, activation_fn=tf.nn.relu, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), scope="fc_1")
x = slim.fully_connected(x, 64, activation_fn=tf.nn.relu, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), scope="fc_2")
x = slim.fully_connected(x, 1, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), scope="fc_3")
return tf.reshape(x, [-1, config.nodesize, config.nodesize])
class ir_Block_coord():
def __init__(self, scope, X, Adj, Dist, Support, Coord, S, train, config):
self.scope = scope
self.X = X
self.Adj = Adj
self.Support = Support
self.Dist = Dist
self.Coord = Coord
self.S = S
self.config = config
self.train = train
def build_model(self):
with tf.variable_scope(self.scope):
return self.build_ir_Block_coord()
def build_ir_Block_coord(self):
ka = self.config.ka
k = self.config.k # Number of value iterations performed
t = self.config.t
ch_i = self.config.ch_i # Channels in input layer
ch_h = self.config.ch_h # Channels in initial hidden layer
ch_q = self.config.ch_q # Channels in q layer (~actions)
state_batch_size = self.config.statebatchsize # k+1 state inputs for each channel
bs = self.config.batchsize
img_s = self.config.nodesize
diag = np.zeros([img_s, img_s])
np.fill_diagonal(diag, 1.0)
DO_SHARE=None
# Generate weights using kernel_net_coord
P = []
for i in range(ch_q):
with tf.variable_scope('p_'+str(i)):
coeff = kernel_net_coord(self.Coord, self.Dist, self.train, self.config)
coeff = coeff * self.Support
P.append(coeff)
P_fb = []
for i in range(ch_q):
with tf.variable_scope('pb_'+str(i)):
coeff = kernel_net_coord(self.Coord, self.Dist, self.train, self.config)
coeff = coeff * self.Support
P_fb.append(coeff)
P = tf.transpose(tf.stack(P), [1,0,2,3])
P_fb = tf.transpose(tf.stack(P_fb), [1,0,2,3])
# Copy r for each channel
r_ = self.X
r_repeat = []
for j in range(ch_q):
r_repeat.append(r_)
r_repeat = tf.stack(r_repeat)
r_repeat = tf.transpose(r_repeat, [1,0,2])
r_repeat = tf.expand_dims(r_repeat, axis=-1)
q = tf.matmul(P, r_repeat)
v = tf.reduce_max(q, axis=1, keep_dims=True, name="v")
v_ = tf.reshape(v, [-1, img_s])
# Copy v for each channel
v_repeat = []
for i in range(ch_q):
v_repeat.append(v_)
v_repeat = tf.stack(v_repeat)
v_repeat = tf.transpose(v_repeat, [1,0,2])
v_repeat = tf.expand_dims(v_repeat, axis=-1)
for i in range(0, k-1):
q1 = tf.matmul(P, r_repeat)
q2 = tf.matmul(P_fb, v_repeat)
q = q1 + q2
v = tf.reduce_max(q, axis=1, keep_dims=True, name="v")
v_ = tf.reshape(v, [-1, img_s])
v_repeat = []
for j in range(ch_q):
v_repeat.append(v_)
v_repeat = tf.stack(v_repeat)
v_repeat = tf.transpose(v_repeat, [1,0,2])
v_repeat = tf.expand_dims(v_repeat, axis=-1)
q1 = tf.matmul(P, r_repeat)
q2 = tf.matmul(P_fb, v_repeat)
q = q1 + q2
        # Select adjacent nodes to generate the next action probability.
bs = tf.shape(q)[0]
rprn = tf.reshape(tf.tile(tf.reshape(tf.range(bs), [-1, 1]), [1, state_batch_size]), [-1])
ins = tf.cast(tf.reshape(self.S, [-1]), tf.int32)
idx_in = tf.transpose(tf.stack([ins, rprn]), [1, 0])
v_idx = tf.gather_nd(tf.transpose(self.Adj, [2,0,1]), idx_in, name="v_out")
v_out_rp = []
for j in range(state_batch_size):
v_out_rp.append(v_)
v_out_rp = tf.stack(v_out_rp)
v_out_rp = tf.transpose(v_out_rp, [1,0,2])
v_out_rp = tf.reshape(v_out_rp, [-1, img_s])
logits = tf.multiply(v_idx, v_out_rp)
output = tf.nn.softmax(logits, name="output")
return logits, output
|
StarcoderdataPython
|
9608455
|
<reponame>silverlogic/plum-back<filename>apps/cards/models.py<gh_stars>1-10
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.choices import Choices
from model_utils.models import TimeStampedModel
class Card(TimeStampedModel):
Type = Choices(
('C', 'credit', _('Credit')),
('P', 'prepaid', _('Prepaid')),
        ('D', 'debit', _('Debit')),
('R', 'deferred_debt', _('Deferred Debt')),
('H', 'charge', _('Charge Card')),
)
SubType = Choices(
('N', 'non_reloadable', _('Non-Reloadable')),
('R', 'reloadable', _('Reloadable')),
)
family = models.ForeignKey('family.Family', related_name='cards')
owner_type = models.ForeignKey(ContentType)
owner_id = models.PositiveIntegerField()
owner = GenericForeignKey('owner_type', 'owner_id')
name_on_card = models.CharField(max_length=255)
number = models.CharField(max_length=20)
expiration_date = models.DateField(help_text='Day is ignored.')
type = models.CharField(max_length=100, choices=Type)
sub_type = models.CharField(max_length=100, choices=SubType)
amount_on_card = models.DecimalField(max_digits=20, decimal_places=2, default=0)
amount_spent = models.DecimalField(max_digits=20, decimal_places=2, default=0)
visa_document_id = models.CharField(max_length=255, blank=True)
def __str__(self):
return '{} {}'.format(self.name_on_card, self.number[-4:])
class Meta:
ordering = ['created']
class Rule(models.Model):
Type = Choices(
('global', 'global_', _('Global')),
('merchant', 'merchant', _('Merchant'))
)
MerchantType = Choices(
('MCT_ADULT_ENTERTAINMENT', 'adult_entertainment', 'Adult Entertainment'),
('MCT_AIRFARE', 'airfare', 'Airfare'),
('MCT_ALCOHOL', 'alcohol', 'Alcohol'),
        ('MCT_APPAREL_AND_ACCESSORIES', 'apparel', 'Apparel and Accessories'),
('MCT_AUTOMOTIVE', 'auto', 'Automotive'),
('MCT_CAR_RENTAL', 'car_rental', 'Car Rental'),
('MCT_ELECTRONICS', 'electronics', 'Electronics'),
('MCT_ENTERTAINMENT_AND_SPORTINGEVENTS', 'entertainment', 'Entertainment and Sporting Events'),
('MCT_GAMBLING', 'gambling', 'Gambling'),
('MCT_GAS_AND_PETROLEUM', 'gas', 'Gas and Petroleum'),
('MCT_GROCERY', 'grocery', 'Grocery'),
('MCT_HOTEL_AND_LODGING', 'hotel_and_lodging', 'Hotel and Lodging'),
('MCT_HOUSEHOLD', 'household', 'Household'),
('MCT_PERSONAL_CARE', 'personal_care', 'Personal Care'),
('MCT_RECREATION', 'recreation', 'Recreation'),
        ('MCT_SMOKE_AND_TOBACCO', 'smoke_and_tobacco', 'Smoke and Tobacco'),
)
card = models.ForeignKey('Card', related_name='rules')
type = models.CharField(max_length=100, choices=Type)
merchant_types = ArrayField(models.CharField(max_length=255, choices=MerchantType), blank=True, default=list)
class Transfer(models.Model):
from_card = models.ForeignKey('Card', related_name='from_transfers')
to_card = models.ForeignKey('Card', related_name='to_transfers')
amount = models.DecimalField(max_digits=20, decimal_places=2)
def __str__(self):
return '{} to {}: ${}'.format(self.from_card.number[-4:], self.to_card.number[-4:], self.amount)
def save(self, *args, **kwargs):
if not self.pk:
self.to_card.amount_on_card += self.amount
self.to_card.save()
super().save(*args, **kwargs)
class Transaction(models.Model):
Status = Choices(
('approved', _('Approved')),
('declined', _('Declined')),
)
card = models.ForeignKey('Card')
amount = models.DecimalField(max_digits=20, decimal_places=2)
merchant_name = models.CharField(max_length=50)
status = models.CharField(max_length=50, choices=Status)
when = models.DateTimeField()
def __str__(self):
return '${} at {} as {}'.format(self.amount, self.merchant_name, self.card.number[-4:])
def save(self, *args, **kwargs):
if not self.pk:
self.card.amount_spent += self.amount
self.card.amount_on_card -= self.amount
self.card.save()
self.send_email()
super().save(*args, **kwargs)
def send_email(self):
context = {
'last_4': self.card.number[-4:],
'transaction': self
}
message = render_to_string('transaction-alert.txt', context=context)
send_mail('New Transaction', message=message, from_email=None, recipient_list=[parent.user.email for parent in self.card.family.parents.all()])
|
StarcoderdataPython
|
1605952
|
<filename>test_short.py
import RPi.GPIO as GPIO
from time import sleep
import time
Motor1A = 16
Motor1B = 18
Motor1E = 22
Motor2A = 19
Motor2B = 21
Motor2E = 23
Trigcenter = 38
Echocenter = 40
Trigright = 38
Echoright = 40
Trigleft = 38
Echoleft = 40
x = 0
GPIO.setmode(GPIO.BOARD)
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(Motor1B,GPIO.OUT)
GPIO.setup(Motor1E,GPIO.OUT)
GPIO.setup(Motor2A,GPIO.OUT)
GPIO.setup(Motor2B,GPIO.OUT)
GPIO.setup(Motor2E,GPIO.OUT)
p = GPIO.PWM(22, 50)
p.start(85)
def get_distance(trig_pin, echo_pin):
distance_list = []
distance_average = 0
i = 0
while i < 10:
GPIO.output(trig_pin, True)
time.sleep(0.00001)
GPIO.output(trig_pin, False)
while GPIO.input(echo_pin) == 0:
pulse = time.time()
while GPIO.input(echo_pin) == 1:
end_pulse = time.time()
distance_list.append(round((end_pulse - pulse) * 340 * 100 / 2, 1))
i = i + 1
for dist in distance_list:
distance_average = distance_average + dist
return distance_average / 10
# The centre, right and left sensors share the same physical pins in this test,
# so one setup per pin pair is enough.
GPIO.setup(Trigcenter, GPIO.OUT)
GPIO.setup(Echocenter, GPIO.IN)
GPIO.output(Trigcenter, False)
print("Distance: %s cm" % get_distance(Trigcenter, Echocenter))
while x == 0:
    if (get_distance(Trigcenter, Echocenter) > 20 and
            get_distance(Trigright, Echoright) > 20 and
            get_distance(Trigleft, Echoleft) > 20):
        print("Going forwards")
GPIO.output(Motor1A,GPIO.HIGH)
GPIO.output(Motor1B,GPIO.LOW)
GPIO.output(Motor1E,GPIO.HIGH)
GPIO.output(Motor2A,GPIO.HIGH)
GPIO.output(Motor2B,GPIO.LOW)
GPIO.output(Motor2E,GPIO.HIGH)
else:
        while (get_distance(Trigcenter, Echocenter) < 20 and
               get_distance(Trigright, Echoright) < 20 and
               get_distance(Trigleft, Echoleft) < 20):
GPIO.output(Motor1A,GPIO.LOW)
GPIO.output(Motor1B,GPIO.LOW)
GPIO.output(Motor1E,GPIO.LOW)
GPIO.output(Motor2A,GPIO.HIGH)
GPIO.output(Motor2B,GPIO.LOW)
GPIO.output(Motor2E,GPIO.HIGH)
sleep(0.35)
GPIO.cleanup()
|
StarcoderdataPython
|
3232247
|
#!/usr/bin/env python3
#
# Copyright 2018 Red Hat, Inc.
#
# Authors:
# <NAME> <<EMAIL>>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.
from django.template import Context, Template
from patchew.tags import tail_lines, grep_A, grep_B, grep_C, grep, lines_between
import unittest
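# Exercises the custom grep/tail/lines_between template filters and tags, both through Django templates and by calling the helpers directly.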
class CustomTagsTest(unittest.TestCase):
def assertTemplate(self, template, expected, **kwargs):
context = Context(kwargs)
self.assertEqual(Template(template).render(context), expected)
def test_template_filters(self):
self.assertTemplate('{{s|ansi2text}}', 'dbc', s='abc\rd')
self.assertTemplate('{{s|grep:"[0-9]"}}', '0\n9', s='0\na\n9')
self.assertTemplate('{{s|grep_A:"b"}}',
'b\nc\nd\ne\n---\nb',
s='a\nb\nc\nd\ne\nf\nx\ny\nz\nb')
self.assertTemplate('{{s|grep_B:"b"}}',
'a\nb\n---\nx\ny\nz\nb',
s='a\nb\nc\nd\ne\nf\nx\ny\nz\nb')
self.assertTemplate('{{s|grep_C:"b"}}',
'a\nb\nc\nd\ne\n---\nx\ny\nz\nb',
s='a\nb\nc\nd\ne\nf\nx\ny\nz\nb')
self.assertTemplate('{{s|tail_lines:3}}', 'b\nc\nd', s='a\nb\nc\nd')
def test_template_tags(self):
self.assertTemplate('{% ansi2text s %}', 'dbc', s='abc\rd')
self.assertTemplate('{% grep s "[0-9]" %}', '0\n9', s='0\na\n9')
self.assertTemplate('{% grep_A s regex="[bc]" n=1 %}', 'b\nc\nd', s='a\nb\nc\nd')
self.assertTemplate('{% grep_B s regex="[bc]" n=1 %}', 'a\nb\nc', s='a\nb\nc\nd')
self.assertTemplate('{% grep_C s "b" n=1 %}', 'a\nb\nc', s='a\nb\nc\nd')
self.assertTemplate('{% tail_lines s n=3 %}', 'b\nc\nd', s='a\nb\nc\nd')
self.assertTemplate('{% lines_between s start="^b$" stop="c" %}', 'b\nc', s='a\nb\nc\nd')
def test_grep(self):
self.assertEqual(grep('0\na\n9', '[0-9]'), '0\n9')
self.assertEqual(grep('0\na\n9', '[0-9]', '---'), '0\n---\n9')
def test_grep_A(self):
self.assertEqual(grep_A('a\nb\nc\nd', 'b', 1, None), 'b\nc')
self.assertEqual(grep_A('a\nb\nc\nd', 'b', 2, None), 'b\nc\nd')
self.assertEqual(grep_A('a\nb\nc\nd\nb\ne', 'b', 1, None), 'b\nc\nb\ne')
self.assertEqual(grep_A('a\nb\nc\nd\nb\ne', 'b', 2, None), 'b\nc\nd\nb\ne')
self.assertEqual(grep_A('a\nb\nc\nd\nz\nb\ne', 'b', 1, None), 'b\nc\nb\ne')
self.assertEqual(grep_A('a\nb\nc\nd\nz\nb\ne', 'b', 2, None), 'b\nc\nd\nb\ne')
self.assertEqual(grep_A('a\nb\nc\nz\nb\nb\ne', 'b', 1, None), 'b\nc\nb\nb\ne')
self.assertEqual(grep_A('b\nc\nz\nb\nb\ne', 'b', 1, None), 'b\nc\nb\nb\ne')
self.assertEqual(grep_A('b\n', 'b', 1, None), 'b')
def test_grep_A_sep(self):
self.assertEqual(grep_A('a\nb\nc\nd', 'b', 1), 'b\nc')
self.assertEqual(grep_A('a\nb\nc\nd', 'b', 2), 'b\nc\nd')
self.assertEqual(grep_A('a\nb\nc\nd\nb\ne', 'b', 1), 'b\nc\n---\nb\ne')
self.assertEqual(grep_A('a\nb\nc\nd\nb\ne', 'b', 2), 'b\nc\nd\nb\ne')
self.assertEqual(grep_A('a\nb\nc\nd\nz\nb\ne', 'b', 1), 'b\nc\n---\nb\ne')
self.assertEqual(grep_A('a\nb\nc\nd\nz\nb\nb\ne', 'b', 1), 'b\nc\n---\nb\nb\ne')
self.assertEqual(grep_A('b\nc\nz\nb\nb\ne', 'b', 1), 'b\nc\n---\nb\nb\ne')
self.assertEqual(grep_A('b\n', 'b', 1), 'b')
def test_grep_B(self):
self.assertEqual(grep_B('a\nb\nc\nd', 'b', 1, None), 'a\nb')
self.assertEqual(grep_B('a\nb\nc\nd', 'b', 2, None), 'a\nb')
self.assertEqual(grep_B('a\nb\nc\nd\nb\ne', 'b', 1, None), 'a\nb\nd\nb')
self.assertEqual(grep_B('a\nb\nc\nd\nz\nb\ne', 'b', 1, None), 'a\nb\nz\nb')
self.assertEqual(grep_B('a\nb\nc\nd\nz\nb\ne', 'b', 2, None), 'a\nb\nd\nz\nb')
self.assertEqual(grep_B('a\nb\nc\nz\nb\nb\ne', 'b', 1, None), 'a\nb\nz\nb\nb')
self.assertEqual(grep_B('b\nc\nz\nb\nb\ne', 'b', 1, None), 'b\nz\nb\nb')
self.assertEqual(grep_B('b\n', 'b', 1, None), 'b')
def test_grep_B_sep(self):
self.assertEqual(grep_B('a\nb\nc\nd', 'b', 1), 'a\nb')
self.assertEqual(grep_B('a\nb\nc\nd', 'b', 2), 'a\nb')
self.assertEqual(grep_B('a\nb\nc\nd\nb\ne', 'b', 1), 'a\nb\n---\nd\nb')
self.assertEqual(grep_B('a\nb\nc\nd\nz\nb\ne', 'b', 1), 'a\nb\n---\nz\nb')
self.assertEqual(grep_B('a\nb\nc\nd\nz\nb\ne', 'b', 2), 'a\nb\n---\nd\nz\nb')
self.assertEqual(grep_B('a\nb\nc\nz\nb\nb\ne', 'b', 1), 'a\nb\n---\nz\nb\nb')
self.assertEqual(grep_B('b\nc\nz\nb\nb\ne', 'b', 1), 'b\n---\nz\nb\nb')
self.assertEqual(grep_B('b\n', 'b', 1), 'b')
def test_grep_C(self):
self.assertEqual(grep_C('a\nb\nc\nd', 'b', 1, None), 'a\nb\nc')
self.assertEqual(grep_C('a\nb\nc\nd', 'b', 2, None), 'a\nb\nc\nd')
self.assertEqual(grep_C('a\nb\nc\nd\nb\ne', 'b', 1, None), 'a\nb\nc\nd\nb\ne')
self.assertEqual(grep_C('a\nb\nc\nd\nz\nb\ne', 'b', 1, None), 'a\nb\nc\nz\nb\ne')
self.assertEqual(grep_C('a\nb\nc\nd\nz\nb\ne', 'b', 2, None), 'a\nb\nc\nd\nz\nb\ne')
self.assertEqual(grep_C('a\nb\nc\nz\nb\nb\ne', 'b', 1, None), 'a\nb\nc\nz\nb\nb\ne')
self.assertEqual(grep_C('b\nc\nz\nb\nb\ne', 'b', 1, None), 'b\nc\nz\nb\nb\ne')
self.assertEqual(grep_C('b\n', 'b', 1, None), 'b')
def test_grep_C_sep(self):
self.assertEqual(grep_C('a\nb\nc\nd', 'b', 1), 'a\nb\nc')
self.assertEqual(grep_C('a\nb\nc\nd', 'b', 2), 'a\nb\nc\nd')
self.assertEqual(grep_C('a\nb\nc\nd\nb\ne', 'b', 1), 'a\nb\nc\nd\nb\ne')
self.assertEqual(grep_C('a\nb\nc\nd\nz\nb\ne', 'b', 1), 'a\nb\nc\n---\nz\nb\ne')
self.assertEqual(grep_C('a\nb\nc\nd\nz\nb\ne', 'b', 2), 'a\nb\nc\nd\nz\nb\ne')
self.assertEqual(grep_C('a\nb\nc\nz\nb\nb\ne', 'b', 1), 'a\nb\nc\nz\nb\nb\ne')
self.assertEqual(grep_C('b\nc\nz\nb\nb\ne', 'b', 1), 'b\nc\nz\nb\nb\ne')
self.assertEqual(grep_C('b\n', 'b', 1), 'b')
def test_tail_lines(self):
self.assertEqual(tail_lines('', 0), '')
self.assertEqual(tail_lines('', 1), '')
self.assertEqual(tail_lines('', 2), '')
self.assertEqual(tail_lines('', 4), '')
self.assertEqual(tail_lines('a\nb\n', 0), '')
self.assertEqual(tail_lines('a\nb\n', 1), 'b')
self.assertEqual(tail_lines('a\nb\n', 2), 'a\nb')
self.assertEqual(tail_lines('a\nb\nc\n', 2), 'b\nc')
self.assertEqual(tail_lines('a\nb\nc\n', 4), 'a\nb\nc')
self.assertEqual(tail_lines('a\nb\nc\nd\n', 2), 'c\nd')
self.assertEqual(tail_lines('\n\n\n', 2), '\n')
self.assertEqual(tail_lines('\n\n\nbc', 2), '\nbc')
self.assertEqual(tail_lines('\n\nbc', 3), '\n\nbc')
self.assertEqual(tail_lines('\n\n\n\nbc', 3), '\n\nbc')
def test_lines_between(self):
self.assertEqual(lines_between('a\nb\nc\nd', 'b', 'c'), 'b\nc')
self.assertEqual(lines_between('a\nb\nc\nd', 'b', 'c', False), 'b')
self.assertEqual(lines_between('a\nb\ncb\nd', 'b', 'c'), 'b\ncb')
self.assertEqual(lines_between('a\nb\ncb\nd', 'b', 'c', False), 'b\ncb\nd')
self.assertEqual(lines_between('a\nb\n\n\na\nb', '.', '^$'), 'a\nb\n\na\nb')
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
309139
|
import configparser
import argparse
import torch
from torch.utils.data import TensorDataset
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla
import torch_xla.core.xla_model as xm
from DTI_model import DTI_model, modelConfig
from Tester import Tester, TesterConfig
from utils import *
from prepare_dataset import *
SEED = 3056
seed_everything(SEED)
#command line parser for config file
config = configparser.ConfigParser()
parser = argparse.ArgumentParser(prog="Train")
parser.add_argument("-c","--config",dest="filename", help="Pass a training config file",metavar="FILE")
args = parser.parse_args()
config.read(args.filename)
#dataset:
#Dataset:
kiba_dataset_path = config['kiba_dataset']['kiba_path']
device = 'cpu'
_, _, DTI_test, affinity_test = generate_DTI(kiba_dataset_path)
DTI_test = torch.from_numpy(DTI_test).to(device=device, dtype=torch.long)
affinity_test = torch.from_numpy(affinity_test).to(device=device, dtype=torch.long)
kiba_test = TensorDataset(DTI_test,affinity_test)
num_workers = int(config['training_config']['num_workers'])
batch_size = int(config['training_config']['eval_batch_size'])
ckpt_path = config['training_config']['ckpt_path']
max_epoch = int(config['training_config']['max_epoch'])
TPU = config['training_config'].getboolean('TPU')
measure_LM_contribution = config['training_config']['measure_LM_contribution']
from DTI_model import DTI_model, modelConfig
#model
query_block_conv_in_dim = int(config['model_config']['query_block_conv_in_dim'])
query_block_conv_out_dim = int(config['model_config']['query_block_conv_out_dim'])
query_block_conv_stride = int(config['model_config']['query_block_conv_stride'])
query_block_conv_padding = int(config['model_config']['query_block_conv_padding'])
query_block_conv_kernel_size = int(config['model_config']['query_block_conv_kernel_size'])
LM = config['model_config']['LM']
Is_Recurrent = config['model_config']['Is_Recurrent']
query_block_recurrent_input_size = int(config['model_config']['query_block_recurrent_input_size'])
query_block_recurrent_hidden_size = int(config['model_config']['query_block_recurrent_hidden_size'])
query_block_recurrent_num_layers = int(config['model_config']['query_block_recurrent_num_layers'])
query_block_recurrent_read_dim = int(config['model_config']['query_block_recurrent_read_dim'])
query_block_recurrent_write_dim = int(config['model_config']['query_block_recurrent_write_dim'])
groupnorm = int(config['model_config']['groupnorm'])
LM_block_read_dim = int(config['model_config']['LM_block_read_dim'])
LM_block_write_dim = int(config['model_config']['LM_block_write_dim'])
#prepare model
model_config = modelConfig(
query_block_conv_in_dim = query_block_conv_in_dim,
query_block_conv_out_dim = query_block_conv_out_dim,
query_block_conv_stride = query_block_conv_stride,
query_block_conv_padding = query_block_conv_padding,
query_block_conv_kernel_size = query_block_conv_kernel_size,
LM = LM,
Is_Recurrent=Is_Recurrent,
query_block_recurrent_input_size = query_block_recurrent_input_size,
query_block_recurrent_hidden_size = query_block_recurrent_hidden_size,
query_block_recurrent_num_layers = query_block_recurrent_num_layers,
query_block_recurrent_read_dim = query_block_recurrent_read_dim,
query_block_recurrent_write_dim = query_block_recurrent_write_dim ,
groupnorm = groupnorm,
LM_block_read_dim = LM_block_read_dim,
LM_block_write_dim = LM_block_write_dim,
vocab_size = VOCAB_SIZE #set at prepare_dataset.py
)
model = DTI_model(model_config)
test_config = TesterConfig(
num_workers = num_workers,
batch_size = batch_size,
max_epoch = max_epoch,
ckpt_path = ckpt_path,
measure_LM_contribution= measure_LM_contribution
)
tester = Tester(model, kiba_test, test_config)
def _map_fn(index):
# For xla_spawn (TPUs)
tester.test()
if __name__ == "__main__":
if TPU:
xmp.spawn(_map_fn, args=(), nprocs=num_workers,start_method='fork')
else:
tester.test()
|
StarcoderdataPython
|
1762428
|
<gh_stars>0
from collections import deque
num = int(input())
pumps = deque()
for _ in range(num):
info = [int(x) for x in input().split()]
pumps.append(info)
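# Try each pump index as the starting point; a full lap is possible only if the running fuel balance never drops below zero.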
for i in range(num):
is_valid = True
fuel = 0
for _ in range(num):
current = pumps.popleft()
fuel += current[0] - current[1]
if fuel < 0:
is_valid = False
pumps.append(current)
    if is_valid:
        print(i)
        break
    # Otherwise advance the starting pump by one position and try the next index.
    pumps.append(pumps.popleft())
|
StarcoderdataPython
|
8053389
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`Quantulum` parser.
"""
# Standard library
import re
import logging
from fractions import Fraction
from collections import defaultdict
from math import pow
# Quantulum
from . import load
from . import regex as reg
from . import classes as cls
from . import disambiguate as dis
from . import language
def _get_parser(lang='en_US'):
"""
Get parser module for given language
:param lang:
:return:
"""
return language.get('parser', lang)
###############################################################################
def extract_spellout_values(text, lang='en_US'):
"""
Convert spelled out numbers in a given text to digits.
"""
return _get_parser(lang).extract_spellout_values(text)
###############################################################################
def substitute_values(text, values):
"""
    Substitute spelled-out numeric values back into the text and track the resulting index shifts.
"""
shift, final_text, shifts = 0, text, defaultdict(int)
for value in values:
first = value['old_span'][0] + shift
second = value['old_span'][1] + shift
final_text = final_text[0:first] + value['new_surface'] + \
final_text[second:]
shift += len(value['new_surface']) - len(value['old_surface'])
for char in range(first + 1, len(final_text)):
shifts[char] = shift
logging.debug('Text after numeric conversion: "%s"', final_text)
return final_text, shifts
###############################################################################
def get_values(item, lang='en_US'):
"""
Extract value from regex hit.
"""
def callback(pattern):
return ' %s' % (reg.unicode_fractions()[pattern.group(0)])
fracs = r'|'.join(reg.unicode_fractions())
value = item.group('value')
# Remove grouping operators
value = re.sub(
r'(?<=\d)[%s](?=\d{3})' % reg.grouping_operators_regex(lang), '',
value)
# Replace unusual exponents by e (including e)
value = re.sub(
r'(?<=\d)(%s)(e|E|10)\^?' % reg.multiplication_operators_regex(lang),
'e', value)
# calculate other exponents
value, factors = resolve_exponents(value)
logging.debug("After exponent resolution: {}".format(value))
    value = re.sub(fracs, callback, value, flags=re.IGNORECASE)
range_separator = re.findall(
r'\d+ ?((?:-\ )?(?:%s)) ?\d' % '|'.join(reg.ranges(lang)), value)
uncer_separator = re.findall(
r'\d+ ?(%s) ?\d' % '|'.join(reg.uncertainties(lang)), value)
fract_separator = re.findall(r'\d+/\d+', value)
value = re.sub(' +', ' ', value)
uncertainty = None
if range_separator:
# A range just describes an uncertain quantity
values = value.split(range_separator[0])
values = [
float(re.sub(r'-$', '', v)) * factors[i]
for i, v in enumerate(values)
]
if values[1] < values[0]:
raise ValueError(
"Invalid range, with second item being smaller than the first "
"item"
)
mean = sum(values) / len(values)
uncertainty = mean - min(values)
values = [mean]
elif uncer_separator:
values = [float(i) for i in value.split(uncer_separator[0])]
uncertainty = values[1] * factors[1]
values = [values[0] * factors[0]]
elif fract_separator:
values = value.split()
try:
if len(values) > 1:
values = [
float(values[0]) * factors[0] + float(Fraction(values[1]))
]
else:
values = [float(Fraction(values[0]))]
except ZeroDivisionError as e:
raise ValueError('{} is not a number'.format(values[0]), e)
else:
values = [float(re.sub(r'-$', '', value)) * factors[0]]
logging.debug('\tUncertainty: %s', uncertainty)
logging.debug('\tValues: %s', values)
return uncertainty, values
###############################################################################
def resolve_exponents(value, lang='en_US'):
"""Resolve unusual exponents (like 2^4) and return substituted string and
factor
Params:
value: str, string with only one value
Returns:
str, string with basis and exponent removed
array of float, factors for multiplication
"""
factors = []
matches = re.finditer(
reg.number_pattern_groups(lang), value, re.IGNORECASE | re.VERBOSE)
for item in matches:
if item.group('base') and item.group('exponent'):
base = item.group('base')
exp = item.group('exponent')
if base in ['e', 'E']:
# already handled by float
factors.append(1)
continue
# exp = '10'
# Expect that in a pure decimal base,
# either ^ or superscript notation is used
if re.match(r'\d+\^?', base):
if not ('^' in base or re.match(
r'[%s]' % reg.unicode_superscript_regex(), exp)):
factors.append(1)
continue
for superscript, substitute in reg.unicode_superscript().items():
                exp = exp.replace(superscript, substitute)
exp = float(exp)
base = float(base.replace('^', ''))
factor = pow(base, exp)
stripped = str(value).replace(item.group('scale'), '')
value = stripped
factors.append(factor)
logging.debug("Replaced {} by factor {}".format(
item.group('scale'), factor))
else:
factors.append(1)
continue
return value, factors
###############################################################################
def build_unit_name(dimensions, lang='en_US'):
"""
Build the name of the unit from its dimensions.
"""
name = _get_parser(lang).name_from_dimensions(dimensions)
logging.debug('\tUnit inferred name: %s', name)
return name
###############################################################################
def get_unit_from_dimensions(dimensions, text, lang='en_US'):
"""
Reconcile a unit based on its dimensionality.
"""
key = load.get_key_from_dimensions(dimensions)
try:
unit = load.units(lang).derived[key]
except KeyError:
logging.debug(u'\tCould not find unit for: %s', key)
unit = cls.Unit(
name=build_unit_name(dimensions, lang),
dimensions=dimensions,
entity=get_entity_from_dimensions(dimensions, text, lang))
# Carry on original composition
unit.original_dimensions = dimensions
return unit
def name_from_dimensions(dimensions, lang='en_US'):
"""
Build the name of a unit from its dimensions.
Param:
dimensions: List of dimensions
"""
return _get_parser(lang).name_from_dimensions(dimensions)
def infer_name(unit):
"""
Return unit name based on dimensions
:return: new name of this unit
"""
name = name_from_dimensions(unit.dimensions) if unit.dimensions else None
return name
###############################################################################
def get_entity_from_dimensions(dimensions, text, lang='en_US'):
"""
Infer the underlying entity of a unit (e.g. "volume" for "m^3") based on
its dimensionality.
"""
new_derived = [{
'base': load.units(lang).names[i['base']].entity.name,
'power': i['power']
} for i in dimensions]
final_derived = sorted(new_derived, key=lambda x: x['base'])
key = load.get_key_from_dimensions(final_derived)
ent = dis.disambiguate_entity(key, text, lang)
if ent is None:
logging.debug('\tCould not find entity for: %s', key)
ent = cls.Entity(name='unknown', dimensions=new_derived)
return ent
###############################################################################
def parse_unit(item, unit, slash, lang='en_US'):
"""
Parse surface and power from unit text.
"""
return _get_parser(lang).parse_unit(item, unit, slash)
###############################################################################
def get_unit(item, text, lang='en_US'):
"""
Extract unit from regex hit.
"""
group_units = ['prefix', 'unit1', 'unit2', 'unit3', 'unit4']
group_operators = ['operator1', 'operator2', 'operator3', 'operator4']
# How much of the end is removed because of an "incorrect" regex match
unit_shortening = 0
item_units = [item.group(i) for i in group_units if item.group(i)]
if len(item_units) == 0:
unit = load.units(lang).names['dimensionless']
else:
derived, slash = [], False
multiplication_operator = False
for index in range(0, 5):
unit = item.group(group_units[index])
operator_index = None if index < 1 else group_operators[index - 1]
operator = None if index < 1 else item.group(operator_index)
# disallow spaces as operators in units expressed in their symbols
# Enforce consistency among multiplication and division operators
# Single exceptions are colloquial number abbreviations (5k miles)
if operator in reg.multiplication_operators(lang) or (
operator is None and unit and
not (index == 1 and unit in reg.suffixes(lang))):
if multiplication_operator != operator and not (
index == 1 and str(operator).isspace()):
if multiplication_operator is False:
multiplication_operator = operator
else:
# Cut if inconsistent multiplication operator
# treat the None operator differently - remove the
# whole word of it
if operator is None:
# For this, use the last consistent operator
# (before the current) with a space
# which should always be the preceding operator
derived.pop()
operator_index = group_operators[index - 2]
# Remove (original length - new end) characters
unit_shortening = item.end() - item.start(
operator_index)
logging.debug(
"Because operator inconsistency, cut from "
"operator: '{}', new surface: {}"
.format(
operator, text[item.start():item.end() -
unit_shortening]))
break
# Determine whether a negative power has to be applied to following
# units
if operator and not slash:
slash = any(
i in operator for i in reg.division_operators(lang))
# Determine which unit follows
if unit:
unit_surface, power = parse_unit(item, unit, slash, lang)
base = dis.disambiguate_unit(unit_surface, text, lang)
derived += [{
'base': base,
'power': power,
'surface': unit_surface
}]
unit = get_unit_from_dimensions(derived, text, lang)
logging.debug('\tUnit: %s', unit)
logging.debug('\tEntity: %s', unit.entity)
return unit, unit_shortening
###############################################################################
def get_surface(shifts, orig_text, item, text, unit_shortening=0):
"""
Extract surface from regex hit.
"""
# handle cut end
span = (item.start(), item.end() - unit_shortening)
logging.debug('\tInitial span: %s ("%s")', span, text[span[0]:span[1]])
real_span = (span[0] - shifts[span[0]], span[1] - shifts[span[1] - 1])
surface = orig_text[real_span[0]:real_span[1]]
logging.debug('\tShifted span: %s ("%s")', real_span, surface)
while any(surface.endswith(i) for i in [' ', '-']):
surface = surface[:-1]
real_span = (real_span[0], real_span[1] - 1)
while surface.startswith(' '):
surface = surface[1:]
real_span = (real_span[0] + 1, real_span[1])
logging.debug('\tFinal span: %s ("%s")', real_span, surface)
return surface, real_span
###############################################################################
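# Prime symbols used for feet/inches can collide with ordinary quotation marks; flag matches whose span falls inside a quoted string.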
def is_quote_artifact(orig_text, span):
"""
Distinguish between quotes and units.
"""
res = False
cursor = re.finditer(r'["\'][^ .,:;?!()*+-].*?["\']', orig_text)
for item in cursor:
if span[0] <= item.span()[1] <= span[1]:
res = item
break
return res
###############################################################################
def build_quantity(orig_text,
text,
item,
values,
unit,
surface,
span,
uncert,
lang='en_US'):
"""
Build a Quantity object out of extracted information.
Takes care of caveats and common errors
"""
return _get_parser(lang).build_quantity(orig_text, text, item, values,
unit, surface, span, uncert)
###############################################################################
def clean_text(text, lang='en_US'):
"""
Clean text before parsing.
"""
# Replace a few nasty unicode characters with their ASCII equivalent
maps = {'×': 'x', '–': '-', '−': '-'}
for element in maps:
text = text.replace(element, maps[element])
# Language specific cleaning
text = _get_parser(lang).clean_text(text)
logging.debug('Clean text: "%s"', text)
return text
###############################################################################
def parse(text, lang='en_US', verbose=False):
"""
Extract all quantities from unstructured text.
"""
log_format = '%(asctime)s --- %(message)s'
logging.basicConfig(format=log_format)
root = logging.getLogger()
if verbose: # pragma: no cover
level = root.level
root.setLevel(logging.DEBUG)
logging.debug('Verbose mode')
orig_text = text
logging.debug('Original text: "%s"', orig_text)
text = clean_text(text, lang)
values = extract_spellout_values(text, lang)
text, shifts = substitute_values(text, values)
quantities = []
for item in reg.units_regex(lang).finditer(text):
groups = dict(
[i for i in item.groupdict().items() if i[1] and i[1].strip()])
logging.debug(u'Quantity found: %s', groups)
try:
uncert, values = get_values(item, lang)
            unit, unit_shortening = get_unit(item, text, lang)
surface, span = get_surface(shifts, orig_text, item, text,
unit_shortening)
objs = build_quantity(orig_text, text, item, values, unit, surface,
span, uncert, lang)
if objs is not None:
quantities += objs
except ValueError as err:
logging.debug('Could not parse quantity: %s', err)
if verbose: # pragma: no cover
root.level = level
return quantities
###############################################################################
def inline_parse(text, verbose=False): # pragma: no cover
"""
Extract all quantities from unstructured text.
"""
parsed = parse(text, verbose=verbose)
shift = 0
for quantity in parsed:
index = quantity.span[1] + shift
to_add = u' {' + str(quantity) + u'}'
text = text[0:index] + to_add + text[index:]
shift += len(to_add)
return text
###############################################################################
def inline_parse_and_replace(text, lang='en_US',
verbose=False): # pragma: no cover
"""
Parse text and replace with the standardised quantities as string
"""
parsed = parse(text, lang=lang, verbose=verbose)
shift = 0
for quantity in parsed:
index_start = quantity.span[0] + shift
index_end = quantity.span[1] + shift
to_add = str(quantity)
text = text[0:index_start] + to_add + text[index_end:]
shift += len(to_add) - (quantity.span[1] - quantity.span[0])
return text
###############################################################################
def inline_parse_and_expand(text, lang='en_US', verbose=False):
"""
    Parse text and replace quantities with their speakable version
"""
    parsed = parse(text, lang=lang, verbose=verbose)
shift = 0
for quantity in parsed:
index_start = quantity.span[0] + shift
index_end = quantity.span[1] + shift
to_add = quantity.to_spoken()
text = text[0:index_start] + to_add + text[index_end:]
shift += len(to_add) - (quantity.span[1] - quantity.span[0])
return text
|
StarcoderdataPython
|
284402
|
<reponame>ionicsolutions/modelstore
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pytest
from modelstore.storage.local import FileSystemStorage
# pylint: disable=unused-import
from tests.storage.test_utils import (
TEST_FILE_CONTENTS,
TEST_FILE_NAME,
file_contains_expected_contents,
remote_file_path,
remote_path,
temp_file,
)
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
@pytest.fixture
def file_system_storage(tmp_path):
return FileSystemStorage(root_dir=str(tmp_path))
def test_create_from_environment_variables(monkeypatch):
# Does not fail when environment variables exist
monkeypatch.setenv("MODEL_STORE_ROOT_PREFIX", "~")
try:
_ = FileSystemStorage()
except:
pytest.fail("Failed to initialise storage from env variables")
def test_create_fails_with_missing_environment_variables(monkeypatch):
# Fails when environment variables are missing
for key in FileSystemStorage.BUILD_FROM_ENVIRONMENT.get("required", []):
monkeypatch.delenv(key, raising=False)
with pytest.raises(Exception):
_ = FileSystemStorage()
def test_validate(file_system_storage):
assert file_system_storage.validate()
assert os.path.exists(file_system_storage.root_prefix)
def test_push(tmp_path, file_system_storage):
prefix = remote_file_path()
result = file_system_storage._push(temp_file(tmp_path), prefix)
assert result == os.path.join(file_system_storage.root_prefix, prefix)
def test_pull(tmp_path, file_system_storage):
# Push the file to storage
prefix = remote_file_path()
remote_destination = file_system_storage._push(temp_file(tmp_path), prefix)
# Pull the file back from storage
local_destination = os.path.join(tmp_path, TEST_FILE_NAME)
result = file_system_storage._pull(remote_destination, tmp_path)
assert result == local_destination
assert os.path.exists(local_destination)
assert file_contains_expected_contents(local_destination)
@pytest.mark.parametrize(
"file_exists,should_call_delete",
[
(
False,
False,
),
(
True,
True,
),
],
)
def test_remove(file_exists, should_call_delete, tmp_path, file_system_storage):
# Push the file to storage
prefix = remote_file_path()
if file_exists:
file_system_storage._push(temp_file(tmp_path), prefix)
try:
# Remove the file
assert file_system_storage._remove(prefix) == should_call_delete
# The file no longer exists
assert not os.path.exists(os.path.join(file_system_storage.root_prefix, prefix))
except:
# Should fail gracefully here
pytest.fail("Remove raised an exception")
def test_read_json_objects_ignores_non_json(tmp_path, file_system_storage):
# Create files with different suffixes
prefix = remote_path()
for file_type in ["txt", "json"]:
source = os.path.join(tmp_path, f"test-file-source.{file_type}")
with open(source, "w") as out:
out.write(json.dumps({"key": "value"}))
# Push the file to storage
remote_destination = os.path.join(prefix, f"test-file-destination.{file_type}")
file_system_storage._push(source, remote_destination)
# Read the json files at the prefix
items = file_system_storage._read_json_objects(prefix)
assert len(items) == 1
def test_read_json_object_fails_gracefully(tmp_path, file_system_storage):
# Push a file that doesn't contain JSON to storage
remote_path = file_system_storage._push(
temp_file(tmp_path, contents="not json"), remote_file_path()
)
# Read the json files at the prefix
item = file_system_storage._read_json_object(remote_path)
assert item is None
def test_list_models_missing_domain(file_system_storage):
models = file_system_storage.list_models("domain-that-doesnt-exist")
assert len(models) == 0
def test_storage_location(file_system_storage):
# Asserts that the location meta data is correctly formatted
prefix = remote_file_path()
exp = {
"type": "file_system",
"path": os.path.join(file_system_storage.root_prefix, prefix),
}
result = file_system_storage._storage_location(prefix)
assert result == exp
@pytest.mark.parametrize(
"meta_data,should_raise,result",
[
(
{
"path": "/path/to/file",
},
False,
"/path/to/file",
),
],
)
def test_get_location(file_system_storage, meta_data, should_raise, result):
# Asserts that pulling the location out of meta data is correct
if should_raise:
with pytest.raises(ValueError):
file_system_storage._get_storage_location(meta_data)
else:
assert file_system_storage._get_storage_location(meta_data) == result
@pytest.mark.parametrize(
"state_name,should_create,expect_exists",
[
("state-1", False, False),
("state-2", True, True),
],
)
def test_state_exists(file_system_storage, state_name, should_create, expect_exists):
if should_create:
file_system_storage.create_model_state(state_name)
assert file_system_storage.state_exists(state_name) == expect_exists
|
StarcoderdataPython
|
3221427
|
import os
import pytest
import fetch_data as fd
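# Some of these tests download from real CEDA (dap.ceda.ac.uk) URLs and are skipped when the CI environment variable is set to "true".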
def test_file_logging():
import logging
from fetch_data import utils
dest = "./tests/downloads/logging_download.log"
utils.log_to_file(dest)
logging.warning("[TESTING] This is a test log for downloading")
with open(dest) as file:
assert "regrid" not in file.read()
def test_read_catalog():
fname = "./tests/example_catalog.yml"
cat = fd.read_catalog(fname)
assert isinstance(cat, dict)
assert cat != {}
def test_get_url_list_no_login_http():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/ESACCI-SEASURFACESALINITY-L4-*_25km-*-fv2.31.nc" # wildcards
)
flist = fd.core.get_url_list(url, use_cache=False)
assert len(flist) != 0
@pytest.mark.skipif(
os.environ.get("CI", "false") == "true", reason="Skipping downloads in CI"
)
def test_get_url_list_bad_url():
url = "http://fake_url.com/test_*_file.nc" # wildcards
with pytest.raises(FileNotFoundError):
fd.core.get_url_list(url, use_cache=False)
def test_get_url_list_bad_filename_raise():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/bad_file_*_name.nc" # wildcards
)
flist = fd.core.get_url_list(url, use_cache=False)
assert flist == []
def test_get_url_list_fake_kwarg_https():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/ESACCI-SEASURFACESALINITY-L4-*_25km-*-fv2.31.nc" # wildcards
)
with pytest.raises(KeyError):
fd.core.get_url_list(url, use_cache=False, username="tester", password="<PASSWORD>")
def test_choose_downloader():
import pooch
url = "ftp://thispartdoesntmatter.com"
protocol = fd.core.choose_downloader(url, progress=False)
assert protocol.__class__ == pooch.downloaders.FTPDownloader().__class__
@pytest.mark.skipif(
os.environ.get("CI", "false") == "true", reason="Skipping downloads in CI"
)
def test_download_urls():
url = (
"http://dap.ceda.ac.uk/neodc/esacci"
"/sea_surface_salinity/data/v02.31/7days/2012/01"
"/ESACCI-SEASURFACESALINITY-L4-*_25km-*-fv2.31.nc"
)
dest = "./tests/downloads/"
urls = fd.core.get_url_list(
url, cache_path=f"{dest}/remote_files.cache", use_cache=True
)[:1]
fd.core.download_urls(urls, dest_dir=dest)
def test_make_readme():
fname = "./tests/example_catalog.yml"
cat = fd.read_catalog(fname)
for key in cat:
cat[key]["name"] = key.upper().replace("_", " ")
fd.core.create_download_readme("README.txt", **cat[key])
|
StarcoderdataPython
|
3464273
|
<gh_stars>1-10
#!/usr/bin/env python
import netcdf4_functions as nffun
import os, sys, csv, time, math, numpy
from optparse import OptionParser
#Create, run and process a CLM/ELM model ensemble member
# given specified case and parameters (see parm_list and parm_data files)
# Parent case must be pre-built and all namelists in run directory.
# Post-processing calcuates normalized sum of squared errors (SSE) given
# data constraints specified in "constraints" directory"
# DMRicciuto 12/1/2015
#
# Note: This will only work for single-point CLM/ELM compiled with MPI_SERIAL
#-------------------Parse options-----------------------------------------------
parser = OptionParser()
parser.add_option("--runroot", dest="runroot", default="../../run", \
help="Directory where the run would be created")
parser.add_option("--case_copy", dest="casename", default="", \
help="Name of case to copy from")
parser.add_option("--site_orig", dest="site_orig", default='', \
help = 'Site being run in original case')
parser.add_option("--site_new", dest="site_new", default='', \
help = 'Site to run')
parser.add_option("--nyears", dest="nyears", default=0, \
help = 'Number of years to run')
parser.add_option("--finidat_year", dest='finyr', default=0, \
help = 'Year for initial data file')
parser.add_option("--finidat_thiscase", dest='fincase', default=False, \
action="store_true", help = 'Use this case for finidat')
parser.add_option('--spin_cycle', dest='spin_cycle', default=0, \
help = 'Number of years in spinup cycle')
parser.add_option('--1850_landuse', dest='nolanduse', default=False, \
action='store_true', help = '1850 land use (no dynamics)')
parser.add_option('--1850_co2', dest='noco2', default=False, \
action='store_true', help = 'use 1850 CO2')
parser.add_option('--1850_ndep', dest='nondep', default=False, \
action='store_true', help = 'use 1850 NDep')
parser.add_option('--suffix', dest='suffix', default='', \
                  help = 'Suffix appended to the case name and run directory')
parser.add_option('--machine', dest='machine', default='cades', \
help = 'machine')
parser.add_option('--warming', dest='warming', default='0.0', \
help = 'warming level to apply')
(options, args) = parser.parse_args()
casename = options.casename
#create directory from original case
orig_dir = str(os.path.abspath(options.runroot)+'/'+casename+'/run')
new_dir = orig_dir.replace(options.site_orig, options.site_new)
if (options.suffix != ''):
new_dir = new_dir.replace(casename,casename+'_'+options.suffix)
print 'Copying from '+orig_dir+' to \n'+new_dir
if (new_dir == orig_dir):
print 'Error: New and old directories are the same. Exiting'
sys.exit(1)
#copy files to new directory
os.system('mkdir -p '+new_dir+'/timing/checkpoints')
os.system('rm '+new_dir+'/*.nc')
os.system('rm -f '+new_dir+'/*.nc.orig')
os.system('cp '+orig_dir+'/*_in* '+new_dir)
os.system('cp '+orig_dir+'/*nml '+new_dir)
if (not ('ICB' in casename)):
os.system('cp '+orig_dir+'/*stream* '+new_dir)
os.system('cp '+orig_dir+'/*.rc '+new_dir)
os.system('cp '+orig_dir+'/*para*.nc '+new_dir)
#Change site name in relevant files
for f in os.listdir(new_dir):
if (os.path.isfile(new_dir+'/'+f) and (f[-2:] == 'in' or f[-3:] == 'nml' or 'streams' in f)):
myinput=open(new_dir+'/'+f)
myoutput=open(new_dir+'/'+f+'.tmp','w')
for s in myinput:
if ('drv_in' in f and 'stop_n' in s and int(options.nyears) > 0):
s_out = ' stop_n = '+str(options.nyears)+'\n'
elif ('drv_in' in f and 'lnd_ntasks' in s):
np = int(s.split()[2])
s_out = s
elif ('drv_in' in f and 'restart_n' in s and int(options.nyears) > 0):
s_out = ' restart_n = '+str(options.nyears)+'\n'
elif ('lnd_in' in f and 'finidat =' in s and int(options.finyr) > 0):
year_orig = str((s.split('.')[-2:-1])[0])[0:4]
year_new = str(10000+int(options.finyr))[1:]
s_out = s.replace('.clm2.r.'+year_orig, '.clm2.r.'+year_new)
s_out = s_out.replace(options.site_orig, options.site_new)
if (options.suffix != ''):
s_out = s_out.replace(casename,casename+'_'+options.suffix)
elif ('lnd_in' in f and "hist_nhtfrq =" in s and \
int(options.spin_cycle) > 0):
nhtfrq = str(-8760*int(options.spin_cycle))
if ('ad_spinup' in new_dir):
s_out = 'hist_nhtfrq = '+nhtfrq+','+nhtfrq+'\n'
else:
s_out = 'hist_nhtfrq = '+nhtfrq+'\n'
elif ('lnd_in' in f and "flanduse_timeseries =" in s and \
options.nolanduse == True):
s_out = " flanduse_timeseries = ''\n"
elif ('lnd_in' in f and "do_transient_pfts" in s and \
options.nolanduse == True):
s_out = "do_transient_pfts = .false.\n"
elif ('lnd_in' in f and "do_harvest" in s and \
options.nolanduse == True):
s_out = "do_harvest = .false.\n"
elif ('lnd_in' in f and "co2_file =" in s and \
options.noco2 == True):
s_out = s.replace('.nc','_CON.nc')
elif ('lnd_in' in f and "stream_fldfilename_ndep" in s and \
options.nondep == True):
s_out = s.replace('.nc','_CON.nc')
elif ('diri =' not in s):
s_out = s.replace(options.site_orig, options.site_new)
if (options.suffix != ''):
s_out = s_out.replace(casename,casename+'_'+options.suffix)
elif ('diri' in s and 'lnd' in f):
exedir = s.split()[2][1:-4]
print exedir
s_out = s
else:
s_out = s
myoutput.write(s_out)
myoutput.close()
myinput.close()
os.system(' mv '+new_dir+'/'+f+'.tmp '+new_dir+'/'+f)
#Assume makepointdata has been run to generate surface and domain data
if (options.site_orig == options.site_new and os.path.exists(orig_dir+'/surfdata.nc')):
os.system('cp '+orig_dir+'/surfdata.nc '+new_dir)
else:
os.system('cp temp/surfdata.nc '+new_dir)
if (options.site_orig == options.site_new and os.path.exists(orig_dir+'/domain.nc')):
os.system('cp '+orig_dir+'/domain.nc '+new_dir)
else:
os.system('cp temp/domain.nc '+new_dir)
os.system('pwd')
if ('20TR' in options.casename and not options.nolanduse):
os.system('cp temp/*pftdyn*.nc '+new_dir)
#if a global file exists, modify
if (os.path.exists('temp/global_'+options.casename+'_0.pbs') and options.suffix != ''):
file_in = open('temp/global_'+options.casename+'_0.pbs','r')
file_out =open('temp/global_'+options.casename+'_'+options.suffix+'.pbs','w')
mpicmd = 'mpirun -np '+str(np)
if ('cades' in options.machine):
mpicmd = '/software/dev_tools/swtree/cs400_centos7.2_pe2016-08/openmpi/1.10.3/centos7.2_gnu5.3.0/bin/mpirun'+ \
' -np '+str(np)+' --hostfile $PBS_NODEFILE '
elif (('titan' in options.machine or 'eos' in options.machine) and int(options.ninst) == 1):
mpicmd = 'aprun -n '+str(np)
elif ('cori' in options.machine or 'edison' in options.machine):
mpicmd = 'srun -n '+str(np)
for s in file_in:
if "#" in s:
file_out.write(s)
file_out.write('\n\n')
file_out.write('cd '+new_dir+'\n')
file_out.write(mpicmd+' '+exedir+'/e3sm.exe\n')
file_in.close()
file_out.close()
print "Submitting the job:"
if ('cori' in options.machine or 'edison' in options.machine):
os.system('sbatch temp/global_'+options.casename+'_'+options.suffix+'.pbs')
else:
os.system('qsub temp/global_'+options.casename+'_'+options.suffix+'.pbs')
#if an ensemble script exists, make a copy for this site
if (options.site_orig != ''):
case_prefix = options.casename.split(options.site_orig)[0][:-1]
ens_fname = 'scripts/'+case_prefix+'/ensemble_run_'+options.casename.replace(options.site_orig, \
options.site_new)+'.pbs'
if os.path.exists('scripts/'+case_prefix+'/ensemble_run_'+options.casename+'.pbs'):
os.system('cp scripts/'+case_prefix+'/ensemble_run_'+options.casename+'.pbs '+ens_fname)
myfile = open(ens_fname,'r')
myfile_out = open(ens_fname+'_temp','w')
for s in myfile:
args = s.split(' ')
if ('--case' in args):
caseind = args.index('--case')
print caseind
args[caseind+1] = args[caseind+1].replace(options.site_orig,options.site_new)
siteind = args.index('--site')
args[siteind+1] = args[siteind+1].replace(options.site_orig,options.site_new)
sout = " ".join(args)
myfile_out.write(sout)
else:
myfile_out.write(s.replace(options.site_orig,options.site_new))
myfile.close()
myfile_out.close()
os.system('mv '+ens_fname+'_temp '+ens_fname)
|
StarcoderdataPython
|
9738863
|
# https://github.com/ValvePython/steam/issues/97
import gevent.monkey
gevent.monkey.patch_all()
from getpass import getpass
from gevent.pywsgi import WSGIServer
from steam_worker import SteamWorker
from flask import Flask, request, abort, jsonify
import logging
logging.basicConfig(format="%(asctime)s | %(name)s | %(message)s", level=logging.INFO)
LOG = logging.getLogger('SimpleWebAPI')
app = Flask('SimpleWebAPI')
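# Each route below is a thin HTTP wrapper that proxies the request to the SteamWorker client and returns JSON.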
@app.route("/ISteamApps/GetProductInfo/", methods=['GET'])
def GetProductInfo():
appids = request.args.get('appids', '')
pkgids = request.args.get('packageids', '')
if not appids and not pkgids:
return jsonify({})
appids = map(int, appids.split(',')) if appids else []
pkgids = map(int, pkgids.split(',')) if pkgids else []
return jsonify(worker.get_product_info(appids, pkgids) or {})
@app.route("/ISteamApps/GetProductChanges/", methods=['GET'])
def GetProductChanges():
chgnum = int(request.args.get('since_changenumber', 0))
return jsonify(worker.get_product_changes(chgnum))
@app.route("/ISteamApps/GetPlayerCount/", methods=['GET'])
def GetPlayerCount():
appid = int(request.args.get('appid', 0))
return jsonify(worker.get_player_count(appid))
if __name__ == "__main__":
LOG.info("Simple Web API recipe")
LOG.info("-"*30)
LOG.info("Starting Steam worker...")
worker = SteamWorker()
worker.prompt_login()
LOG.info("Starting HTTP server...")
http_server = WSGIServer(('', 5000), app)
try:
http_server.serve_forever()
except KeyboardInterrupt:
LOG.info("Exit requested")
worker.close()
|
StarcoderdataPython
|
9715790
|
<reponame>avacorreia/Exercicio-Curso-em-Video-Pyton-Modulo1
import datetime
ano = int(input('Enter the year: '))
if ano == 0:
ano = datetime.datetime.now().year
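# Gregorian rule: a year is a leap year if it is divisible by 4, unless it is divisible by 100 but not by 400.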
if ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0):
    print(ano)
    print('The year is a leap year.')
else:
    print(ano)
    print('The year is not a leap year.')
|
StarcoderdataPython
|
271094
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Entity editing tests for enumerated value fields
"""
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import unittest
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.db import models
from django.http import QueryDict
from django.contrib.auth.models import User
from django.test import TestCase # cf. https://docs.djangoproject.com/en/dev/topics/testing/tools/#assertions
from django.test.client import Client
from utils.SuppressLoggingContext import SuppressLogging
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist import layout
from annalist.models.entitytypeinfo import EntityTypeInfo
from annalist.models.site import Site
from annalist.models.collection import Collection
from annalist.models.recordtype import RecordType
from annalist.models.recordlist import RecordList
from annalist.models.recordview import RecordView
from annalist.models.recordfield import RecordField
from annalist.models.recordtypedata import RecordTypeData
from annalist.models.entitydata import EntityData
from .AnnalistTestCase import AnnalistTestCase
from .tests import (
test_layout,
TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir
)
from .init_tests import (
init_annalist_test_site, init_annalist_test_coll, resetSitedata
)
from .entity_testutils import (
collection_create_values,
continuation_url_param,
create_test_user
)
from .entity_testtypedata import (
recordtype_create_values,
)
from .entity_testviewdata import (
recordview_url,
recordview_create_values, recordview_values,
view_view_form_data,
)
from .entity_testentitydata import (
entity_url, entitydata_edit_url,
entitydata_value_keys, entitydata_create_values, entitydata_values,
default_view_form_data,
)
# -----------------------------------------------------------------------------
#
# Entity edit enumerated value field tests
#
# -----------------------------------------------------------------------------
class EntityEditEnumFieldTest(AnnalistTestCase):
def setUp(self):
init_annalist_test_site()
self.testsite = Site(TestBaseUri, TestBaseDir)
self.testcoll = Collection.create(self.testsite, "testcoll", collection_create_values("testcoll"))
self.testtype = RecordType.create(self.testcoll, "testtype", recordtype_create_values("testtype"))
self.testdata = RecordTypeData.create(self.testcoll, "testtype", {})
# Login and permissions
create_test_user(self.testcoll, "testuser", "testpassword")
self.client = Client(HTTP_HOST=TestHost)
loggedin = self.client.login(username="testuser", password="<PASSWORD>")
self.assertTrue(loggedin)
return
def tearDown(self):
resetSitedata(scope="collections")
return
@classmethod
def setUpClass(cls):
super(EntityEditEnumFieldTest, cls).setUpClass()
return
@classmethod
def tearDownClass(cls):
super(EntityEditEnumFieldTest, cls).tearDownClass()
resetSitedata(scope="collections") #@@checkme@@
return
# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------
def _create_entity_data(self, entity_id, update="Entity"):
"Helper function creates entity data with supplied entity_id"
e = EntityData.create(self.testdata, entity_id,
entitydata_create_values(entity_id, update=update)
)
return e
def _check_entity_data_values(self, entity_id, type_id="testtype", update="Entity", update_dict=None):
"Helper function checks content of form-updated record type entry with supplied entity_id"
# log.info("_check_entity_data_values: type_id %s, entity_id %s"%(type_id, entity_id))
typeinfo = EntityTypeInfo(self.testcoll, type_id)
self.assertTrue(typeinfo.entityclass.exists(typeinfo.entityparent, entity_id))
e = typeinfo.entityclass.load(typeinfo.entityparent, entity_id)
self.assertEqual(e.get_id(), entity_id)
self.assertEqual(
e.get_view_url(""),
TestHostUri+entity_url("testcoll", type_id, entity_id)
)
v = entitydata_values(entity_id, type_id=type_id, update=update)
if update_dict:
v.update(update_dict)
for k in update_dict:
if update_dict[k] is None:
v.pop(k, None)
# log.info(e.get_values())
self.assertDictionaryMatch(e.get_values(), v)
return e
def _create_record_view(self, view_id):
"Helper function creates record view entry with supplied view_id"
t = RecordView.create(
self.testcoll, view_id,
recordview_create_values(view_id=view_id, view_entity_type="annal:View")
)
return t
def _check_record_view_values(
self, view_id, view_uri=None, update="RecordView",
num_fields=4,
update_dict=None,
):
"Helper function checks content of record view entry with supplied view_id"
self.assertTrue(RecordView.exists(self.testcoll, view_id))
t = RecordView.load(self.testcoll, view_id)
self.assertEqual(t.get_id(), view_id)
self.assertEqual(t.get_view_url(), TestHostUri+recordview_url("testcoll", view_id))
v = recordview_values(
view_id=view_id, view_uri=view_uri,
view_entity_type="annal:View",
update=update,
num_fields=num_fields,
)
if update_dict:
v.update(update_dict)
for k in update_dict:
if update_dict[k] is None:
v.pop(k, None)
# log.info("*** actual: %r"%(t.get_values(),))
# log.info("*** expect: %r"%(v,))
self.assertDictionaryMatch(t.get_values(), v)
return t
# -----------------------------------------------------------------------------
# Form response tests
# -----------------------------------------------------------------------------
# -------- new entity --------
def test_post_new_entity_new_view(self):
self.assertFalse(EntityData.exists(self.testdata, "entitynewview"))
f = default_view_form_data(
entity_id="entitynewview", action="new", update="Updated entity",
new_view="New view"
)
u = entitydata_edit_url("new", "testcoll", "testtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("new", "testcoll", "_view", view_id="View_view")
w = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entitynewview", view_id="Default_view")
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewview", update="Updated entity")
return
def test_post_new_entity_new_view_no_login(self):
self.client.logout()
f = default_view_form_data(
entity_id="entitynewview", action="new", update="Updated entity",
new_view="New view"
)
u = entitydata_edit_url("new", "testcoll", "testtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_new_entity_new_field(self):
self.assertFalse(EntityData.exists(self.testdata, "entitynewfield"))
f = default_view_form_data(
entity_id="entitynewfield", action="new", update="Updated entity",
new_field="New field"
)
u = entitydata_edit_url("new", "testcoll", "testtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("new", "testcoll", "_field", view_id="Field_view")
w = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entitynewfield", view_id="Default_view")
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewfield", update="Updated entity")
return
def test_post_new_entity_new_field_no_login(self):
self.client.logout()
f = default_view_form_data(
entity_id="entitynewfield", action="new", update="Updated entity",
new_field="New field"
)
u = entitydata_edit_url("new", "testcoll", "testtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_new_entity_new_type(self):
self.assertFalse(EntityData.exists(self.testdata, "entitynewtype"))
f = default_view_form_data(
entity_id="entitynewtype", action="new", update="Updated entity",
new_type="New type"
)
u = entitydata_edit_url("new", "testcoll", "testtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("new", "testcoll", "_type", view_id="Type_view")
w = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entitynewtype", view_id="Default_view")
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewtype", update="Updated entity")
return
def test_post_new_entity_new_type_no_login(self):
self.client.logout()
f = default_view_form_data(
entity_id="entitynewtype", action="new", update="Updated entity",
new_type="New type"
)
u = entitydata_edit_url("new", "testcoll", "testtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_new_entity_enum_type_new(self):
self.assertFalse(EntityData.exists(self.testdata, "entitynewtype"))
f = default_view_form_data(
entity_id="entitynewtype", action="new", update="Updated entity",
new_enum="entity_type__new_edit"
)
u = entitydata_edit_url("new", "testcoll", "testtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("edit", "testcoll", "_type", "testtype", view_id="Type_view")
w = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewtype",
view_id="Default_view"
)
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewtype", update="Updated entity")
return
def test_post_new_entity_enum_type_new_no_login(self):
self.client.logout()
f = default_view_form_data(
entity_id="entitynewtype", action="new", update="Updated entity",
new_enum="entity_type__new_edit"
)
u = entitydata_edit_url("new", "testcoll", "testtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
# -------- edit entity --------
def test_post_edit_entity_new_view(self):
self._create_entity_data("entitynewview")
e1 = self._check_entity_data_values("entitynewview")
f = default_view_form_data(
entity_id="entitynewview", action="edit", update="Updated entity",
new_view="New view"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewview",
view_id="Default_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("new", "testcoll", "_view", view_id="View_view")
c = continuation_url_param(u)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewview", update="Updated entity")
return
def test_post_edit_entity_new_view_no_login(self):
self.client.logout()
f = default_view_form_data(
entity_id="entitynewview", action="edit", update="Updated entity",
new_view="New view"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewview",
view_id="Default_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_edit_entity_new_field(self):
self._create_entity_data("entitynewfield")
e1 = self._check_entity_data_values("entitynewfield")
f = default_view_form_data(
entity_id="entitynewfield", action="edit", update="Updated entity",
new_field="New field"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewfield",
view_id="Default_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("new", "testcoll", "_field", view_id="Field_view")
c = continuation_url_param(u)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewfield", update="Updated entity")
return
def test_post_edit_entity_new_field_no_login(self):
self.client.logout()
f = default_view_form_data(
entity_id="entitynewfield", action="edit", update="Updated entity",
new_field="New field"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewfield",
view_id="Default_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_edit_entity_new_type(self):
self._create_entity_data("entitynewtype")
e1 = self._check_entity_data_values("entitynewtype")
f = default_view_form_data(
entity_id="entitynewtype", action="edit", update="Updated entity",
new_type="New type"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewtype",
view_id="Default_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("new", "testcoll", "_type", view_id="Type_view")
c = continuation_url_param(u)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewtype", update="Updated entity")
return
def test_post_edit_entity_new_type_no_login(self):
self.client.logout()
f = default_view_form_data(
entity_id="entitynewtype", action="edit", update="Updated entity",
new_type="New type"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewtype",
view_id="Default_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_edit_entity_enum_type_new(self):
self._create_entity_data("entitynewtype")
e1 = self._check_entity_data_values("entitynewtype")
f = default_view_form_data(
entity_id="entitynewtype", action="edit", update="Updated entity",
new_enum="entity_type__new_edit"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewtype",
view_id="Default_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
# v = entitydata_edit_url("new", "testcoll", "_type", view_id="Type_view")
v = entitydata_edit_url("edit", "testcoll", "_type", "testtype", view_id="Type_view")
w = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewtype",
view_id="Default_view"
)
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewtype", update="Updated entity")
return
def test_post_edit_entity_enum_type_new_no_login(self):
self.client.logout()
f = default_view_form_data(
entity_id="entitynewtype", action="edit", update="Updated entity",
new_enum="entity_type__new_edit"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entitynewtype",
view_id="Default_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
# -------- edit view: tests 'new' button on enumeration in repeated value --------
def test_post_edit_view_enum_field_new(self):
self._create_record_view("editview")
self._check_record_view_values("editview")
f = view_view_form_data(
view_id="editview", orig_id="editview",
action="edit", update="Updated RecordView",
new_enum="View_fields__3__View_field_sel__new_edit"
)
u = entitydata_edit_url(
"edit", "testcoll", "_view",
entity_id="editview", view_id="View_view"
)
        r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("edit", "testcoll", "_field", "Entity_comment", view_id="Field_view")
w = entitydata_edit_url(
"edit", "testcoll", "_view", entity_id="editview",
view_id="View_view"
)
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_record_view_values("editview", update="Updated RecordView")
return
# End.
|
StarcoderdataPython
|
1835419
|
import torch
import torch.nn as nn
from torch.nn import init
import math
import numpy as np
'''
try:
from networks.resample2d_package.resample2d import Resample2d
from networks.channelnorm_package.channelnorm import ChannelNorm
from networks import FlowNetC
from networks import FlowNetS
from networks import FlowNetSD
from networks import FlowNetFusion
from networks.submodules import *
except:
from .networks.resample2d_package.resample2d import Resample2d
from .networks.channelnorm_package.channelnorm import ChannelNorm
from .networks import FlowNetC
from .networks import FlowNetS
from .networks import FlowNetSD
from .networks import FlowNetFusion
from .networks.submodules import *
'Parameter count = 162,518,834'
'''
from networks import FlowNetS
from networks.submodules import *
import pdb
class FlowNet2S(FlowNetS.FlowNetS):
def __init__(self, args, batchNorm=False, div_flow=20):
super(FlowNet2S,self).__init__(args, input_channels = 6, batchNorm=batchNorm)
self.rgb_max = args.rgb_max
self.div_flow = div_flow
def forward(self, inputs):
#pdb.set_trace()
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x = torch.cat( (x[:,:,0,:,:], x[:,:,1,:,:]), dim = 1)
out_conv1 = self.conv1(x)
out_conv2 = self.conv2(out_conv1)
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
flow5 = self.predict_flow5(concat5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
flow4 = self.predict_flow4(concat4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
flow3 = self.predict_flow3(concat3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
flow2 = self.predict_flow2(concat2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return self.upsample1(flow2*self.div_flow)
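# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file). The Namespace
# fields below (rgb_max, fp16) are assumptions about what the surrounding
# flownet2 training/inference scripts normally pass in, and the tensor sizes
# are placeholders (spatial dims should be divisible by 64).
if __name__ == '__main__':
    import argparse
    args = argparse.Namespace(rgb_max=255.0, fp16=False)
    model = FlowNet2S(args).eval()
    # A batch of 1 sample: 3 colour channels, 2 frames, 256x256 pixels.
    frames = torch.randn(1, 3, 2, 256, 256)
    with torch.no_grad():
        flow = model(frames)
    print(flow.shape)  # roughly (1, 2, 256, 256) after the final upsampling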
|
StarcoderdataPython
|
8110546
|
<filename>setup.py
import os,sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args="./tests/test_pushysdk.py"
def run_tests(self):
import shlex
import pytest
errno=pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def makeLongDescription():
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
return f.read()
setup(
name="PushySDK",
version="0.1.6",
author="<NAME>",
author_email="<EMAIL>",
description="A very simple Python client for the Pushy notification service API.",
license="MIT",
keywords="Pushy Notification API",
url="https://github.com/jazzycamel/pushy",
packages=find_packages(exclude=['docs','tests']),
install_requires=['requests','six'],
tests_require=['pytest','pytest-cov'],
cmdclass={'test': PyTest},
long_description=makeLongDescription(),
long_description_content_type='text/markdown',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: MIT License",
]
)
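# ---------------------------------------------------------------------------
# Illustrative only (not part of the original setup.py): with the custom
# PyTest command registered through cmdclass above, the test-suite and a
# release build can be driven from the command line, for example:
#
#   python setup.py test                 # runs ./tests/test_pushysdk.py via pytest
#   python setup.py sdist bdist_wheel    # bdist_wheel assumes the 'wheel' package is installed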
|
StarcoderdataPython
|
202544
|
<filename>certbot-dns-digitalocean/certbot_dns_digitalocean/_internal/dns_digitalocean.py
"""DNS Authenticator for DigitalOcean."""
import logging
import digitalocean
import zope.interface
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for DigitalOcean
This Authenticator uses the DigitalOcean API to fulfill a dns-01 challenge.
"""
description = 'Obtain certificates using a DNS TXT record (if you are ' + \
'using DigitalOcean for DNS).'
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.credentials = None
@classmethod
def add_parser_arguments(cls, add): # pylint: disable=arguments-differ
super(Authenticator, cls).add_parser_arguments(add)
add('credentials', help='DigitalOcean credentials INI file.')
def more_info(self): # pylint: disable=missing-function-docstring
return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \
'the DigitalOcean API.'
def _setup_credentials(self):
self.credentials = self._configure_credentials(
'credentials',
'DigitalOcean credentials INI file',
{
'token': 'API token for DigitalOcean account'
}
)
def _perform(self, domain, validation_name, validation):
self._get_digitalocean_client().add_txt_record(domain, validation_name, validation)
def _cleanup(self, domain, validation_name, validation):
self._get_digitalocean_client().del_txt_record(domain, validation_name, validation)
def _get_digitalocean_client(self):
return _DigitalOceanClient(self.credentials.conf('token'))
class _DigitalOceanClient(object):
"""
Encapsulates all communication with the DigitalOcean API.
"""
def __init__(self, token):
self.manager = digitalocean.Manager(token=token)
def add_txt_record(self, domain_name, record_name, record_content):
"""
Add a TXT record using the supplied information.
:param str domain_name: The domain to use to associate the record with.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_content: The record content (typically the challenge validation).
:raises certbot.errors.PluginError: if an error occurs communicating with the DigitalOcean
API
"""
try:
domain = self._find_domain(domain_name)
except digitalocean.Error as e:
hint = None
if str(e).startswith("Unable to authenticate"):
hint = 'Did you provide a valid API token?'
logger.debug('Error finding domain using the DigitalOcean API: %s', e)
raise errors.PluginError('Error finding domain using the DigitalOcean API: {0}{1}'
.format(e, ' ({0})'.format(hint) if hint else ''))
try:
result = domain.create_new_domain_record(
type='TXT',
name=self._compute_record_name(domain, record_name),
data=record_content)
record_id = result['domain_record']['id']
logger.debug('Successfully added TXT record with id: %d', record_id)
except digitalocean.Error as e:
logger.debug('Error adding TXT record using the DigitalOcean API: %s', e)
raise errors.PluginError('Error adding TXT record using the DigitalOcean API: {0}'
.format(e))
def del_txt_record(self, domain_name, record_name, record_content):
"""
Delete a TXT record using the supplied information.
Note that both the record's name and content are used to ensure that similar records
created concurrently (e.g., due to concurrent invocations of this plugin) are not deleted.
Failures are logged, but not raised.
:param str domain_name: The domain to use to associate the record with.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_content: The record content (typically the challenge validation).
"""
try:
domain = self._find_domain(domain_name)
except digitalocean.Error as e:
logger.debug('Error finding domain using the DigitalOcean API: %s', e)
return
try:
domain_records = domain.get_records()
matching_records = [record for record in domain_records
if record.type == 'TXT'
and record.name == self._compute_record_name(domain, record_name)
and record.data == record_content]
except digitalocean.Error as e:
logger.debug('Error getting DNS records using the DigitalOcean API: %s', e)
return
for record in matching_records:
try:
logger.debug('Removing TXT record with id: %s', record.id)
record.destroy()
except digitalocean.Error as e:
logger.warning('Error deleting TXT record %s using the DigitalOcean API: %s',
record.id, e)
def _find_domain(self, domain_name):
"""
Find the domain object for a given domain name.
:param str domain_name: The domain name for which to find the corresponding Domain.
:returns: The Domain, if found.
:rtype: `~digitalocean.Domain`
:raises certbot.errors.PluginError: if no matching Domain is found.
"""
domain_name_guesses = dns_common.base_domain_name_guesses(domain_name)
domains = self.manager.get_all_domains()
for guess in domain_name_guesses:
matches = [domain for domain in domains if domain.name == guess]
if matches:
domain = matches[0]
logger.debug('Found base domain for %s using name %s', domain_name, guess)
return domain
raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.'
.format(domain_name, domain_name_guesses))
@staticmethod
def _compute_record_name(domain, full_record_name):
# The domain, from DigitalOcean's point of view, is automatically appended.
return full_record_name.rpartition("." + domain.name)[0]
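# ---------------------------------------------------------------------------
# Illustrative only (not part of the original module): a minimal credentials
# INI file as consumed by _setup_credentials() above. The option name follows
# certbot's usual "<plugin>_<key>" prefixing and the token value is a
# placeholder, so treat both as assumptions.
#
#   # digitalocean.ini
#   dns_digitalocean_token = <your DigitalOcean API token>
#
# A hypothetical invocation could then look like:
#   certbot certonly --authenticator dns-digitalocean \
#       --dns-digitalocean-credentials ~/digitalocean.ini -d example.com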
|
StarcoderdataPython
|
1800852
|
<filename>qiskit_nature/drivers/second_quantization/pyquanted/pyquantedriver.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" PyQuante Driver """
import importlib.util
import logging
from enum import Enum
from typing import Union, List, Optional
from qiskit.utils.validation import validate_min
from qiskit_nature import QiskitNatureError
from ..qmolecule import QMolecule
from .integrals import compute_integrals
from ..fermionic_driver import FermionicDriver, HFMethodType
from ...molecule import Molecule
from ...units_type import UnitsType
logger = logging.getLogger(__name__)
class BasisType(Enum):
"""Basis Type"""
BSTO3G = "sto3g"
B631G = "6-31g"
B631GSS = "6-31g**"
class PyQuanteDriver(FermionicDriver):
"""
Qiskit Nature driver using the PyQuante2 library.
See https://github.com/rpmuller/pyquante2
"""
def __init__(
self,
atoms: Union[str, List[str]] = "H 0.0 0.0 0.0; H 0.0 0.0 0.735",
units: UnitsType = UnitsType.ANGSTROM,
charge: int = 0,
multiplicity: int = 1,
basis: BasisType = BasisType.BSTO3G,
hf_method: HFMethodType = HFMethodType.RHF,
tol: float = 1e-8,
maxiters: int = 100,
molecule: Optional[Molecule] = None,
) -> None:
"""
Args:
atoms: Atoms list or string separated by semicolons or line breaks. Each element in the
list is an atom followed by position e.g. `H 0.0 0.0 0.5`. The preceding example
shows the `XYZ` format for position but `Z-Matrix` format is supported too here.
units: Angstrom or Bohr.
charge: Charge on the molecule.
multiplicity: Spin multiplicity (2S+1)
basis: Basis set; sto3g, 6-31g or 6-31g**
hf_method: Hartree-Fock Method type.
tol: Convergence tolerance see pyquante2.scf hamiltonians and iterators
maxiters: Convergence max iterations see pyquante2.scf hamiltonians and iterators,
has a min. value of 1.
molecule: A driver independent Molecule definition instance may be provided. When a
molecule is supplied the ``atoms``, ``units``, ``charge`` and ``multiplicity``
parameters are all ignored as the Molecule instance now defines these instead. The
Molecule object is read when the driver is run and converted to the driver dependent
configuration for the computation. This allows, for example, the Molecule geometry
to be updated to compute different points.
Raises:
QiskitNatureError: Invalid Input
"""
validate_min("maxiters", maxiters, 1)
self._check_valid()
if not isinstance(atoms, str) and not isinstance(atoms, list):
raise QiskitNatureError("Invalid atom input for PYQUANTE Driver '{}'".format(atoms))
if isinstance(atoms, list):
atoms = ";".join(atoms)
elif isinstance(atoms, str):
atoms = atoms.replace("\n", ";")
super().__init__(
molecule=molecule,
basis=basis.value,
hf_method=hf_method.value,
supports_molecule=True,
)
self._atoms = atoms
self._units = units.value
self._charge = charge
self._multiplicity = multiplicity
self._tol = tol
self._maxiters = maxiters
@staticmethod
def _check_valid():
err_msg = "PyQuante2 is not installed. See https://github.com/rpmuller/pyquante2"
try:
spec = importlib.util.find_spec("pyquante2")
if spec is not None:
return
except Exception as ex: # pylint: disable=broad-except
logger.debug("PyQuante2 check error %s", str(ex))
raise QiskitNatureError(err_msg) from ex
raise QiskitNatureError(err_msg)
def run(self) -> QMolecule:
if self.molecule is not None:
atoms = ";".join(
[name + " " + " ".join(map(str, coord)) for (name, coord) in self.molecule.geometry]
)
charge = self.molecule.charge
multiplicity = self.molecule.multiplicity
units = self.molecule.units.value
else:
atoms = self._atoms
charge = self._charge
multiplicity = self._multiplicity
units = self._units
basis = self.basis
hf_method = self.hf_method
q_mol = compute_integrals(
atoms=atoms,
units=units,
charge=charge,
multiplicity=multiplicity,
basis=basis,
hf_method=hf_method,
tol=self._tol,
maxiters=self._maxiters,
)
q_mol.origin_driver_name = "PYQUANTE"
cfg = [
"atoms={}".format(atoms),
"units={}".format(units),
"charge={}".format(charge),
"multiplicity={}".format(multiplicity),
"basis={}".format(basis),
"hf_method={}".format(hf_method),
"tol={}".format(self._tol),
"maxiters={}".format(self._maxiters),
"",
]
q_mol.origin_driver_config = "\n".join(cfg)
return q_mol
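# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file); it assumes
# pyquante2 is installed so that _check_valid() passes, and uses the default
# H2 geometry defined above.
if __name__ == "__main__":
    driver = PyQuanteDriver(
        atoms="H 0.0 0.0 0.0; H 0.0 0.0 0.735",
        units=UnitsType.ANGSTROM,
        basis=BasisType.BSTO3G,
        hf_method=HFMethodType.RHF,
    )
    q_molecule = driver.run()
    print(q_molecule.origin_driver_config)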
|
StarcoderdataPython
|
3317930
|
<gh_stars>0
"""
CSC111 Final Project: Reconstructing the Ethereum Network Using
Graph Data Structures in Python
General Information
------------------------------------------------------------------------------
This file was created for the purpose of applying concepts in learned in
CSC111 to the real world problem domain of cryptocurrency transactions.
Copyright Information
------------------------------------------------------------------------------
This file is Copyright of <NAME>, <NAME>, <NAME>, and
<NAME>.
"""
import plotly.graph_objects as go
import networkx as nx
from build_graph import build_graph
def plot_graph(graph: nx.MultiDiGraph) -> None:
"""
Plot the Multiple Directed graph using the plotly library.
"""
# Choosing the spring layout to position the vertices of the graph.
pos = nx.spring_layout(graph)
# Creating the edge trace.
edge_x = []
edge_y = []
xtext = []
ytext = []
edge_values_text = []
for edge in graph.edges():
# Determine the start and end coordinates of the edge on the graph.
x0, y0 = pos[edge[0]]
x1, y1 = pos[edge[1]]
# Add all x coordinates to list of x_edge data.
edge_x.append(x0)
edge_x.append(x1)
edge_x.append(None)
# Add all y coordinates to list of y_edge data.
edge_y.append(y0)
edge_y.append(y1)
edge_y.append(None)
# Add x midpoint coordinates to list of xtext data.
xtext.append((x0 + x1) / 2)
# Add y midpoint coordinates to list of ytext data.
ytext.append((y0 + y1) / 2)
# Add transaction value to list of edge_values data.
value = graph.get_edge_data(edge[0], edge[1])[0]['weight']
edge_values_text.append(f"Transaction Value: {value}")
# Plotting the edges.
edge_trace = go.Scatter(
x=edge_x, y=edge_y,
line=dict(width=1, color='black'),
mode='lines'
)
# Plotting the edge transaction text.
edge_values_trace = go.Scatter(x=xtext, y=ytext, mode='none',
text=edge_values_text,
textposition='top center',
hovertemplate='%{text}<extra></extra>'
)
# Creating the node trace.
node_x = []
node_y = []
node_size = []
for node in graph.nodes():
# Determine the coordinates of each node (using the spring layout defined earlier)
x, y = pos[node]
node_x.append(x)
node_y.append(y)
size = 10
if graph.nodes[node] != {}:
size = graph.nodes[node]['size']
node_size.append(size)
node_trace = go.Scatter(
x=node_x, y=node_y,
mode='markers',
hoverinfo='text',
marker=dict(
showscale=True,
colorscale='Hot',
color=[],
size=node_size,
colorbar=dict(
thickness=10,
title='# of Transactions (degree)',
xanchor='left',
titleside='right'
),
line_width=2
)
)
# Setting the text of each node to its address.
node_text = []
for node in graph.nodes():
node_desc = f"Address: {node}"
# If the account doesn't have an empty representation
# in the graph, get its balance.
if graph.nodes[node] != {}:
balance = graph.nodes[node]['balance']
node_desc = f"Address: {node}\nBalance: {balance}"
# Add the description of the node to the list (which
# will get added to the trace, updating it).
node_text.append(node_desc)
# Update the text and size attributes of the node trace.
node_trace.text = node_text
node_neighbours = []
for node in graph.adjacency():
# To find the neighbours of this node (accounts who either
# sent or received transactions from this current account)
# we must access the second item of a tuple, which contains
# a dictionary representation of its neighbours (addresses
        # mapped to the attribute data of the connecting edges).
neighbours = len(node[1])
node_neighbours.append(neighbours)
node_trace.marker.color = node_neighbours
# Setting up the layout here.
layout = go.Layout(
title='Ethereum Transaction Graph',
showlegend=False,
hovermode='closest',
xaxis=dict(showgrid=False, zeroline=False),
yaxis=dict(showgrid=False, zeroline=False),
margin=dict(b=20, l=15, r=15, t=50), # Setting up the margins around the graph
)
# Plot the graph figure.
fig = go.Figure(
data=[edge_trace, node_trace, edge_values_trace],
layout=layout
)
# update layout
fig.update_layout(
title_font_size=15
)
fig.show()
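# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file): build a tiny
# MultiDiGraph carrying the attributes plot_graph() reads ('weight' on edges,
# 'balance' and 'size' on nodes) and render it. The addresses and amounts are
# placeholders; in the project the graph would come from build_graph().
if __name__ == '__main__':
    demo = nx.MultiDiGraph()
    demo.add_node('0xabc...', balance=12.5, size=25)
    demo.add_node('0xdef...', balance=3.1, size=10)
    demo.add_edge('0xabc...', '0xdef...', weight=1.75)
    plot_graph(demo)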
|
StarcoderdataPython
|
37840
|
# -*- coding: utf-8 -*-
from coralquant.models.odl_model import BS_Stock_Basic, BS_SZ50_Stocks, TS_Stock_Basic, TS_TradeCal
from coralquant.spider.bs_stock_basic import get_stock_basic
from coralquant import logger
from datetime import date, datetime, timedelta
from sqlalchemy import MetaData
from coralquant.database import session_scope
from coralquant.settings import CQ_Config
from coralquant.models.orm_model import TaskTable
from coralquant.stringhelper import TaskEnum
from sqlalchemy import func, distinct
_logger = logger.Logger(__name__).get_log()
meta = MetaData()
def create_task(
task: TaskEnum,
begin_date: date,
end_date: date,
codes: list = [],
type: str = None,
status: str = None,
market: str = None,
isdel=False,
):
"""创建任务
:param task: 任务类型
:type task: TaskEnum
:param begin_date: 如果开始时间(begin_date)为None,开始时间取股票上市(IPO)时间
:type begin_date: date
:param end_date: 结束时间
:type end_date: date
:param codes: 股票代码列表, defaults to []
:type codes: list, optional
:param type: 证券类型,其中1:股票,2:指数,3:其它, defaults to None
:type type: str, optional
:param status: 上市状态,其中1:上市,0:退市, defaults to None
:type status: str, optional
:param market: 市场类型 (主板/中小板/创业板/科创板/CDR), defaults to None
:type market: str, optional
:param isdel: 是否删除删除原有的相同任务的历史任务列表, defaults to False
:type isdel: bool, optional
"""
with session_scope() as sm:
if not codes:
query = sm.query(BS_Stock_Basic.code, BS_Stock_Basic.ipoDate)
if market:
query = query.join(TS_Stock_Basic, BS_Stock_Basic.code == TS_Stock_Basic.bs_code).filter(
TS_Stock_Basic.market == market
)
if CQ_Config.IDB_DEBUG == "1": # 如果是测试环境
query = query.join(BS_SZ50_Stocks, BS_Stock_Basic.code == BS_SZ50_Stocks.code)
if status:
query = query.filter(BS_Stock_Basic.status == status)
if type:
query = query.filter(BS_Stock_Basic.type == type)
codes = query.all()
if isdel:
            # Delete existing historical task records of the same task type
query = sm.query(TaskTable).filter(TaskTable.task == task.value)
query.delete()
sm.commit()
_logger.info("任务:{}-历史任务已删除".format(task.name))
tasklist = []
for c in codes:
tasktable = TaskTable(
task=task.value,
task_name=task.name,
ts_code=c.code,
begin_date=begin_date if begin_date is not None else c.ipoDate,
end_date=end_date,
)
tasklist.append(tasktable)
sm.bulk_save_objects(tasklist)
_logger.info("生成{}条任务记录".format(len(codes)))
def create_bs_task(task: TaskEnum, tmpcodes=None):
"""
    Create BS task list
"""
    # Delete existing historical task records of the same task type
TaskTable.del_with_task(task)
with session_scope() as sm:
query = sm.query(BS_Stock_Basic.code, BS_Stock_Basic.ipoDate, BS_Stock_Basic.outDate, BS_Stock_Basic.ts_code)
        if CQ_Config.IDB_DEBUG == "1":  # if this is the test environment
if tmpcodes:
query = query.filter(BS_Stock_Basic.code.in_(tmpcodes))
else:
query = query.join(BS_SZ50_Stocks, BS_Stock_Basic.code == BS_SZ50_Stocks.code)
        # query = query.filter(BS_Stock_Basic.status == True)  # keep only listed stocks
codes = query.all()
tasklist = []
for c in codes:
tasktable = TaskTable(
task=task.value,
task_name=task.name,
ts_code=c.ts_code,
bs_code=c.code,
begin_date=c.ipoDate,
end_date=c.outDate if c.outDate is not None else datetime.now().date(),
)
tasklist.append(tasktable)
sm.bulk_save_objects(tasklist)
_logger.info("生成{}条任务记录".format(len(codes)))
def create_ts_task(task: TaskEnum):
"""
    Create TS task list
"""
    # Delete existing historical task records of the same task type
TaskTable.del_with_task(task)
with session_scope() as sm:
codes = (
sm.query(
TS_Stock_Basic.ts_code, TS_Stock_Basic.bs_code, TS_Stock_Basic.list_date, TS_Stock_Basic.delist_date
)
.filter(TS_Stock_Basic.list_status == "L")
.all()
)
tasklist = []
for c in codes:
tasktable = TaskTable(
task=task.value,
task_name=task.name,
ts_code=c.ts_code,
bs_code=c.bs_code,
begin_date=c.list_date,
end_date=c.delist_date if c.delist_date is not None else datetime.now().date(),
)
tasklist.append(tasktable)
sm.bulk_save_objects(tasklist)
_logger.info("生成{}条任务记录".format(len(codes)))
def create_ts_cal_task(task: TaskEnum):
"""
    Create a task list based on the trading calendar
"""
    # Delete historical tasks
TaskTable.del_with_task(task)
with session_scope() as sm:
rp = sm.query(distinct(TS_TradeCal.date).label("t_date")).filter(
TS_TradeCal.is_open == True, TS_TradeCal.date <= datetime.now().date() # noqa
)
codes = rp.all()
tasklist = []
for c in codes:
tasktable = TaskTable(
task=task.value,
task_name=task.name,
ts_code="按日期更新",
bs_code="按日期更新",
begin_date=c.t_date,
end_date=c.t_date,
)
tasklist.append(tasktable)
sm.bulk_save_objects(tasklist)
_logger.info("生成{}条任务记录".format(len(codes)))
if __name__ == "__main__":
pass
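    # Hypothetical usage (not part of the original file); the TaskEnum member
    # names below are placeholders, not guaranteed members of TaskEnum:
    # create_bs_task(TaskEnum.SOME_BS_DAILY_TASK)
    # create_ts_cal_task(TaskEnum.SOME_CALENDAR_TASK)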
|
StarcoderdataPython
|
6568453
|
'''
@Author: hua
@Date: 2019-12-12 14:03:17
@description: https://www.cnblogs.com/luxiaojun/p/6567132.html
@LastEditors: hua
@LastEditTime: 2019-12-13 13:24:09
'''
from app import sched, delayQueue, socketio
import time
from app.Vendor.Utils import Utils
# Executed repeatedly on a schedule
@sched.scheduled_job('interval', seconds=5)
def interval_job():
    # Fetch tasks
""" d_list = delayQueue.consumer()
for item in d_list:
if item["action"] == 'invite':
socketio.emit('beg', Utils.formatBody(item), namespace='/api', room='@broadcast.'+str(item["id"])) """
|
StarcoderdataPython
|
3435912
|
<reponame>chandru99/nilmtk
from nilmtk import *
ds = DataSet("/Users/nipunbatra/Downloads/nilm_gjw_data.hdf5")
elec = ds.buildings[1].elec
elec.plot()
|
StarcoderdataPython
|
3243919
|
<filename>findash/polls/urls.py
from django.urls import path
from . import views
# namespace will differentiate app urls from other app urls with the same name.
app_name= 'polls'
# paths within the app
urlpatterns = [
# /polls
path('',views.IndexView.as_view(), name='index'),
# /polls/5
path('<int:pk>/',views.DetailView.as_view(), name='detail'),
# /polls/5/results
path('<int:pk>/results',views.ResultsView.as_view(), name='results'),
# /polls/5/vote
path('<int:question_id>/vote',views.vote, name='vote'),
]
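# Illustrative only (not part of the original file): with app_name set above,
# these routes are reversed through the 'polls' namespace, e.g. (assuming the
# project urls.py includes this module under the 'polls/' prefix):
#   reverse('polls:index')               -> '/polls/'
#   reverse('polls:detail', args=(5,))   -> '/polls/5/'
#   reverse('polls:vote', args=(5,))     -> '/polls/5/vote'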
|
StarcoderdataPython
|
6476439
|
from .maze_craze.maze_craze import env, parallel_env, raw_env # noqa: F401
|
StarcoderdataPython
|
11216118
|
<reponame>romgar/django-biolabs
from biolabs.core import models as core_models
from rest_framework import serializers
class LaboratorySerializer(serializers.HyperlinkedModelSerializer):
description = serializers.CharField(required=False, max_length=255)
adress = serializers.CharField(required=False, max_length=255)
latitude = serializers.DecimalField(required=False, max_digits=23, decimal_places=20)
longitude = serializers.DecimalField(required=False, max_digits=23, decimal_places=20)
country = serializers.CharField(required=False, max_length=255)
class Meta:
model = core_models.Laboratory
fields = ('name', 'description', 'url', 'adress', 'country',
'latitude', 'longitude')
|
StarcoderdataPython
|
6519486
|
<filename>extras/video_processor.py
"""
A script for processing video files into entries so that Lauhdutin can be used to manage a library of videos.
Requires ffmpeg (for ffmpeg.exe and ffprobe.exe) and ImageMagick (for convert.exe).
The structure of the config file, which should be in the working directory of the script and called "video_processor_config.json":
{
"ffmpeg": Absolute path to ffmpeg.exe.
"ffprobe": Absolute path to ffprobe.exe.
"convert": Absolute path to convert.exe.
"mkv_output": Absolute path to the output folder where MKV files are placed.
"video_inputs": A list of absolute paths to the folders where videos are stored.
"output": The absolute path to the @Resources folder of the skin.
"player_process": The name of your media player's process.
"subfolders_as_categories": Whether or not the first level of subfolders should be treated as categories.
"banner_width": The width of the banner in pixels.
"banner_height": The height of the banner in pixels.
"mosaic_tiles_wide": The number of frames widthwise in the mosaic thumbnail.
"mosaic_tiles_high": The number of frames heightwise in the mosaic thumbnail.
"tags": {
"<tag to assign to a video>": [
"<substring to look for in the name of a video>"
]
}
}
"""
import json
import math
import os
import subprocess
import sys
import ctypes
import time
CATEGORY_TITLE_SEPARATOR = " - "
TAG_SOURCES_SKIN = 1
def create_global_variables():
res = {}
path = os.path.join(os.getcwd(), "video_processor_config.json")
with open(path, "r") as file:
res = json.load(file)
assert os.path.isfile(res.get("ffmpeg", "")), "Path to ffmpeg executable is not valid!"
assert os.path.isfile(res.get("ffprobe", "")), "Path to ffprobe executable is not valid!"
assert os.path.isfile(res.get("convert", "")), "Path to ImageMagick's convert executable is not valid!"
assert os.path.isdir(res.get("mkv_output", "")), "Path to the output folder for MKVs is not valid!"
res["video_inputs"] = res.get("video_inputs", [])
assert len(res["video_inputs"]) > 0, "No paths to folders containing videos have been defined!"
for library in res["video_inputs"]:
assert os.path.isdir(library), "\"%s\" is not a valid path!" % library
res["database_output"] = os.path.join(res.get("output", ""), "games.json")
assert os.path.isfile(res["database_output"]), "Path to the database file is not valid!"
res["thumbnails_output"] = os.path.join(res.get("output", ""), "cache", "custom")
assert os.path.isdir(res["thumbnails_output"]), "Path to the thumbnails folder is not valid!"
res["player_process"] = res.get("player_process", None)
res["subfolders_as_categories"] = res.get("subfolders_as_categories", False)
assert res.get("banner_width", 0) > 0, "Banner width is invalid."
assert res.get("banner_height", 0) > 0, "Banner height is invalid."
assert res.get("mosaic_tiles_wide", 2) > 0, "The number of tiles widthwise in the mosaic thumbnail is invalid."
assert res.get("mosaic_tiles_high", 2) > 0, "The number of tiles heightwise in the mosaic thumbnail is invalid."
res["tags"] = res.get("tags", {})
return res
def set_title(title):
ctypes.windll.kernel32.SetConsoleTitleW(title)
# Option 1 - Drag-and-drop video files onto the script and choose this option to create copies of the videos in Matroska video containers (.mkv).
def mux_to_mkv(video):
current_folder, name = os.path.split(video)
name = name[:name.find(".")]
mkv = os.path.join(CONFIG["mkv_output"], "%s.mkv" % name)
ffmpeg = subprocess.Popen([CONFIG["ffmpeg"], "-i", video, "-f", "matroska", "-vcodec", "copy", "-acodec", "copy", mkv])
ffmpeg.wait()
# Option 2 - Update an existing or create a new database based on the videos found in the path that you defined in your config file.
def load_database():
db = {}
if os.path.isfile(CONFIG["database_output"]):
with open(CONFIG["database_output"], "r") as file:
db = json.load(file)
return db.get("games", []), db.get("tagsDictionary", {})
def get_duration(video):
args = [CONFIG["ffprobe"], "-i", "%s" % video, "-show_entries", "format=duration", "-loglevel", "quiet"]
ffprobe = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = ffprobe.communicate(timeout=600)
duration_line = outs.decode("utf-8").split("\n")[1]
return float(duration_line[9:])
def classify_duration(duration):
interval = 10.0
result = duration / 60.0 / interval
lower = math.floor(result) * interval
upper = math.ceil(result) * interval
return "%d-%d min" % (lower, upper)
def getTagKey(allTags, tag):
# Look for existing entry.
for key, value in allTags.items():
if value == tag:
return key
# Get next available key and add a new entry.
i = 1
key = "\"%s\"" % i
while allTags.get(key) != None:
i += 1
allTags[key] = tag
return key
def generate_tags(title, lookup, currentTags, allTags, newTags = []):
sep_index = title.find(CATEGORY_TITLE_SEPARATOR)
if sep_index >= 0:
title = title[sep_index + len(CATEGORY_TITLE_SEPARATOR):]
for tag, substrings in lookup.items():
key = getTagKey(allTags, tag)
if currentTags.get(key) != None:
continue
for substring in substrings:
if substring in title:
currentTags[key] = TAG_SOURCES_SKIN
break
for tag in newTags:
key = getTagKey(allTags, tag)
if currentTags.get(key) != None:
continue
currentTags[key] = TAG_SOURCES_SKIN
return currentTags
valid_extensions = [
".mkv",
".mp4",
".mov",
".flv",
".avi"
]
def valid_extension(file):
for ext in valid_extensions:
if file.endswith(ext):
return True
return False
def update_database(db = [], allTags = {}):
platform_id = 5
banner_path = "cache\\custom"
updated_videos = 0
# Update old entries here
i = 0
num_videos = len(db)
tagLookup = CONFIG["tags"]
for video in db:
i += 1
set_title("%d/%d - Updating existing entries" % (i, num_videos))
if video["plID"] != platform_id:
continue
updating = False
removing = False
path = video["pa"][1:-1]
if not os.path.isfile(path):
removing = True
os.remove(os.path.join(CONFIG["thumbnails_output"], "%s.jpg" % video["ti"]))
db.remove(video)
if removing:
print("Removing video: %s" % video.get("ti", ""))
updated_videos += 1
else:
if video.get("plOv", None) == None:
updating = True
relpath = None
for library in CONFIG["video_inputs"]:
temp = os.path.relpath(video["pa"][1:-1], library)
if relpath == None or len(temp) < len(relpath):
relpath = temp
assert relpath != None
category, file = os.path.split(relpath)
video["plOv"] = category
old_tags = video.get("ta", {}).copy()
new_tags = generate_tags(video["ti"], tagLookup, video.get("ta", {}), allTags)
if len(new_tags) != len(old_tags) or len(set(new_tags) & set(old_tags)) != len(new_tags):
updating = True
video["ta"] = new_tags
if updating:
print("Updating video: %s" % video.get("ti", ""))
updated_videos += 1
# Add new entries
old_videos = [video["pa"] for video in db if video.get("pa", None) != None]
new_videos = []
categories = []
for library in CONFIG["video_inputs"]:
if CONFIG["subfolders_as_categories"]:
for root, dirs, files in os.walk(library):
categories = dirs
break
for category in categories:
for root, dirs, files in os.walk(os.path.join(library, category)):
for file in files:
if not valid_extension(file):
continue
path = "\"%s\"" % os.path.join(root, file)
if path in old_videos:
old_videos.remove(path)
continue
new_videos.append({
"category": category,
"root": root,
"file": file,
"pa": path
})
else:
for root, dirs, files in os.walk(library):
for file in files:
if not valid_extension(file):
continue
path = "\"%s\"" % os.path.join(root, file)
if path in old_videos:
old_videos.remove(path)
continue
new_videos.append({
"root": root,
"file": file,
"pa": path
})
new_entries = []
i = 0
num_videos = len(new_videos)
for video in new_videos:
i += 1
set_title("%d/%d - Adding new entries" % (i, num_videos))
category = video.get("category", None)
root = video["root"]
file = video["file"]
path = video["pa"]
if category != None:
print("Adding file: %s - %s" % (category, file))
title = "%s%s%s" % (category, CATEGORY_TITLE_SEPARATOR, file[:file.rfind(".")])
duration = get_duration(os.path.join(root, file))
new_entries.append({
"pa": path,
"prOv": CONFIG["player_process"],
"ti": title,
"exBa": title,
"plID": platform_id,
"laPl": 0,
"hoPl": (duration / 3600.0),
"un": False,
"no": "%d minutes" % round(duration / 60.0),
"ta": generate_tags(title, tagLookup, {}, allTags, [classify_duration(duration), category]),
"plOv": category
})
else:
print("Adding file: %s" % file)
title = file[:file.rfind(".")]
duration = get_duration(os.path.join(root, file))
new_entries.append({
"pa": path,
"prOv": CONFIG["player_process"],
"ti": title,
"exBa": title,
"plID": platform_id,
"laPl": 0,
"hoPl": (duration / 3600.0),
"un": False,
"no": "%d minutes" % round(duration / 60.0),
"ta": generate_tags(title, tagLookup, {}, allTags, [classify_duration(duration)]),
})
db.extend(new_entries)
return updated_videos, len(new_entries)
def save_database(db, allTags):
with open(CONFIG["database_output"], "w") as file:
json.dump({"version": 2, "games": db, "tagsDictionary": allTags}, file)
# Option 5 - Resize existing thumbnails.
def resize_thumbnail(path):
width = CONFIG["banner_width"]
height = CONFIG["banner_height"]
resize_pattern = "%d^^x%d" % (width, height) # W^^xH or WxH^^
convert = subprocess.Popen([CONFIG["convert"], path, "-resize", resize_pattern, "-gravity", "center", "-extent", "%dx%d" % (width, height), "-quality", "90", path])
convert.wait()
# Option 3 - Generate thumbnails for the videos in your database.
def generate_thumbnail(args):
video_path = args["pa"]
timestamp = args["timestamp"]
thumbnail = os.path.join(CONFIG["thumbnails_output"], "%s.jpg" % args["ti"])
ffmpeg = subprocess.Popen([CONFIG["ffmpeg"], "-i", video_path, "-ss", "00:%s.000" % timestamp, "-vframes", "1", thumbnail])
ffmpeg.wait()
resize_thumbnail(thumbnail)
# Option 4
def generate_mosaic_thumbnail(args):
video_path = args["pa"]
duration = get_duration(video_path)
thumbnail = os.path.join(CONFIG["thumbnails_output"], "%s.jpg" % args["ti"])
tiles_wide = CONFIG["mosaic_tiles_wide"]
tiles_high = CONFIG["mosaic_tiles_high"]
num_frames = float(tiles_wide * tiles_high)
ffmpeg = subprocess.Popen([CONFIG["ffmpeg"], "-i", video_path, "-frames", "1", "-vf", "select=if(isnan(prev_selected_t)\\,gte(t\\,10)\\,gte(t-prev_selected_t\\,%d)),tile=%dx%d" % (int(duration / num_frames), tiles_wide, tiles_high), thumbnail])
ffmpeg.wait()
resize_thumbnail(thumbnail)
# Program
if __name__ == "__main__":
try:
global CONFIG
CONFIG = create_global_variables()
choice = None
options = [
"Mux to MKV",
"Update database",
"Generate thumbnail",
"Generate mosaic thumbnail",
"Resize thumbnails",
"Exit"
]
while choice != len(options):
print("")
i = 1
for option in options:
print("%d: %s" % (i, option))
i += 1
choice = input("\nSelect the action to perform: ")
if choice.strip() == "":
choice = "0"
choice = int(choice)
if choice == 1:
videos = sys.argv[1:]
if len(videos) > 0:
videos.sort()
i = 0
num_videos = len(videos)
for video in videos:
i += 1
set_title("%d/%d - Muxing to MKV" % (i, num_videos))
mux_to_mkv(video)
print("\nMuxed to MKV:")
for video in videos:
print(" %s" % video)
else:
print("\nNo videos to mux to MKV...")
elif choice == 2:
print("")
db, allTags = load_database()
updated, added = update_database(db, allTags)
print("\nUpdated %d videos..." % updated)
print("Added %d videos..." % added)
print("%d videos in total..." % len(db))
if updated > 0 or added > 0:
save_database(db, allTags)
elif choice == 3:
timestamp = input("\nTimestamp (mm:ss): ")
if timestamp.strip() == "":
timestamp = "00:20"
db, allTags = load_database()
videos = []
for video in db:
path = os.path.join(CONFIG["thumbnails_output"], "%s.jpg" % video["ti"])
if os.path.isfile(path):
continue
videos.append({
"pa": video["pa"][1:-1],
"timestamp": timestamp,
"ti": video["ti"]
})
if len(videos) > 0:
videos = sorted(videos, key=lambda k: k["ti"])
i = 0
num_videos = len(videos)
total_time = 0
for video in videos:
i += 1
start_time = time.time()
estimation = 0
if i > 1:
estimation = total_time / (i - 1) * (num_videos - i)
set_title("%d/%d - Generating thumbnail (~%d seconds remaining)" % (i, num_videos, estimation))
generate_thumbnail(video)
total_time += time.time() - start_time
print("\nGenerated thumbnail for:")
for video in videos:
print(" %s" % video["ti"])
else:
print("\nNo videos to generate thumbnails for...")
elif choice == 4:
db, allTags = load_database()
videos = []
for video in db:
path = os.path.join(CONFIG["thumbnails_output"], "%s.jpg" % video["ti"])
if os.path.isfile(path):
continue
videos.append({
"pa": video["pa"][1:-1],
"ti": video["ti"]
})
if len(videos) > 0:
videos = sorted(videos, key=lambda k: k["ti"])
i = 0
num_videos = len(videos)
total_time = 0
for video in videos:
i += 1
start_time = time.time()
estimation = 0
if i > 1:
estimation = total_time / (i - 1) * (num_videos - i)
set_title("%d/%d - Generating mosaic thumbnail (~%d seconds remaining)" % (i, num_videos, estimation))
generate_mosaic_thumbnail(video)
total_time += time.time() - start_time
print("\nGenerated mosaic thumbnail for:")
for video in videos:
print(" %s" % video["ti"])
else:
print("\nNo videos to generate mosaic thumbnails for...")
elif choice == 5:
db, allTags = load_database()
thumbnails = [os.path.join(CONFIG["thumbnails_output"], "%s.jpg" % video["ti"]) for video in db]
if len(thumbnails) > 0:
i = 0
num_thumbnails = len(thumbnails)
for thumbnail in thumbnails:
i += 1
set_title("%d/%d - Resizing thumbnail" % (i, num_thumbnails))
resize_thumbnail(thumbnail)
print("\nResized thumbnail:")
for thumbnail in thumbnails:
print(" %s" % thumbnail)
else:
print("\nNo thumbnails to resize...")
else:
pass
set_title("Done!")
print("\a")
except:
import traceback
traceback.print_exc()
input("\nPress enter to exit...")
|
StarcoderdataPython
|
1936061
|
#!/usr/bin/env python3
# vim: nospell expandtab ts=4
# SPDX-FileCopyrightText: 2020 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: BSD-2-Clause
from __future__ import annotations
from typing import Any, Generator, List, Tuple
import json
import logging
import requests
from bs4 import BeautifulSoup, NavigableString # type: ignore
from prosegen import ProseGen
LOGGER = logging.getLogger("snerge")
def load_data() -> ProseGen:
instance = ProseGen(20)
quotes = 0
for qid, quote in load_uno_quotes():
quotes += 1
instance.add_knowledge(quote, source=f"uno line {qid}")
LOGGER.info("Added %d Uno quotes", quotes)
quotes = 0
for qid, quote in load_lrr_quotes():
quotes += 1
instance.add_knowledge(quote, source=f"lrr {qid}")
LOGGER.info("Added %d LRR quotes", quotes)
return instance
def load_uno_quotes() -> Generator[Tuple[str, str], None, None]:
LOGGER.info("Loading quotes from Uno-db")
data = requests.get(
"https://raw.githubusercontent.com/RebelliousUno/BrewCrewQuoteDB/main/quotes.txt"
)
qid = 0
for line in data.text.split("\n"):
qid += 1
line = line.strip()
if not line:
continue
line_quotes = line.split('"')[1:]
for quote, attr in zip(*[iter(line_quotes)] * 2):
if "Serge" in attr or "Snerge" in attr:
yield str(qid), str(quote)
def load_lrr_quotes() -> Generator[Tuple[str, str], None, None]:
exclude = []
with open("moderate.txt", "rt") as handle:
for line in handle:
line = line.strip()
_id, _ = line.split(" ", 1)
exclude.append(_id)
LOGGER.info("Added %d quotes to the LRR exclude list", len(exclude))
for page in range(1, 15):
yield from load_lrr_quote_page(page, exclude)
def load_lrr_quote_page(
page: int, exclude: List[str]
) -> Generator[Tuple[str, str], None, None]:
LOGGER.info("Loading LRR quote page %d", page)
html = requests.get(f"https://lrrbot.com/quotes/search?q=serge&mode=name&page={page}")
soup = BeautifulSoup(html.content, "html.parser")
quotes = soup.find("ol", class_="quotes")
if not quotes:
return
for quote in quotes.find_all("li"):
quote_id = quote.find(class_="num").text
if quote_id in exclude:
continue
quote_text = quote.find("blockquote").text
attrib = quote.find("div", class_="attrib")
attrib_text = "".join(
element for element in attrib if isinstance(element, NavigableString)
)
attrib_text = attrib_text.strip("—").strip()
if attrib_text == "Serge":
yield quote_id, quote_text
class SetEncoder(json.JSONEncoder):
def default(self, o: Any) -> Any:
if isinstance(o, set):
return list(o)
return json.JSONEncoder.default(self, o)
def main() -> None:
with open("loaded_lrr_quotes.txt", "wt") as handle:
for quote_id, quote in load_lrr_quotes():
handle.write(f"{quote_id}, {quote}\n")
dataset = load_data()
with open("parsed_state.json", "wt") as handle:
json.dump(dataset.dictionary, handle, cls=SetEncoder)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3492580
|
from .zincbase import KB
|
StarcoderdataPython
|
6589300
|
load("//antlir/bzl:oss_shim.bzl", "http_file")
# This wrapper function around `native.prebuilt_python_library`
# exists because directly using `native.prebuilt_python_library`
# in BUCK causes a build error.
def prebuilt_python_library(**kwargs):
# @lint-ignore BUCKLINT
native.prebuilt_python_library(**kwargs)
def pypi_package(
name,
url,
sha256,
deps = None):
http_file(
name = "{}-download".format(name),
sha256 = sha256,
urls = [url],
visibility = [],
)
prebuilt_python_library(
name = name,
binary_src = ":{}-download".format(name),
visibility = ["PUBLIC"],
deps = deps or [],
)
|
StarcoderdataPython
|
3465594
|
<gh_stars>0
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 <NAME>, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import sys
import os
import os.path
import time
import signal
import errno
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..")))
import configuration
from background.utils import BackgroundProcess
from mailutils import sendAdministratorMessage
def getRSS(pid):
for line in open("/proc/%d/status" % pid):
words = line.split()
if words[0] == "VmRSS:":
if words[2].lower() == "kb": unit = 1024
elif words[2].lower() == "mb": unit = 1024 ** 2
elif words[2].lower() == "gb": unit = 1024 ** 3
else: raise Exception, "unknown unit: %s" % words[2]
return int(words[1]) * unit
else: raise Exception, "invalid pid"
class Watchdog(BackgroundProcess):
def __init__(self):
super(Watchdog, self).__init__(service=configuration.services.WATCHDOG)
def run(self):
soft_restart_attempted = set()
previous = {}
while not self.terminated:
self.interrupted = False
pidfile_dir = configuration.paths.WSGI_PIDFILE_DIR
if os.path.isdir(pidfile_dir):
pids = set(map(int, os.listdir(pidfile_dir)))
else:
                pids = set()
for pid in pids:
try: rss = getRSS(pid)
except IOError, error:
if error.errno == errno.ENOENT:
self.warning("unlinking stale pid-file: %s" % os.path.join(pidfile_dir, str(pid)))
os.unlink(os.path.join(pidfile_dir, str(pid)))
continue
else: raise
if previous.get(pid) != rss:
self.debug("pid=%d, rss=%d bytes" % (pid, rss))
previous[pid] = rss
if rss > configuration.services.WATCHDOG["rss_hard_limit"]:
sendAdministratorMessage("watchdog", "pid(%d): hard memory limit exceeded" % pid,
("Current RSS: %d kilobytes\nSending process SIGKILL (%d).\n\n-- critic"
% (rss, signal.SIGKILL)))
self.info("Killing pid(%d): hard memory limit exceeded, RSS: %d kilobytes" % (pid, rss))
os.kill(pid, signal.SIGKILL)
elif rss > configuration.services.WATCHDOG["rss_soft_limit"] and pid not in soft_restart_attempted:
sendAdministratorMessage("watchdog", "pid(%d): soft memory limit exceeded" % process.pid,
("Current RSS: %d kilobytes\nSending process SIGINT (%d).\n\n%s"
% (rss, signal.SIGINT)))
self.info("Killing pid(%d): soft memory limit exceeded, RSS: %d kilobytes" % (pid, rss))
os.kill(pid, signal.SIGINT)
soft_restart_attempted.add(pid)
for pid in previous.keys():
if pid not in pids: del previous[pid]
soft_restart_attempted = soft_restart_attempted & pids
time.sleep(10)
watchdog = Watchdog()
watchdog.run()
|
StarcoderdataPython
|
4996571
|
# 2.5)
from SinglyLikedList import SinglyLikedList
def add_lists(l1, l2):
added = get_real_number(l1) + get_real_number(l2)
return turn_into_list(added)
def get_real_number(linked_list):
node = linked_list.head
factor = 1
result = 0
while node is not None:
element = node.element
result += element * factor
factor *= 10
node = node.next
return result
# recursive solution
# def add_list_recursive(l1, l2, carry):
# if l1 is None and l2 is None and carry == 0:
# return None
#
# result = SinglyLikedList().head
# value = carry
#
# if l1 is not None:
# value += l1.element
# if l2 is not None:
# value += l2.element
#
# result.add_element(value % 10)
#
# # recurse
#
# if l1 is not None or l2 is not None:
# more = add_list_recursive(None if l1 is None else l1.next,
# None if l2 is None else l2.next,
# 1 if value >= 10 else 0)
# result.add_element(None if more is None else more.head.element)
# return result
def turn_into_list(number):
digits = [int(i) for i in str(number)]
new_list = SinglyLikedList()
for digit in digits[::-1]:
new_list.add_element(digit)
return new_list
# test code
list1 = SinglyLikedList()
list1.add_element(2)
list1.add_element(2)
list1.add_element(0)
list2 = SinglyLikedList()
list2.add_element(1)
list2.add_element(1)
list2.add_element(0)
result = add_lists(list1, list2)
result.print_all()
|
StarcoderdataPython
|
1781300
|
<gh_stars>1000+
import scrabble
# Print all the words containing uu
for word in scrabble.wordlist:
if "uu" in word:
print(word)
|
StarcoderdataPython
|
3458216
|
<reponame>Quentin18/Line-Track-Designer<gh_stars>1-10
"""
The **error** module manages the errors of the library.
"""
class LineTrackDesignerError(Exception):
"""Manage exception errors."""
pass
|
StarcoderdataPython
|
5073999
|
import os
from src.perturbator.support_modules.perturbator import branch_insert_process,\
count_tasks_in,\
count_sequence_flows_in,\
count_parallel_gateways_in
def test_parallel_insert_fragment():
absolute_input_path = os.path.abspath('./tests/test_files/input1.bpmn')
absolute_input2_path = os.path.abspath('./tests/test_files/input2.bpmn')
absolute_output_path = os.path.abspath('./tests/test_files/output.bpmn')
initial_tasks = count_tasks_in(absolute_input_path)
initial_fragment_tasks = count_tasks_in(absolute_input2_path)
initial_sequence_flows = count_sequence_flows_in(absolute_input_path)
initial_fragment_sequence_flows = count_sequence_flows_in(absolute_input2_path)
initial_gateways = count_parallel_gateways_in(absolute_input_path)
    initial_fragment_gateways = count_parallel_gateways_in(absolute_input2_path)
branch_insert_process(absolute_input_path, absolute_input2_path, 'flow11', 'flow13', absolute_output_path)
final_sequence_flows = count_sequence_flows_in(absolute_output_path)
final_gateways = count_parallel_gateways_in(absolute_output_path)
final_tasks = count_tasks_in(absolute_output_path)
assert initial_tasks + initial_fragment_tasks == final_tasks
assert initial_gateways + initial_fragment_gateways + 2 == final_gateways
assert initial_sequence_flows + initial_fragment_sequence_flows + 2 == final_sequence_flows
|
StarcoderdataPython
|
6504095
|
import numpy as np
import face_recognition
from scipy import spatial
from PIL import Image, ImageFont, ImageDraw
class FaceLandmarks(object):
def __init__(self):
pass
def find_list(self, name_file):
image = face_recognition.load_image_file(name_file)
return face_recognition.face_landmarks(image)
def find_box(self, name_file):
image = face_recognition.load_image_file(name_file)
return face_recognition.face_locations(image)
class FaceMask(object):
def __init__(self):
pass
def create_mask(self, face_landmarks_list, origin_img):
shape_img = Image.new("RGBA", origin_img.size, color=(255, 255, 255, 0))
shape_draw = ImageDraw.Draw(shape_img)
#for i in range(len(triangles_list)-1):
# shape_draw.line((triangles_list[i], triangles_list[i+1]), fill=(0,0,0), width=4)
# Draw line of Faces Landmark
for i in face_landmarks_list[0]:
for j in range( len(face_landmarks_list[0][i]) - 1 ):
shape_draw.point(face_landmarks_list[0][i][j],fill=50)
shape_draw.line((face_landmarks_list[0][i][j], face_landmarks_list[0][i][j+1]), fill=(0,0,0), width=4)
shape_draw.line((face_landmarks_list[0]['right_eye'][0], face_landmarks_list[0]['right_eye'][-1]), fill=(0,0,0), width=4)
shape_draw.line((face_landmarks_list[0]['left_eye'][0], face_landmarks_list[0]['left_eye'][-1]), fill=(0,0,0), width=4)
return shape_img
########################################################
def calc_lip_coef(facelandmarks_list):
'''Calc lip coef
    If the cosine of the angle between the vectors is < -0.97, the emotion is happy or sad (True - happy, False - sad).
    If it is > -0.97, the expression is neutral.
Arguments:
facelandmarks_list {[type]} -- [description]
'''
# Lips handler
# Make np array
arr = np.array(facelandmarks_list[0]['top_lip'])
# Find v_0 and v_1 coordinates
# Middle point - p_0
p_0 = arr[8]
# Angle points p_1 and p_2
p_1 = arr[0]; p_2 = arr[6]
# Find vectors
vec_0 = p_1 - p_0
vec_1 = p_2 - p_0
# Check points on smiling
if p_1[1] > p_2[1]:
smiling_bool = True
else:
smiling_bool = False
# Return cos and smiling_bool
# if cos < -0.97, it is happy or sad emotion
return (1 - spatial.distance.cosine(vec_0, vec_1), smiling_bool)
def determ_lips(facelandmarks_list):
index = calc_lip_coef(facelandmarks_list)
if index[1] and index[0] > -0.97:
return 'data/lips/2.png'
elif not index[1] and index[0] >= -0.97:
return 'data/lips/0.png'
else:
return 'data/lips/1.png'
def calc_nose_coef(facelandmarks_list):
'''Calc nose coef
    Determine the nose shape.
Arguments:
facelandmarks_list {[type]} -- [description]
return:
        (height-to-width ratio, area)
'''
# Make np arrays
bridge_arr = np.array(facelandmarks_list[0]['nose_bridge'])
tip_arr = np.array(facelandmarks_list[0]['nose_tip'])
# Find nose hieght
nose_hieght = tip_arr[np.where(np.min(tip_arr[:,1])==tip_arr[:,1])][0][1] - bridge_arr[0][1]
# Find nose width
nose_width = tip_arr[-1][0] - tip_arr[0][0]
# Return relation of h/w and nose square
return (nose_hieght/nose_width, (nose_hieght*nose_width)/2. )
def determ_nose(facelandmarks_list):
CONST_NOSE_COEF_MEDIUM = 2.0
CONST_NOSE_COEF_LOW = 1.5
index = calc_nose_coef(facelandmarks_list)[0]
if index <= CONST_NOSE_COEF_LOW:
return 'data/nose/2.png'
    elif CONST_NOSE_COEF_LOW < index <= CONST_NOSE_COEF_MEDIUM:
return 'data/nose/3.png'
else:
return 'data/nose/4.png'
def calc_eyebrow_coef(facelandmarks_list):
    '''Calc eyebrow coef
    Determine the eyebrows.
Arguments:
facelandmarks_list {[type]} -- [description]
Returns:
        {float} -- length ratio (eye length / eyebrow length)
'''
# Make np arrays
eyebrow_arr = np.array(facelandmarks_list[0]['left_eyebrow'])
eye_arr = np.array(facelandmarks_list[0]['left_eye'])
# Find lenght of eyebrow
eyebrow_len = eyebrow_arr[-1][0] - eyebrow_arr[0][0]
#print(eyebrow_len)
# Find lenght of eye
eye_len = eye_arr[-1][0] - eye_arr[0][0]
#print(eye_len)
return eye_len / eyebrow_len
def determ_eyebrow(facelandmarks_list):
CONST_EYEBROW_COEF_MEDIUM = 1.5
CONST_EYEBROW_COEF_LOW = 1.3
index = calc_eyebrow_coef(facelandmarks_list)
print('eyebrow detected: ', index)
if index <= CONST_EYEBROW_COEF_LOW:
return 'data/eyebrows/1.png'
elif CONST_EYEBROW_COEF_LOW < index <= CONST_EYEBROW_COEF_MEDIUM:
return 'data/eyebrows/2.png'
else:
return 'data/eyebrows/3.png'
def calc_eye_coef(facelandmarks_list):
    '''Eye size
[description]
Arguments:
facelandmarks_list {[type]} -- [description]
Returns:
[type] -- [description]
'''
eye_arr = np.array(facelandmarks_list[0]['left_eye'])
# Find lenght of eye
eye_len = eye_arr[-1][0] - eye_arr[0][0]
return eye_len/(eye_arr[5][1] - eye_arr[3][1])
def determ_eye(facelandmarks_list):
CONST_EYEBROW_COEF_MEDIUM = 5
CONST_EYEBROW_COEF_LOW = 3
index = calc_eye_coef(facelandmarks_list)
print('EYE detected: ', index)
if index <= CONST_EYEBROW_COEF_LOW:
return 'data/eyes/black_1.png'
    elif CONST_EYEBROW_COEF_LOW < index <= CONST_EYEBROW_COEF_MEDIUM:
return 'data/eyes/black_2.png'
else:
return 'data/eyes/black_3.png'
########################################################
# Generates a new face image
def make_face_shape(type):
return Image.open(type)
def add_lips(type, image):
lips = Image.open(type)
position = ((75), (200))
image.paste(lips, position, lips)
return image
def add_eyebrows(type, image):
eyebrows = Image.open(type)
position = ((50), (115))
image.paste(eyebrows, position, eyebrows)
return image
def add_eyes(type, image):
eyes = Image.open(type)
position = ((55), (135))
image.paste(eyes, position, eyes)
return image
def add_nose(type, image):
nose = Image.open(type)
position = ((93), (135))
image.paste(nose, position, nose)
return image
def make_photo(face_shape, lips, eyebrows, eyes, nose, save=False, name='test'):
print('shape')
image = make_face_shape(face_shape)
print('copy')
image_copy = image.copy()
print('add_lips')
image = add_lips(lips, image)
print('add_eyebrows')
image = add_eyebrows(eyebrows, image)
print('add_eyes')
image = add_eyes(eyes, image)
print('add_nose')
image = add_nose(nose, image)
print('save')
if save:
        # Save the image
image.save('data/hand/'+name)
return image
def create_face_shape(pic_path, save=False):
CONST_CHIN_COEF_MEDIUM = 1.355
CONST_CHIN_COEF_LOW = 1.25
index = create_face_mask_choice(create_mask_nake(pic_path, save))
print('Face Shape: ', index)
if index <= CONST_CHIN_COEF_LOW:
return 'data/faces/small_black.png'
    elif CONST_CHIN_COEF_LOW < index <= CONST_CHIN_COEF_MEDIUM:
return 'data/faces/medium_black.png'
else:
return 'data/faces/large_black.png'
def create_current_mask(pic_path, save=False):
'''Main func
[description]
Arguments:
pic_path {[type]} -- [description]
Keyword Arguments:
save {bool} -- [description] (default: {False})
'''
name = pic_path.split('/')[-1].split('.')[0] + '.png'
origin_img = Image.open(pic_path).convert("RGBA")
fl = FaceLandmarks()
fm = FaceMask()
facelandmarks_list = fl.find_list(pic_path)
image = make_photo(
face_shape=create_face_shape(pic_path),
lips=determ_lips(facelandmarks_list),
eyebrows=determ_eyebrow(facelandmarks_list),
eyes=determ_eye(facelandmarks_list),
nose=determ_nose(facelandmarks_list),
save=False,
name=name
)
# image = fm.create_mask(FaceLandmarkslist, origin_img)
if save:
image.save('data/hand/'+name)
return 'data/hand/'+name
########################################################
def create_mask_nake(pic_path, save=False):
name = pic_path.split('/')[-1].split('.')[0] + '.png'
origin_img = Image.open(pic_path).convert("RGBA")
fl = FaceLandmarks()
fm = FaceMask()
FaceLandmarkslist = fl.find_list(pic_path)
image = fm.create_mask(FaceLandmarkslist, origin_img)
if save:
image.save('data/hand/'+name)
return FaceLandmarkslist
def create_face_mask_choice(facelandmarks_list):
# make np array
arr = np.array(facelandmarks_list[0]['chin'])
# find max x-coordinate
max_x = np.max(arr[:,0])
# find min x-coordinate
min_x = np.min(arr[:,0])
# find max y-coordinate
max_y = np.max(arr[:,1])
# find min y-coordinate
min_y = np.min(arr[:,1])
return (max_x - min_x) / (max_y - min_y + 10)
if __name__ == '__main__':
face_shape = '../data/faces/small_black.png'
lips = '../data/lips/2.png'
eyebrows = '../data/eyebrows/3.png'
eyes = '../data/eyes/1.png'
    nose = '../data/nose/2.png'  # trouble with 3
make_photo(face_shape, lips, eyebrows, eyes, nose)
|
StarcoderdataPython
|
1693035
|
# pytest -s sa/tests/test_sa.py
import os
import sys
from typing import Text
for module_path in [os.path.abspath("/home/gus/Desktop/nlp-finance/"), os.path.abspath("/home/gus/Desktop/nlp-finance/sa")]:
if module_path not in sys.path:
sys.path.append(module_path)
import json
from sa.server_finance import SA
import sa_pb2
import sa_pb2_grpc
def test_sentiment_analysis():
finsta = SA()
request = sa_pb2.SaRequest(
text="I think CRM stocks are sinking big time. Everybody dip!"
)
response = finsta.SentimentAnalysis(request, context=None)
print(response)
assert response.text == request.text
sa_results = json.loads(response.results)
assert sa_results["label"] == "negative"
|
StarcoderdataPython
|
5154837
|
<reponame>sixin-zh/kymatio_wph
__all__ = ['PhaseHarmonics2d']
|
StarcoderdataPython
|