id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (string, 7 classes)
---|---|---
152011 | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
Non-relativistic magnetizability tensor for UHF
(In testing)
Refs:
[1] <NAME>, J. Chem. Phys., 109, 3185 (1998)
[2] <NAME>, Chem. Phys., 213, 123 (1996)
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import jk
from pyscf.prop.nmr import uhf as uhf_nmr
from pyscf.prop.magnetizability import rhf as rhf_mag
def dia(magobj, gauge_orig=None):
mol = magobj.mol
mf = magobj._scf
mo_energy = magobj._scf.mo_energy
mo_coeff = magobj._scf.mo_coeff
mo_occ = magobj._scf.mo_occ
orboa = mo_coeff[0][:,mo_occ[0] > 0]
orbob = mo_coeff[1][:,mo_occ[1] > 0]
dm0a = numpy.dot(orboa, orboa.T)
dm0b = numpy.dot(orbob, orbob.T)
dm0 = dm0a + dm0b
dme0a = numpy.dot(orboa * mo_energy[0][mo_occ[0] > 0], orboa.T)
dme0b = numpy.dot(orbob * mo_energy[1][mo_occ[1] > 0], orbob.T)
dme0 = dme0a + dme0b
e2 = rhf_mag._get_dia_1e(magobj, gauge_orig, dm0, dme0).ravel()
if gauge_orig is None:
vs = jk.get_jk(mol, [dm0, dm0a, dm0a, dm0b, dm0b],
['ijkl,ji->s2kl',
'ijkl,jk->s1il', 'ijkl,li->s1kj',
'ijkl,jk->s1il', 'ijkl,li->s1kj'],
'int2e_gg1', 's4', 9, hermi=1)
e2 += numpy.einsum('xpq,qp->x', vs[0], dm0)
e2 -= numpy.einsum('xpq,qp->x', vs[1], dm0a) * .5
e2 -= numpy.einsum('xpq,qp->x', vs[2], dm0a) * .5
e2 -= numpy.einsum('xpq,qp->x', vs[3], dm0b) * .5
e2 -= numpy.einsum('xpq,qp->x', vs[4], dm0b) * .5
vk = jk.get_jk(mol, [dm0a, dm0b], ['ijkl,jk->s1il', 'ijkl,jk->s1il'],
'int2e_g1g2', 'aa4', 9, hermi=0)
e2 -= numpy.einsum('xpq,qp->x', vk[0], dm0a)
e2 -= numpy.einsum('xpq,qp->x', vk[1], dm0b)
return -e2.reshape(3, 3)
# Note mo10 is the imaginary part of MO^1
def para(magobj, gauge_orig=None, h1=None, s1=None, with_cphf=None):
'''Paramagnetic susceptibility tensor
Kwargs:
h1: A list of arrays. Shapes are [(3,nmo_a,nocc_a), (3,nmo_b,nocc_b)]
First order Fock matrices in MO basis.
s1: A list of arrays. Shapes are [(3,nmo_a,nocc_a), (3,nmo_b,nocc_b)]
First order overlap matrices in MO basis.
with_cphf : boolean or function(dm_mo) => v1_mo
If a boolean value is given, it determines whether the CPHF
equation will be solved or not. The induced potential will be
generated by the function gen_vind.
If a function is given, the CPHF equation will be solved, and the
given function is used to compute the induced potential.
'''
log = logger.Logger(magobj.stdout, magobj.verbose)
cput1 = (time.process_time(), time.time())  # time.clock() was removed in Python 3.8
mol = magobj.mol
mf = magobj._scf
mo_energy = magobj._scf.mo_energy
mo_coeff = magobj._scf.mo_coeff
mo_occ = magobj._scf.mo_occ
orboa = mo_coeff[0][:,mo_occ[0] > 0]
orbob = mo_coeff[1][:,mo_occ[1] > 0]
if h1 is None:
# Imaginary part of F10
dm0 = (numpy.dot(orboa, orboa.T), numpy.dot(orbob, orbob.T))
h1 = magobj.get_fock(dm0, gauge_orig)
h1 = (lib.einsum('xpq,pi,qj->xij', h1[0], mo_coeff[0].conj(), orboa),
lib.einsum('xpq,pi,qj->xij', h1[1], mo_coeff[1].conj(), orbob))
cput1 = log.timer('first order Fock matrix', *cput1)
if s1 is None:
# Imaginary part of S10
s1 = magobj.get_ovlp(mol, gauge_orig)
s1 = (lib.einsum('xpq,pi,qj->xij', s1, mo_coeff[0].conj(), orboa),
lib.einsum('xpq,pi,qj->xij', s1, mo_coeff[1].conj(), orbob))
if with_cphf is None:
with_cphf = magobj.cphf
mo1, mo_e1 = uhf_nmr.solve_mo1(magobj, mo_energy, mo_coeff, mo_occ,
h1, s1, with_cphf)
cput1 = logger.timer(magobj, 'solving mo1 eqn', *cput1)
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
mag_para = numpy.einsum('yji,xji->xy', mo1[0], h1[0])
mag_para+= numpy.einsum('yji,xji->xy', mo1[1], h1[1])
mag_para-= numpy.einsum('yji,xji,i->xy', mo1[0], s1[0], mo_energy[0][occidxa])
mag_para-= numpy.einsum('yji,xji,i->xy', mo1[1], s1[1], mo_energy[1][occidxb])
# + c.c.
mag_para = mag_para + mag_para.conj()
mag_para-= numpy.einsum('xij,yij->xy', s1[0][:,occidxa], mo_e1[0])
mag_para-= numpy.einsum('xij,yij->xy', s1[1][:,occidxb], mo_e1[1])
return -mag_para
class Magnetizability(rhf_mag.Magnetizability):
dia = dia
para = para
get_fock = uhf_nmr.get_fock
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = '''h , 0. 0. 0.
F , 0. 0. .917'''
mol.basis = '631g'
mol.build()
mf = scf.UHF(mol).run()
mag = Magnetizability(mf)
mag.cphf = True
m = mag.kernel()
print(lib.finger(m) - -0.43596639996758657)
mag.gauge_orig = (0,0,1)
m = mag.kernel()
print(lib.finger(m) - -0.76996086788058238)
mag.gauge_orig = (0,0,1)
mag.cphf = False
m = mag.kernel()
print(lib.finger(m) - -0.7973915717274408)
mol = gto.M(atom='''O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587''',
basis='ccpvdz', spin=2)
mf = scf.UHF(mol).run()
mag = Magnetizability(mf)
mag.cphf = True
m = mag.kernel()
print(lib.finger(m) - -4.6700053640388353)
| StarcoderdataPython |
1627792 | <filename>backup-23.09.2021/core/data_generator.py<gh_stars>0
import yaml
import click
import pickle
import argparse
import pandas as pd
import numpy as np
from os import listdir
from os.path import isfile, join
from sklearn.manifold import TSNE
class LoadData:
def __init__(self, name, embedding, fitness_path, keys, objects, normalization):
self.name = name
self.embedding = embedding # common path to the representations
self.fitness_path = fitness_path # file with the fitness values
self.objects = objects # file with the averaged vectors
self.keys = keys
self.normalization = normalization
self.fitness = [f for f in listdir(self.fitness_path)
if isfile(join(self.fitness_path, f))]
def i2v_matrix(self, word_indices, pre_values):
# Called per program
nr = []
num_index = len(word_indices)
if num_index >= 300:
for i in range(300):
embedding_vector = pre_values[word_indices[i]]
embedding_vector = embedding_vector.astype(np.float32)
nr.append(embedding_vector)
else:
for j in range(num_index):
embedding_vector = pre_values[word_indices[j]]
embedding_vector = embedding_vector.astype(np.float32)
nr.append(embedding_vector)
remainder = 300 - num_index
for k in range(num_index, num_index + remainder):
# Converting to int8 to save memory
mask_value = np.full((200), -10)
mask_value = mask_value.astype(np.int8)
nr.append(mask_value)
nr = np.array(nr)
return nr
def ncc(self):
i2v_data = []
ncc_data, goal_data, name_data, keys_data, \
dense_vectors = ([] for i in range(5))
[dense_vectors.append(word) for word in self.objects[0]]
#
# Loop through goal files
#
for prog_l in range(len(self.fitness)):
print(self.fitness_path + self.fitness[prog_l])
with open(self.fitness_path + self.fitness[prog_l]) as fl:
label_yaml = yaml.safe_load(fl)
# Loop over 22
for k in range(len(self.keys)):
goal = label_yaml[self.keys[k]]['goal']
name_ncc = self.fitness[prog_l][:-5]
#
# Loading correspondent inst2vec embedding
#
try:
df = pd.read_csv(self.embedding + '/' + name_ncc + '_seq.csv')
except IOError:
# If the file is missing, fill with a zero placeholder so all saved arrays
# keep the same length (assumed shape (300, 200), matching i2v_matrix output)
i2v_data.append(np.zeros((300, 200)))
goal_data.append(0.0)
name_data.append(name_ncc)
keys_data.append(self.keys[k])
continue
# Flatten the vector (Program's dense vector indices)
flat_values = [item for sublist in df.values for item in sublist]
if (self.normalization == 0):
i2v_representation = self.i2v_matrix(flat_values, dense_vectors)
i2v_data.append(i2v_representation)
elif (self.normalization == 'tSNE'):
# Assumes a self.tSNE helper defined elsewhere; it is not present in this file
tsne_representation = self.tSNE(flat_values, dense_vectors)
goal_data.append(goal)
name_data.append(name_ncc)
keys_data.append(self.keys[k])
goal_data = np.expand_dims(goal_data, axis=1)
name_data = np.expand_dims(name_data, axis=1)
keys_data = np.expand_dims(keys_data, axis=1)
if (self.normalization == 0):
i2v_data = np.array(i2v_data)
# Saving binary
np.savez_compressed(self.name,
i2v=i2v_data,
goal=goal_data,
name=name_data,
keys=keys_data)
print(i2v_data.shape)
else:
#avg_words_data = np.expand_dims(avg_words_data, axis=1)
avg_prog_data = np.expand_dims(avg_prog_data, axis=1)
avg_prog_l1_data = np.expand_dims(avg_prog_l1_data, axis=1)
sum_data = np.expand_dims(sum_data, axis=1)
min_max_data = np.expand_dims(min_max_data, axis=1)
# Saving binary
np.savez_compressed(self.name,
#avg_words=avg_words_data,
avg_prog=avg_prog_data,
avg_prog_l1=avg_prog_l1_data,
sum_prog=sum_data,
min_max=min_max_data,
goal=goal_data,
name=name_data,
keys=keys_data)
print(" avg_prog: %s\n avg_prog_l1: %s\n sum_prog: %s\n min_max: %s\n goal: %s\n name: %s\n keys: %s" %
(avg_prog_data.shape,
avg_prog_l1_data.shape,
sum_data.shape,
min_max_data.shape,
goal_data.shape,
name_data.shape,
keys_data.shape))
def Main():
parser = argparse.ArgumentParser()
parser.add_argument("prog_csv",
metavar='p0',
nargs='?',
const=1,
help='Loading the inst2vec csv files.',
type=str,
default='./../../data-massalin/representations/')
parser.add_argument("prog_label",
metavar='p1',
nargs='?',
const=2,
help='Loading fitness yaml file.',
type=str,
default='./../benchmarks/')
parser.add_argument("suite",
metavar='p2',
nargs='?',
const= 3,
help='[angha_15|angha_w|llvm_300|coremark-pro|mibench]',
type=str,
default='angha_15_2k_random_llvm_train')
parser.add_argument("best",
metavar='p4',
nargs='?',
const=4,
help='[best22|best10]',
type=str,
default='best10')
parser.add_argument("name",
metavar='p6',
nargs='?',
const=6,
help='file name',
type=str,
default='angha_15_2k_random_llvm_train-mask.npz')
parser.add_argument("normalization",
metavar='p7',
nargs='?',
const=7,
help='[1|0|tSNE]',
type=str,
default=0)
args = parser.parse_args()
print("Benchmark: %s \nRepresentation: %s \n>> %s\n" %
(args.suite, args.normalization, args.name))
keys_22, objects = ([] for i in range(2))
#
# Loading inst2vec vocabulary
#
with open('./../common/sequences/vocabulary/emb.p', 'rb') as openfile:
while True:
try:
objects.append(pickle.load(openfile))
except EOFError:
break
#
# Loading sequences
#
with open('./../common/sequences/'+ args.best+'.yaml') as sq:
sequences = yaml.safe_load(sq)
for key, value in sequences.items():
keys_22.append(key)
#
# Path of y values
#
fitness_path = args.prog_label+args.suite+'/results/goal_'+args.best+'/'
args.suite = LoadData(args.name,
args.prog_csv+args.suite+'/'+args.suite+'-noopt-ncc',
fitness_path,
keys_22,
objects,
args.normalization)
args.suite.ncc()
if __name__ == '__main__':
Main()
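# Example invocation (a sketch; the positional values shown are simply the
# argparse defaults above, and the directory layout is an assumption):
#   python data_generator.py ./../../data-massalin/representations/ ./../benchmarks/ \
#       angha_15_2k_random_llvm_train best10 angha_15_2k_random_llvm_train-mask.npz 0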
| StarcoderdataPython |
1714686 | <filename>authcheck/app/common/util.py
import re
import os
import json
import time
import pickle
from app.conf.conf import *
from app.model.model import *
from app.model.exception import *
from flask import request, render_template
from mongoengine import Document
def validate_url(u: str):
"""
Check whether the URL is valid
:param u:
:return:
"""
if not re.match(r'^http[s]?://\S+$', u):
return False
return True
def parse_params(args: str):
"""
Parse query parameters
:param args: aa=1&bb=2&cc=3
:return: {}
"""
ps = {}
for kv in args.split('&'):
_kv = str(kv).split('=', 1)
if len(_kv) < 2:
continue
k, v = _kv
ps[k] = v
return ps
def parse_url_params(url: str):
"""
Extract the parameters from a URL
:param url:
:return: path, raw_params, {
'param1': param1,
'param2': param2
}
"""
if '?' not in url:
return url, None, None
path, raw_params = url.split('?', 1)
return path, raw_params, parse_params(raw_params)
def get_page(page: int, size: int, total: int):
"""
Pagination helper
:param page: current page number (starting from 0)
:param size: page size
:param total: total number of records
:return:
"""
if total == 0:
return {
'dic': ["0"]
}
max_page = int(total / size) if total % size != 0 else int(total / size) - 1
per_page = False if page == 0 else page - 1
next_page = False if page == max_page else page + 1
# 2 pages before, 3 pages after
dic = [i for i in range(page - 2, page + 4) if (0 <= i <= max_page)]
# ellipses
per_ellipsis = True if dic[0] > 1 else False
suf_ellipsis = True if dic[-1] < (max_page - 1) else False
# first and last pages
p_min = "0" if dic[0] > 0 else False
p_max = max_page if dic[-1] < max_page else False
return {
'per_page': per_page,
'next_page': next_page,
'dic': dic,
'per_ellipsis': per_ellipsis,
'suf_ellipsis': suf_ellipsis,
'p_min': p_min,
'p_max': p_max
}
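# Worked example (illustrative input values, worked through by hand):
# get_page(page=5, size=10, total=95) returns
# {'per_page': 4, 'next_page': 6, 'dic': [3, 4, 5, 6, 7, 8],
#  'per_ellipsis': True, 'suf_ellipsis': False, 'p_min': '0', 'p_max': 9}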
def deal_header(url: str, content: bytes, charset='utf-8') -> HeaderModel:
content = content.decode(charset)
lines = content.split('\n', 1)
assert len(lines) > 1
groups = space_pattern.split(lines[0])
assert len(groups) > 2 # e.g: GET /uri HTTP/1.1
method = groups[0]
lines = lines[1].splitlines()
header = {}
for line in lines:
lr = line.split(":", 1)
if len(lr) != 2:
continue
header[lr[0].strip()] = lr[1].strip()
return HeaderModel(url=url, method=method, header=header)
def deal_body(content: bytes, charset='utf-8') -> BodyModel:
return BodyModel(content, charset=charset)
def gen_banner(role, method, url, response_content) -> str:
"""
Generate a banner string for display
:return:
"""
banner = "{} - {}({}) - {} - {}"
return banner.format(str(role)[:8], str(method)[:8], len(str(response_content)), str(url)[:100],
str(response_content)[:150])
def resp_length(banner: str):
"""
Get the response length embedded in a banner
:param banner:
:return:
"""
if not banner:
return 0
try:
left = banner.index('(')
right = banner.index(')')
length = int(banner[left + 1: right])
except ValueError:
return 0
return length
def deal_request(url, raw) -> (HeaderModel, BodyModel):
"""
Process a request
:param url: url
:param raw: base64 encoding of the raw request, including headers and body
:return:
"""
try:
raw = base64.b64decode(raw)
except Exception as e:
raise LibException(e)
f = raw.find(b'\r\n\r\n') # Normally this is the separator, but in special cases (e.g. manually typed input) it may be the one below
if f < 0:
f = raw.find(b'\n\n')
if f < 0:
header = raw.strip()
body = b''
else:
header = raw[: f].strip()
body = raw[f:].strip()
matches = re.findall(rb'charset=(.*?)[\s;]', header)
if len(matches) != 0:
charset = matches[0].decode('utf-8')
else:
charset = encoding
try:
r_header = deal_header(url, header, charset=charset)
r_body = deal_body(body, charset=charset)
except Exception as e:
raise ParserException(e)
return r_header, r_body
def to_json(doc: Document):
if isinstance(doc, list):
t = []
for d in doc:
t.append(to_json(d))
return t
return json.loads(doc.to_json())
__all__ = [
'validate_url', 'parse_params', 'parse_url_params', 'get_page', 'resp_length',
'deal_header', 'deal_body', 'gen_banner', 'deal_request', 'to_json'
]
if __name__ == '__main__':
url = 'http://www.zto.com?a=1&b=2&c=3'
path, raw_params, params = parse_url_params(url)
print(path)
print(raw_params)
print(params)
| StarcoderdataPython |
1737261 | from turtle import Turtle, Screen
import random
screen = Screen()
screen.setup(width=500, height=400)
turtle_colors = ["red", "blue", "green", "orange", "yellow", "purple"]
starting_line = [-125, -75, -25, 25, 75, 125]
list_of_turtles = []
# init turtles
for num_of_turtles in range(0, 6):
n_turtles = Turtle(shape="turtle")
n_turtles.color(turtle_colors[num_of_turtles])
n_turtles.penup()
n_turtles.goto(x=-225, y=starting_line[num_of_turtles])
list_of_turtles.append(n_turtles)
# user input
user_input = screen.textinput(title="Place your bet!", prompt="Select the winning turtle! Enter a color: ")
if user_input:
race_cont = True
while race_cont:
for turtle in list_of_turtles:
if turtle.xcor() > 220:
race_cont = False
winner = turtle.pencolor()
if winner == user_input:
print(f"Winner! The {winner} turtle wins!")
else:
print(f"Loser! The {winner} turtle wins!")
run = random.randint(0, 10)
turtle.forward(run)
screen.exitonclick() | StarcoderdataPython |
3365905 | from django.db import models
from django.utils import timezone
from projects.models.project import Project
class TechnicalSheet(models.Model):
class Meta:
verbose_name = 'technicalsheet'
verbose_name_plural = 'technicalsheets'
created = models.DateTimeField(editable=False, auto_now_add=True)
last_modified = models.DateTimeField(editable=False, auto_now=True)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
test_file = models.FileField(upload_to='documents/tech/', null=True, blank=True)
## other data
## auto save time
''' ## in case auto_now / auto_now_add do not work
def save(self, *args, **kwargs):
if not self.created:
self.created = timezone.now()
self.last_modified = timezone.now()
return super(TechnicalSheet, self).save(*args, **kwargs)
'''
| StarcoderdataPython |
1731862 | <filename>leetcode/143-Reorder-List/ReorderList_001.py
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param {ListNode} head
# @return {void} Do not return anything, modify head in-place instead.
def reorderList(self, head):
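# Added note: collect all node references in list a, build the interleaved
# index order b = [0, n-1, 1, n-2, ...], then relink the nodes in that order
# and terminate the list.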
a = []
b = []
p = head
while p != None:
a.append(p)
p = p.next
i = 0
j = len(a) - 1
size = len(a)
while i < j:
b.append(i)
b.append(j)
i = i + 1
j = j - 1
if i == j:
b.append(i)
for k in range(size):
if k != size - 1:
a[b[k]].next = a[b[k + 1]]
else:
a[b[k]].next = None
| StarcoderdataPython |
1632191 | # run this from terminal with madminer stuff installed to be safe
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from madminer.sampling import combine_and_shuffle
import glob
# MadMiner output
logging.basicConfig(
format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s',
datefmt='%H:%M',
level=logging.DEBUG
)
# Output of all other modules (e.g. matplotlib)
for key in logging.Logger.manager.loggerDict:
if "madminer" not in key:
logging.getLogger(key).setLevel(logging.WARNING)
mg_dir = '/home/software/MG5_aMC_v2_6_2/'
path = "./data/"
delphesDatasetList = [f for f in glob.glob(path + "delphes_data?.h5")]
delphesDatasetList += [f for f in glob.glob(path + "delphes_data??.h5")]
delphesDatasetList += [f for f in glob.glob(path + "delphes_data???.h5")]
delphesDatasetList += [f for f in glob.glob(path + "delphes_data????.h5")]
#delphesDatasetList = ['data/delphes_data.h5'.format(i) for i in range (1,201)]
combine_and_shuffle(
delphesDatasetList,
'data/delphes_data_shuffled.h5',
k_factors=0.00029507, # specific to 1k events in run card and suboptimal simulating!!!
)
print ("Files combined: ",len(delphesDatasetList))
| StarcoderdataPython |
171485 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class Review(models.Model):
comment = models.CharField(max_length=1000)
conversation = models.IntegerField()
title = models.CharField(max_length=100)
style = models.IntegerField()
satisfaction = models.IntegerField()
worker_id = models.IntegerField()
age = models.IntegerField()
gender = models.IntegerField()
price = models.IntegerField()
def __str__(self):
return str(self.id)
class Worker(models.Model):
store_id = models.CharField(max_length=200)
name = models.CharField(max_length=200)
def __str__(self):
return str(self.id)
class Store(models.Model):
store_id = models.CharField(max_length=200)
phone_number = models.CharField(max_length=200)
def __str__(self):
return self.store_id | StarcoderdataPython |
1756109 | from deepnlpf.notifications.email import Email
email = Email()
email.send() | StarcoderdataPython |
3230171 | import robocup
import constants
import main
import math
import skills.touch_ball
import skills._kick
import skills.pass_receive
## AngleReceive accepts a receive_point as a parameter and gets setup there to catch the ball
# It transitions to the 'aligned' state once it's there within its error thresholds and is steady
# Set its 'ball_kicked' property to True to tell it to dynamically update its position based on where
# the ball is moving and attempt to catch it.
# It will move to the 'completed' state if it catches the ball, otherwise it will go to 'failed'.
# Kick is a single_robot_behavior, so no need to import both
class AngleReceive(skills.pass_receive.PassReceive):
def __init__(self):
super().__init__(
captureFunction=(lambda: skills.touch_ball.TouchBall()))
self._target_point = None
self.kick_power = 1
self.target_point = constants.Field.TheirGoalSegment.center()
self.ball_kicked = False
self.target_angle = 0
## The point that the receiver should expect the ball to hit its mouth
# Default: constants.Field.TheirGoalSegment.center()
@property
def target_point(self):
return self._target_point
@target_point.setter
def target_point(self, value):
self._target_point = value
self.recalculate()
## Returns an adjusted angle, accounting for ball speed
#
# First finds the rejection, which is the X component of the ball's velocity in the reference
# frame of the robot, with the mouth facing the y axis. Then we calculate the angle required to
# offset this rejection angle (if possible).
def adjust_angle(self, target_angle, ball_angle=None, ball_speed=None):
ball = main.ball()
if ball_angle == None:
ball_angle = (ball.vel).angle()
if ball_speed == None:
ball_speed = ball.vel.mag()
angle_diff = target_angle - ball_angle
rejection = math.sin(angle_diff) * ball_speed
# The min/max is to bound the value by -1 and 1.
adjust = math.asin(min(1, max(-1, rejection /
constants.Robot.MaxKickSpeed)))
return adjust + target_angle
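# Illustrative example (assumes constants.Robot.MaxKickSpeed == 8 m/s, a value
# not taken from this file): with target_angle = 0, ball_angle = pi/2 and
# ball_speed = 2, the rejection is sin(-pi/2)*2 = -2, so the returned angle is
# asin(-2/8) ~= -0.25 rad, i.e. the mouth turns slightly against the ball's
# sideways motion.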
# calculates:
# self._pass_line - the line from the ball along where we think we're going
# self._target_pos - where the bot should be
# self._angle_error - difference in where we're facing and where we want to face (in radians)
# self._x_error
# self._y_error
def recalculate(self):
# can't do squat if we don't know what we're supposed to do
if self.receive_point == None or self.robot == None or self.target_point == None:
return
ball = main.ball()
if self.ball_kicked:
# when the ball's in motion, the line is based on the ball's velocity
self._pass_line = robocup.Line(ball.pos, ball.pos + ball.vel * 10)
# After kicking, apply angle calculations
target_angle_rad = self.adjust_angle((self.target_point -
self.robot.pos).angle())
# Removes angle adjustment
# target_angle_rad = (self.target_point - self.robot.pos).angle()
self._kick_line = robocup.Line(self.robot.pos, robocup.Point(
self.robot.pos.x + math.cos(self.robot.angle) * 10,
self.robot.pos.y + math.sin(self.robot.angle) * 10))
else:
# if the ball hasn't been kicked yet, we assume it's going to go through the receive point
self._pass_line = robocup.Line(ball.pos, self.receive_point)
# Assume ball is kicked at max speed and is coming from the ball point to the location of our robot. Then average this with the target angle.
target_angle_rad = self.adjust_angle(
(self.target_point - self.robot.pos).angle(),
(self.robot.pos - main.ball().pos).angle(),
constants.Robot.MaxKickSpeed)
# TODO make this faster by caching the .angle() part
target_angle_rad = (
target_angle_rad +
(self.target_point - self.robot.pos).angle()) / 2
self._kick_line = robocup.Line(self.receive_point,
self.target_point)
self._angle_facing = target_angle_rad
self.target_angle = target_angle_rad
angle_rad = self.robot.angle
self._angle_error = target_angle_rad - angle_rad
if self.ball_kicked:
receive_before_adjust = self._pass_line.nearest_point(
self.robot.pos)
else:
receive_before_adjust = self.receive_point
# Make the receive point be the mouth, rather than the center of the robot.
# Assumes mouth of robot is at the edge.
self._target_pos = receive_before_adjust - robocup.Point(
constants.Robot.Radius * math.cos(self.robot.angle),
constants.Robot.Radius * math.sin(self.robot.angle))
# Code to provide slipback when receiving the ball
# pass_line_dir = (self._pass_line.get_pt(1) - self._pass_line.get_pt(0)).normalized()
# self._target_pos = actual_receive_point + pass_line_dir * constants.Robot.Radius
# vector pointing down the pass line toward the kicker
self._x_error = self._target_pos.x - self.robot.pos.x
self._y_error = self._target_pos.y - self.robot.pos.y
def execute_running(self):
super().execute_running()
self.recalculate()
self.robot.face(self.robot.pos + robocup.Point(
math.cos(self._angle_facing), math.sin(self._angle_facing)))
if self._kick_line != None:
main.system_state().draw_line(self._kick_line,
constants.Colors.Red, "Shot")
def execute_receiving(self):
super().execute_receiving()
self.ball_kicked = True
# Kick the ball!
self.robot.kick(self.kick_power)
if self.target_point != None:
main.system_state().draw_circle(self.target_point, 0.03,
constants.Colors.Blue, "Target")
| StarcoderdataPython |
95337 | # vim: set filetype=python fileencoding=utf-8:
# -*- coding: utf-8 -*-
#============================================================================#
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
#============================================================================#
''' Immutable module class and namespace factory. '''
# https://www.python.org/dev/peps/pep-0396/
__version__ = '2.0a202112310633'
from . import exceptions
from .base import Class, NamespaceClass, create_namespace
class __( metaclass = NamespaceClass ):
from inspect import ismodule as is_module
from sys import modules
from types import ModuleType as Module # type: ignore
from .base import (
InaccessibleAttribute,
base_package_name,
create_argument_validation_exception,
create_attribute_immutability_exception,
create_attribute_indelibility_exception,
create_attribute_nonexistence_exception,
intercept,
is_operational_name, select_public_attributes,
validate_attribute_existence,
validate_attribute_name, )
class Module( __.Module, metaclass = Class ):
''' Module whose attributes are immutable except during module definition.
Can replace the ``__class__`` attribute on an existing module.
Non-public attributes of the module are concealed from :py:func:`dir`.
Also, a copy of the module dictionary is returned when the ``__dict__``
attribute is accessed; this is done to remove a backdoor by which
attributes could be mutated.
.. note::
Copies of the module dictionary are mutable so as to not violate the
internal expectations of Python as well as important packages,
such as :py:mod:`doctest`. Ideally, these would be immutable,
but cannot be as of this writing. '''
@__.intercept
def __getattribute__( self, name ):
if '__dict__' == name: return dict( super( ).__getattribute__( name ) )
try: return super( ).__getattribute__( name )
except AttributeError as exc:
raise __.create_attribute_nonexistence_exception(
name, self ) from exc
@__.intercept
def __setattr__( self, name, value ):
__.validate_attribute_name( name, self )
raise __.create_attribute_immutability_exception( name, self )
@__.intercept
def __delattr__( self, name ):
__.validate_attribute_name( name, self )
__.validate_attribute_existence( name, self )
raise __.create_attribute_indelibility_exception( name, self )
@__.intercept
def __dir__( self ): return __.select_public_attributes( __class__, self )
def reclassify_module( module ):
''' Assigns :py:class:`Module` as class for module.
Takes either a module object or the name of a module
in :py:data:`sys.modules`. If the module has already been reclassified,
then nothing is done (i.e., the operation is idempotent). '''
module_validity_error = __.create_argument_validation_exception(
'module', reclassify_module,
'module or name of module in Python loaded modules dictionary' )
if isinstance( module, Module ): return
if __.is_module( module ): pass
elif isinstance( module, str ):
module = __.modules.get( module )
if None is module: raise module_validity_error
else: raise module_validity_error
module.__class__ = Module
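# Example usage (a sketch; 'mypackage.config' is a hypothetical module name):
#   import mypackage.config
#   reclassify_module( mypackage.config )
#   mypackage.config.DEBUG = True   # now raises an attribute immutability exception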
reclassify_module( base ) # type: ignore # pylint: disable=undefined-variable
reclassify_module( exceptions )
reclassify_module( __name__ )
| StarcoderdataPython |
1688939 | <reponame>mayneyao/notion-bill
import fire
import pandas as pd
class PersonBill:
def __init__(self, name):
self.name = name
self.income = {}
self.payment = {}
self.need_pay = {}
self.items = []
def print_payment(self):
for name, money in self.payment.items():
print('Needs to pay {} an amount of {}'.format(name, money))
def print_need_pay(self):
for obj, money in self.need_pay.items():
_type = 'collect from' if money > 0 else 'pay to'
print('{} needs to {} {}: {}'.format(self.name, _type, obj, abs(money)))
class Bill:
def __init__(self, csv_file):
bill_df = pd.read_csv(csv_file)
self.bill_df = bill_df[(bill_df['支付状态'] == 'Yes') & (bill_df['归档'] == 'No')]
self.persons = self.get_all_person()
self.person_detail = {}
for person in self.persons:
self.set_person_bill_detail(person)
def run(self):
self.handle_bill()
self.get_all_person()
for name, detail in self.person_detail.items():
self.get_person_need_pay(name)
detail.print_need_pay()
def set_person_bill_detail(self, name):
self.person_detail[name] = PersonBill(name)
def get_all_person(self):
persons = set(self.bill_df['参与人'])
persons.update(set(self.bill_df['实际支付人']))
all_multi_person = [person for person in persons if ',' in str(person)]
one_persons = persons.difference(all_multi_person)
for multi_person in all_multi_person:
one_persons.update(set(multi_person.split(',')))
return one_persons
def a_pay_to_b(self, a, b, money):
"""
a needs to pay b
:param money: amount
:param a:
:param b:
:return:
"""
# payment
if not self.person_detail[a].payment.get(b, None):
self.person_detail[a].payment[b] = money
else:
self.person_detail[a].payment[b] += money
# income
if not self.person_detail[b].income.get(a, None):
self.person_detail[b].income[a] = money
else:
self.person_detail[b].income[a] += money
def handle_bill(self):
for index, item in self.bill_df.iterrows():
self.handle_item(item)
def handle_item(self, item):
payer = item['实际支付人']
if not isinstance(payer, float):
if not isinstance(item['参与人'], float):
participant = item['参与人'].split(',')
money = item['金额']
_type = item['支付类型']
if _type == '平分':
avg = money / len(participant)
for person in participant:
if person != payer:
self.a_pay_to_b(person, payer, avg)
elif _type == '个人':
assert len(participant) == 1
if participant[0] != payer:
self.a_pay_to_b(participant[0], payer, money)
def get_person_need_pay(self, person_name):
payment = self.person_detail[person_name].payment
income = self.person_detail[person_name].income
person = [name for name in income.keys()]
person.extend([name for name in payment.keys()])
need_pay = {}
for obj in person:
need_pay[obj] = (income.get(obj, 0)) - (payment.get(obj, 0))
self.person_detail[person_name].need_pay = need_pay
def main(file):
bill = Bill(file)
bill.run()
if __name__ == '__main__':
fire.Fire(main)
| StarcoderdataPython |
4808656 | from output.models.ms_data.regex.re_i12_xsd.re_i12 import (
Regex,
Doc,
)
__all__ = [
"Regex",
"Doc",
]
| StarcoderdataPython |
4803872 | import numpy as np
from scipy import stats, special
from abc import ABC
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
class component(ABC):
"""Abstract base class to rerepsent a galaxy component
A component is specified by it's joint density p(t,x,v,z) over stellar age
t, 2D position x, line-of-sight velocity v, and metallicity z. Sub-classes
of `component` correspond to specific (i) factorisations of the joint
density and, (ii) implementations of the factors. Some details of p(t,x,v,z)
are shared between all components:
- the star formation history is independent of other variables - i.e. one
can factor p(t,x,v,z) = p(t) ...,
- p(t) is a beta distribution,
- age-metallicity relations from chemical evolution model from eqns 3-10
of Zhu et al 2020, parameterised by a depletion timescale `t_dep`,
- spatial properties are set in co-ordinate system defined by a center and
rotation relative to datacube x-axis.
- the `get_p_vx` method, since generically p(v,x) = p(v|x)p(x)
Args:
cube: a pkm.mock_cube.mockCube.
center (x0,y0): co-ordinates of the component center.
rotation: angle (radians) between x-axes of component and cube.
"""
def __init__(self,
cube=None,
center=(0,0),
rotation=0.):
self.cube = cube
self.center = center
self.rotation = rotation
costh = np.cos(rotation)
sinth = np.sin(rotation)
rot_matrix = np.array([[costh, sinth],[-sinth,costh]])
xxyy = np.dstack((self.cube.xx-self.center[0],
self.cube.yy-self.center[1]))
xxyy_prime = np.einsum('kl,ijk',
rot_matrix,
xxyy,
optimize=True)
self.xxp = xxyy_prime[:,:,0]
self.yyp = xxyy_prime[:,:,1]
self.get_z_interpolation_grid()
def get_beta_a_b_from_lmd_phi(self, lmd, phi):
"""Convert from (total, mean) to (a,b) parameters of beta distribution
Args:
lmd: beta distribution total parameter, lmd>0
phi: beta distribution mean parameter, 0<phi<1
Returns:
(a,b): shape parameters
"""
a = lmd * phi
b = lmd * (1. - phi)
return a, b
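# Worked example (illustrative numbers, not from the original source):
# lmd=10, phi=0.3 gives (a, b) = (3.0, 7.0), i.e. a Beta(3, 7) star-formation
# history with mean 0.3 in scaled time.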
def set_p_t(self,
lmd=None,
phi=None,
cdf_start_end=(0.05, 0.95)):
"""Set the star formation history
p(t) = Beta(t; lmd, phi), where (lmd, phi) are (total, mean) parameters.
Additionally this sets `self.t_pars` which is used for interpolating
quantities against t. Any time varying quantity (e.g. disk size) is
varied between start and end times as specified by CDF p(t).
Args:
lmd: beta distribution total parameter, lmd>0.
phi: beta distribution mean parameter, 0<phi<1.
cdf_start_end (tuple): CDF values of p(t) defining start and end
times of disk build up
"""
a, b = self.get_beta_a_b_from_lmd_phi(lmd, phi)
assert (a>1.)+(b>1.) >= 1, "SFH is bimodal: increase lmd?"
age_bin_edges = self.cube.ssps.par_edges[1]
age_loc = age_bin_edges[0]
age_scale = age_bin_edges[-1] - age_bin_edges[0]
beta = stats.beta(a, b,
loc=age_loc,
scale=age_scale)
beta_cdf = beta.cdf(age_bin_edges)
t_weights = beta_cdf[1:] - beta_cdf[:-1]
dt = age_bin_edges[1:] - age_bin_edges[:-1]
p_t = t_weights/dt
t_start_end = beta.ppf(cdf_start_end)
t_start, t_end = t_start_end
idx_start_end = np.digitize(t_start_end, age_bin_edges)
delta_t = t_end - t_start
self.t_pars = dict(lmd=lmd,
phi=phi,
t_start=t_start,
t_end=t_end,
delta_t=delta_t,
idx_start_end=idx_start_end,
dt=dt)
self.p_t = p_t
def get_p_t(self, density=True, light_weighted=False):
"""Get p(t)
Args:
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
if light_weighted is False:
p_t = self.p_t.copy()
if density is False:
p_t *= self.cube.ssps.delta_t
else:
p_tz = self.get_p_tz(density=False, light_weighted=True)
p_t = np.sum(p_tz, 1)
if density is True:
ssps = self.cube.ssps
p_t = p_t/ssps.delta_t
return p_t
def plot_sfh(self):
"""Plot the star-foramtion history p(t)
"""
ax = plt.gca()
ax.plot(self.cube.ssps.par_cents[1], self.p_t, '-o')
ax.set_xlabel('Time [Gyr]')
ax.set_ylabel('pdf')
plt.tight_layout()
plt.show()
return
def get_z_interpolation_grid(self,
t_dep_lim=(0.1, 10.),
n_t_dep=1000,
n_z=1000):
"""Store a grid used for interpolating age-metallicity relations
Args:
t_dep_lim: (lo,hi) allowed values of depletion timescale in Gyr
n_t_dep (int): number of steps to use for interpolating t_dep
n_z (int): number of steps to use for interpolating metallicity
"""
a = -0.689
b = 1.899
self.ahat = 10.**a
self.bhat = b-1.
z_max = self.ahat**(-1./self.bhat)
# reduce z_max slightly to avoid divide by 0 warnings
z_max *= 0.9999
t_H = self.cube.ssps.par_edges[1][-1]
log_t_dep_lim = np.log10(t_dep_lim)
t_dep = np.logspace(*log_t_dep_lim, n_t_dep)
z = np.linspace(z_max, 0., n_z)
tt_dep, zz = np.meshgrid(t_dep, z, indexing='ij')
t = t_H - tt_dep * zz/(1. - self.ahat * zz**self.bhat)
self.t = t
t_ssps = self.cube.ssps.par_cents[1]
n_t_ssps = len(t_ssps)
z_ssps = np.zeros((n_t_dep, n_t_ssps))
for i in range(n_t_dep):
z_ssps[i] = np.interp(t_ssps, t[i], z)
self.z_t_interp_grid = dict(t=t_ssps,
z=z_ssps,
t_dep=t_dep)
def evaluate_ybar(self):
"""Evaluate the noise-free data-cube
Evaluate the integral
ybar(x, omega) = int_{-inf}^{inf} s(omega-v ; t,z) P(t,v,x,z) dv dt dz
where
omega = ln(wavelength)
s(omega ; t,z) are stored SSP templates
This integral is a convolution over velocity v, which we evaluate using
Fourier transforms (FT). FTs of SSP templates are stored in`ssps.FXw`
while FTs of the velocity factor P(v|t,x) of the density P(t,v,x,z)
are evaluated using the analytic expression of the FT of the normal
distribution. Sets the result to `self.ybar`.
"""
cube = self.cube
ssps = cube.ssps
# get P(t,x,z)
P_txz = self.get_p_txz(density=False)
# get FT of P(v|t,x)
nl = ssps.FXw.shape[0]
omega = np.linspace(0, np.pi, nl)
omega /= ssps.dv
omega = omega[:, np.newaxis, np.newaxis, np.newaxis]
exponent = -1j*self.mu_v*omega - 0.5*(self.sig_v*omega)**2
F_p_v_tx = np.exp(exponent)
# get FT of SSP templates s(w;t,z)
F_s_w_tz = ssps.FXw
F_s_w_tz = np.reshape(F_s_w_tz, (-1,)+ssps.par_dims)
# get FT of ybar
args = P_txz, F_p_v_tx, F_s_w_tz
F_ybar = np.einsum('txyz,wtxy,wzt->wxy', *args, optimize=True)
ybar = np.fft.irfft(F_ybar, self.cube.ssps.n_fft, axis=0)
self.ybar = ybar
def get_p_vx(self, v_edg, density=True, light_weighted=False):
"""Get p(v,x)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_v_x = self.get_p_v_x(v_edg,
density=density,
light_weighted=light_weighted)
p_x = self.get_p_x(density=density, light_weighted=light_weighted)
p_vx = p_v_x*p_x
return p_vx
class growingDisk(component):
"""A growing disk with age-and-space dependent velocities and enrichments
The (mass-weighted) joint density of this component can be factorised as
p(t,x,v,z) = p(t) p(x|t) p(v|t,x) p(z|t,x)
where the factors are given by:
- p(t) : a beta distribution (see `set_p_t`)
- p(x|t) : cored power-law stratified with age-varying flattening and slope
(see `set_p_x_t`)
- p(v|t,x) : Gaussians with age-and-space varying means and dispersions.
Mean velocity maps resemble rotating disks (see `set_mu_v`) while
dispersions drop off as power-laws on ellipses (see `set_sig_v`)
- p(z|t,x) : chemical evolution model defined in equations 3-10 of
Zhu et al 2020, parameterised by a spatially varying depletion
timescale (see `set_p_z_tx`)
Args:
cube: a pkm.mock_cube.mockCube.
center (x0,y0): co-ordinates of the component center.
rotation: angle (radians) between x-axes of component and cube.
"""
def __init__(self,
cube=None,
center=(0,0),
rotation=0.):
super(growingDisk, self).__init__(
cube=cube,
center=center,
rotation=rotation)
def linear_interpolate_t(self,
f_end,
f_start):
"""Linearly interpolate f against time, given boundary values
Args:
f_end: value of f at end of disk build up i.e. more recently
f_start: value of f at start of disk build up i.e. more in the past
Returns:
array: f interpolated in age-bins of SSP models, set to constant
values outside start/end times
"""
t = self.cube.ssps.par_cents[1]
delta_f = f_start - f_end
t_from_start = t - self.t_pars['t_start']
f = f_end + delta_f/self.t_pars['delta_t'] * t_from_start
f[t < self.t_pars['t_start']] = f_end
f[t > self.t_pars['t_end']] = f_start
return f
def set_p_x_t(self,
q_lims=(0.5, 0.1),
rc_lims=(0.5, 0.1),
alpha_lims=(0.5, 2.)):
"""Set the density p(x|t) as cored power-law in elliptical radius
Densities are cored power-laws stratified on elliptical radius r,
r^2 = x^2 + (y/q)^2
p(x|t) = (r+rc)^-alpha
where the disk axis ratio q(t) and slope alpha(t) vary linearly with
stellar age between values specified for (young, old) stars.
Args:
q_lims: (young,old) value of disk y/x axis ratio
rc_lims: (young,old) value of disk core-size in elliptical radii
alpha_lims: (young,old) value of power-law slope
"""
# check input
q_lims = np.array(q_lims)
assert np.all(q_lims > 0.)
rc_lims = np.array(rc_lims)
assert np.all(rc_lims > 0.)
alpha_lims = np.array(alpha_lims)
assert np.all(alpha_lims >= 0.)
# get parameters vs time
q = self.linear_interpolate_t(*q_lims)
rc = self.linear_interpolate_t(*rc_lims)
alpha = self.linear_interpolate_t(*alpha_lims)
q = q[:, np.newaxis, np.newaxis]
rc = rc[:, np.newaxis, np.newaxis]
alpha = alpha[:, np.newaxis, np.newaxis]
rr2 = self.xxp**2 + (self.yyp/q)**2
rr = rr2 ** 0.5
rho = (rr+rc) ** -alpha
total_mass_per_t = np.sum(rho * self.cube.dx * self.cube.dy, (1,2))
rho = (rho.T/total_mass_per_t).T
self.x_t_pars = dict(q_lims=q_lims,
rc_lims=rc_lims,
alpha_lims=alpha_lims)
# rearrange shape from [t,x,y] to match function signature [x,y,t]
rho = np.rollaxis(rho, 0, 3)
self.p_x_t = rho
def set_t_dep(self,
q=0.1,
alpha=1.5,
t_dep_in=0.5,
t_dep_out=6.):
"""Set spatially-varying depletion timescale
t_dep varies as power law in elliptical radius (with axis ratio `q`)
with power-law slope `alpha`, from central value `t_dep_in` to outer
value `t_dep_out`.
Args:
q : y/x axis ratio of ellipses of `t_dep` equicontours
alpha : power law slope for varying `t_dep`
t_dep_in : central value of `t_dep`
t_dep_out : outer value of `t_dep`
"""
# check input
assert q > 0.
assert alpha >= 1.
assert (t_dep_in > 0.1) and (t_dep_in < 10.0)
assert (t_dep_out > 0.1) and (t_dep_out < 10.0)
# evaluate t_dep maps
rr2 = self.xxp**2 + (self.yyp/q)**2
rr = rr2**0.5
log_t_dep_in = np.log(t_dep_in)
log_t_dep_out = np.log(t_dep_out)
delta_log_t_dep = log_t_dep_in - log_t_dep_out
log_t_dep = log_t_dep_out + delta_log_t_dep * alpha**-rr
t_dep = np.exp(log_t_dep)
self.t_dep_pars = dict(q=q,
alpha=alpha,
t_dep_in=t_dep_in,
t_dep_out=t_dep_out)
self.t_dep = t_dep
def set_p_z_tx(self):
"""Set p(z|t,x) Zhu+20 given enrichment model and spatially varying t_dep
Evaluates the chemical evolution model defined in equations 3-10 of
Zhu et al 2020, parameterised by a spatially varying depletion
timescale stored by `set_t_dep`.
"""
del_t_dep = self.t_dep[:,:,np.newaxis] - self.z_t_interp_grid['t_dep']
abs_del_t_dep = np.abs(del_t_dep)
idx_t_dep = np.argmin(abs_del_t_dep, axis=-1)
self.idx_t_dep = idx_t_dep
idx_t_dep = np.ravel(idx_t_dep)
z_mu = self.z_t_interp_grid['z'][idx_t_dep, :]
z_mu = np.reshape(z_mu, (self.cube.nx, self.cube.ny, -1))
z_mu = np.moveaxis(z_mu, -1, 0)
z_sig2 = self.ahat * z_mu**self.bhat
log_z_edg = self.cube.ssps.par_edges[0]
del_log_z = log_z_edg[1:] - log_z_edg[:-1]
x_xsun = 1. # i.e. assuming galaxy has hydrogen mass fraction = solar
lin_z_edg = 10.**log_z_edg * x_xsun
nrm = stats.norm(loc=z_mu, scale=z_sig2**0.5)
lin_z_edg = lin_z_edg[:, np.newaxis, np.newaxis, np.newaxis]
cdf_z_tx = nrm.cdf(lin_z_edg)
p_z_tx = cdf_z_tx[1:] - cdf_z_tx[:-1]
nrm = np.sum(p_z_tx.T * del_log_z, -1).T
p_z_tx /= nrm
self.p_z_tx = p_z_tx
def set_mu_v(self,
q_lims=(0.5, 0.5),
rmax_lims=(0.1, 1.),
vmax_lims=(50., 250.)):
"""Set age-and-space dependent mean velocities resembling rotating disks
Mean velocity maps have rotation-curves along x-axis peaking at v_max at
r_max then falling to 0 for r->inf. Given by the equation:
E[p(v|t,[x,y])] = cos(theta) * Kr/(r+rc)^3
where
r^2 = x^2 + (y/q)^2, theta = arctan(x/(y/q))
K and rc are chosen to give peak velocity vmax at distance rmax.
The quantities q, rmax and vmax vary linearly with stellar age between
the values specified for (young,old) stars.
Args:
q_lims: (young,old) value of y/x axis ratio of mu(v) equicontours.
rmax_lims: (young,old) distance of maximum velocity along x-axis.
vmax_lims: (young,old) maximum velocity.
"""
# check input
q_lims = np.array(q_lims)
assert np.all(q_lims > 0.)
rmax_lims = np.array(rmax_lims)
vmax_lims = np.array(vmax_lims)
sign_vmax = np.sign(vmax_lims)
# check vmax's have consistent directions and magnitudes
all_positive = np.isin(sign_vmax, [0,1])
all_negative = np.isin(sign_vmax, [0,-1])
assert np.all(all_positive) or np.all(all_negative)
# linearly interpolate and reshape inputs
q = self.linear_interpolate_t(*q_lims)
rmax = self.linear_interpolate_t(*rmax_lims)
vmax = self.linear_interpolate_t(*vmax_lims)
rc = 2.*rmax
K = vmax*(rmax+rc)**3/rmax
q = q[:, np.newaxis, np.newaxis]
rc = rc[:, np.newaxis, np.newaxis]
K = K[:, np.newaxis, np.newaxis]
# make mu_v maps
th = np.arctan2(self.yyp/q, self.xxp)
# idx = np.where(self.xxp==0)
# th[:, idx[0], idx[1]] = np.pi/2.
rr2 = self.xxp**2 + (self.yyp/q)**2
rr = rr2**0.5
mu_v = K*rr/(rr+rc)**3 * np.cos(th)
self.mu_v_pars = dict(q_lims=q_lims,
rmax_lims=rmax_lims,
vmax_lims=vmax_lims)
self.mu_v = mu_v
def set_sig_v(self,
q_lims=(0.5, 0.1),
alpha_lims=(1.5, 2.5),
sig_v_in_lims=(50., 250.),
sig_v_out_lims=(10., 50)):
"""Set age-and-space dependent velocity dispersion maps
Dispersion maps vary as power-laws between central value sig_v_in, outer
value sig_v_out, with slopes alpha. Velocity dispersion is constant on
ellipses with y/x axis-ratio = q. The quantities q, alpha, sig_v_in,
sig_v_out vary linearly with stellar age between values specified for
(young,old) stars.
Args:
q_lims: (young,old) values of y/x axis-ratio of sigma equicontours.
alpha_lims: (young,old) value of power-law slope.
sig_v_in_lims: (young,old) value of central dispersion.
sig_v_out_lims: (young,old) value of outer dispersion.
"""
# check input
q_lims = np.array(q_lims)
assert np.all(q_lims > 0.)
alpha_lims = np.array(alpha_lims)
assert np.all(alpha_lims >= 1.)
sig_v_in_lims = np.array(sig_v_in_lims)
assert np.all(sig_v_in_lims > 0.)
sig_v_out_lims = np.array(sig_v_out_lims)
assert np.all(sig_v_out_lims > 0.)
# linearly interpolate and reshape inputs
q = self.linear_interpolate_t(*q_lims)
alpha = self.linear_interpolate_t(*alpha_lims)
sig_v_in = self.linear_interpolate_t(*sig_v_in_lims)
sig_v_out = self.linear_interpolate_t(*sig_v_out_lims)
q = q[:, np.newaxis, np.newaxis]
alpha = alpha[:, np.newaxis, np.newaxis]
sig_v_in = sig_v_in[:, np.newaxis, np.newaxis]
sig_v_out = sig_v_out[:, np.newaxis, np.newaxis]
# evaluate sig_v maps
rr2 = self.xxp**2 + (self.yyp/q)**2
rr = rr2**0.5
log_sig_v_in = np.log(sig_v_in)
log_sig_v_out = np.log(sig_v_out)
delta_log_sig_v = log_sig_v_in - log_sig_v_out
log_sig = log_sig_v_out + delta_log_sig_v * alpha**-rr
sig = np.exp(log_sig)
self.sig_v_pars = dict(q_lims=q_lims,
alpha_lims=alpha_lims,
sig_v_in_lims=sig_v_in_lims,
sig_v_out_lims=sig_v_out_lims)
self.sig_v = sig
def get_p_x_t(self, density=True, light_weighted=False):
"""Get p(x|t)
Args:
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
if light_weighted is False:
p_x_t = self.p_x_t.copy()
if density is False:
p_x_t *= (self.cube.dx * self.cube.dy)
else:
p_txz = self.get_p_txz(density=density, light_weighted=True)
if density is True:
ssps = self.cube.ssps
p_txz = p_txz * ssps.delta_z
p_tx = np.sum(p_txz, -1)
p_t = self.get_p_t(density=density, light_weighted=True)
p_x_t = (p_tx.T/p_t).T
p_x_t = np.einsum('txy->xyt', p_x_t)
return p_x_t
def get_p_tx(self, density=True, light_weighted=False):
"""Get p(t,x)
Args:
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_x_t = self.get_p_x_t(density=density, light_weighted=light_weighted)
p_t = self.get_p_t(density=density, light_weighted=light_weighted)
p_xt = p_x_t*p_t
p_tx = np.einsum('xyt->txy', p_xt)
return p_tx
def get_p_z_tx(self, density=True, light_weighted=False):
"""Get p(z|t,x)
Args:
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
if light_weighted is False:
p_z_tx = self.p_z_tx.copy()
else:
p_txz = self.get_p_txz(density=True, light_weighted=True)
p_tx = self.get_p_tx(density=True, light_weighted=True)
p_z_tx = (p_txz.T/p_tx.T).T
p_z_tx = np.einsum('txyz->ztxy', p_z_tx)
if density is False:
dz = self.cube.ssps.delta_z
na = np.newaxis
dz = dz[:, na, na, na]
p_z_tx *= dz
return p_z_tx
def get_p_txz(self, density=True, light_weighted=False):
"""Get p(t,x,z)
Args:
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
new_ax = np.newaxis
p_t = self.get_p_t(density=density)
p_t = p_t[:, new_ax, new_ax, new_ax]
p_x_t = self.get_p_x_t(density=density)
p_x_t = np.rollaxis(p_x_t, 2, 0)
p_x_t = p_x_t[:, :, :, new_ax]
p_z_tx = self.get_p_z_tx(density=density)
p_z_tx = np.rollaxis(p_z_tx, 0, 4)
p_txz = p_t * p_x_t * p_z_tx
if light_weighted:
ssps = self.cube.ssps
light_weights = ssps.light_weights[:,new_ax,new_ax,:]
P_txz_mass_wtd = self.get_p_txz(density=False)
normalisation = np.sum(P_txz_mass_wtd*light_weights)
p_txz = p_txz*light_weights/normalisation
return p_txz
def get_p_x(self, density=True, light_weighted=False):
"""Get p(x)
Args:
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
if light_weighted is False:
p_x_t = self.get_p_x_t(density=density)
P_t = self.get_p_t(density=False)
p_x = np.sum(p_x_t * P_t, -1)
else:
na = np.newaxis
ssps = self.cube.ssps
p_txz = self.get_p_txz(density=density, light_weighted=True)
if density is False:
p_x = np.sum(p_txz, (0,3))
else:
delta_tz = ssps.delta_t[:,na,na,na]*ssps.delta_z[na,na,na,:]
p_x = np.sum(p_txz*delta_tz, (0,3))
return p_x
def get_p_z(self, density=True, light_weighted=False):
"""Get p(z)
Args:
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
na = np.newaxis
if light_weighted is False:
p_z_tx = self.get_p_z_tx(density=density)
P_x_t = self.get_p_x_t(density=False) # to marginalise, must be a probability
P_x_t = np.einsum('xyt->txy', P_x_t)
P_x_t = P_x_t[na,:,:,:]
P_t = self.get_p_t(density=False) # to marginalise, must be a probability
P_t = P_t[na,:,na,na]
p_z = np.sum(p_z_tx * P_x_t * P_t, (1,2,3))
else:
p_tz = self.get_p_tz(density=density, light_weighted=True)
if density is False:
p_z = np.sum(p_tz, 0)
else:
ssps = self.cube.ssps
delta_t = ssps.delta_t[:,na]
p_z = np.sum(p_tz*delta_t, 0)
return p_z
def get_p_tz_x(self, density=True, light_weighted=False):
"""Get p(t,z|x)
Args:
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
new_ax = np.newaxis
# evaluate both as densities...
p_txz = self.get_p_txz(density=density)
p_x = self.get_p_x(density=density)
# ... since dx appears on top and bottom, hence cancel
p_tz_x = p_txz/p_x[new_ax,:,:,new_ax] # shape txz
p_tz_x = np.rollaxis(p_tz_x, 3, 1) # shape tzx
if light_weighted:
ssps = self.cube.ssps
light_weights = ssps.light_weights[:,:,new_ax,new_ax]
P_tz_x_mass_wtd = self.get_p_tz_x(density=False)
normalisation = np.sum(P_tz_x_mass_wtd*light_weights, (0,1))
p_tz_x = p_tz_x*light_weights/normalisation
return p_tz_x
def get_p_tz(self, density=True, light_weighted=False):
"""Get p(t,z)
Args:
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_tz_x = self.get_p_tz_x(density=density)
P_x = self.get_p_x(density=False)
p_tz = np.sum(p_tz_x * P_x, (2,3))
if light_weighted:
ssps = self.cube.ssps
P_tz_mass_wtd = self.get_p_tz(density=False)
normalisation = np.sum(P_tz_mass_wtd*ssps.light_weights)
p_tz = p_tz*ssps.light_weights/normalisation
return p_tz
def get_p_v_tx(self, v_edg, density=True, light_weighted=False):
"""Get p(v|t,x)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
na = np.newaxis
if light_weighted is False:
v_edg = v_edg[:, na, na, na]
norm = stats.norm(loc=self.mu_v, scale=self.sig_v)
p_v_tx = norm.cdf(v_edg[1:]) - norm.cdf(v_edg[:-1])
if density is True:
dv = v_edg[1:] - v_edg[:-1]
p_v_tx /= dv
else:
p_tvxz = self.get_p_tvxz(v_edg, density=True, light_weighted=True)
if density is False:
dv = v_edg[1:] - v_edg[:-1]
dv = dv[na, :, na, na, na]
p_tvxz = p_tvxz*dv
ssps = self.cube.ssps
p_tvx = np.sum(p_tvxz*ssps.delta_z, -1)
p_x_t = self.get_p_x_t(density=True, light_weighted=True)
p_t = self.get_p_t(density=True, light_weighted=True)
p_xt = p_x_t * p_t
p_tx = np.einsum('xyt->txy', p_xt)
p_tx = p_tx[:, na, :, :]
p_v_tx = p_tvx/p_tx
p_v_tx = np.einsum('tvxy->vtxy', p_v_tx)
return p_v_tx
def get_p_tvxz(self, v_edg, density=True, light_weighted=False):
"""Get p(t,v,x,z)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_txz = self.get_p_txz(density=density)
p_v_tx = self.get_p_v_tx(v_edg, density=density)
newax = np.newaxis
p_v_txz = p_v_tx[:, :, :, :, newax]
p_txz = p_txz[newax, :, :, :, :]
p_vtxz = p_v_txz * p_txz
p_tvxz = np.einsum('vtxyz->tvxyz', p_vtxz)
if light_weighted:
ssps = self.cube.ssps
light_weights = ssps.light_weights
light_weights = light_weights[:,newax,newax,newax,:]
P_tvxz_mass_wtd = self.get_p_tvxz(v_edg, density=False)
normalisation = np.sum(P_tvxz_mass_wtd*light_weights)
p_tvxz = p_tvxz*light_weights/normalisation
return p_tvxz
def get_p_v_x(self, v_edg, density=True, light_weighted=False):
"""Get p(v|x)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
na = np.newaxis
p_v_tx = self.get_p_v_tx(v_edg=v_edg,
density=density,
light_weighted=light_weighted)
P_t = self.get_p_t(density=False, light_weighted=light_weighted)
P_t = P_t[na, :, na, na]
p_v_x = np.sum(p_v_tx * P_t, 1)
return p_v_x
def get_p_v(self, v_edg, density=True, light_weighted=False):
"""Get p(v)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probability density (True) or the
volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_v_x = self.get_p_v_x(v_edg,
density=density,
light_weighted=light_weighted)
P_x = self.get_p_x(density=False, light_weighted=light_weighted)
p_v = np.sum(p_v_x*P_x, (1,2))
return p_v
def get_E_v_x(self, light_weighted=False):
"""Get mean velocity map E[p(v|x)]
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
P_t = self.get_p_t(density=False, light_weighted=light_weighted)
E_v_x = np.sum((P_t*self.mu_v.T).T, 0)
return E_v_x
def get_jth_central_moment_v_x(self, j, light_weighted=False):
"""Get j'th central moment of velocity map E[p((v-mu_v)^j|x)]
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
P_t = self.get_p_t(density=False, light_weighted=light_weighted)
mu = self.get_E_v_x()
k = np.arange(0, j+1, 2)
na = np.newaxis
muj_v_x = np.einsum('k,ktxy,t,ktxy,k->xy',
special.comb(j, k),
(self.mu_v - mu)[na,:,:,:]**(j-k[:,na,na,na]),
P_t,
self.sig_v[na,:,:,:]**k[:,na,na,na],
special.factorial2(k-1))
return muj_v_x
def get_variance_v_x(self, light_weighted=False):
"""Get variance velocity map
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
var_v_x = self.get_jth_central_moment_v_x(
2,
light_weighted=light_weighted)
return var_v_x
def get_skewness_v_x(self, light_weighted=False):
"""Get skewness of velocity map
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
mu3_v_x = self.get_jth_central_moment_v_x(
3,
light_weighted=light_weighted)
var_v_x = self.get_jth_central_moment_v_x(
2,
light_weighted=light_weighted)
skewness_v_x = mu3_v_x/var_v_x**1.5
return skewness_v_x
def get_kurtosis_v_x(self, light_weighted=False):
"""Get kurtosis of velocity map
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
mu4_v_x = self.get_jth_central_moment_v_x(
4,
light_weighted=light_weighted)
var_v_x = self.get_jth_central_moment_v_x(
2,
light_weighted=light_weighted)
kurtosis_v_x = mu4_v_x/var_v_x**2.
return kurtosis_v_x
def plot_density(self,
vmin=0.1,
vmax=3.,
show_every_nth_time=4):
"""Plot maps of the spatial density p(x|t) at several timesteps
Plot the density between the start/end times of disk growth, which
depends on the CDF of p(t). Skip every N steps between these points.
Args:
            vmin: minimum density value for colormap
            vmax: maximum density value for colormap
show_every_nth_time (int): number of timesteps to skip between plots
"""
t_idx_list = np.arange(*self.t_pars['idx_start_end'],
show_every_nth_time)
t_idx_list = t_idx_list[::-1]
kw_imshow = {'cmap':plt.cm.gist_heat,
'norm':LogNorm(vmin=vmin, vmax=vmax)}
for t_idx in t_idx_list:
t = self.cube.ssps.par_cents[1][t_idx]
img = self.cube.imshow(self.p_x_t[:,:,t_idx], **kw_imshow)
plt.gca().set_title(f't={t}')
plt.tight_layout()
plt.show()
return
def plot_t_dep(self):
"""Plot map of depletion timescale used for chemical enrichment
"""
kw_imshow = {'cmap':plt.cm.jet}
img = self.cube.imshow(self.t_dep,
colorbar_label='$t_\\mathrm{dep}$',
**kw_imshow)
plt.tight_layout()
plt.show()
return
def plot_mu_v(self,
show_every_nth_time=4,
vmax=None):
"""Plot maps of the mean velocity E[p(v|t,x)] at several timesteps
Plot the map between the start/end times of disk growth, which
depends on the CDF of p(t). Skip every N steps between these points.
Args:
vmax: maximum velocity for colormap
show_every_nth_time (int): number of timesteps to skip between plots
"""
if vmax is None:
vmax = np.max(np.abs(self.mu_v_pars['vmax_lims']))
cube = self.cube
t_idx_list = np.arange(*self.t_pars['idx_start_end'],
show_every_nth_time)
t_idx_list = t_idx_list[::-1]
kw_imshow = {'vmin':-vmax, 'vmax':vmax}
for t_idx in t_idx_list:
t = self.cube.ssps.par_cents[1][t_idx]
self.cube.imshow(self.mu_v[t_idx,:,:], **kw_imshow)
plt.gca().set_title(f't={t}')
plt.tight_layout()
plt.show()
def plot_sig_v(self,
show_every_nth_time=4,
vmin=None,
vmax=None):
"""Plot maps of the dispersion of p(v|t,x) at several timesteps
Plot the map between the start/end times of disk growth, which
depends on the CDF of p(t). Skip every N steps between these points.
Args:
            vmin: minimum dispersion value for colormap
            vmax: maximum dispersion value for colormap
show_every_nth_time (int): number of timesteps to skip between plots
"""
cube = self.cube
t_idx_list = np.arange(*self.t_pars['idx_start_end'],
show_every_nth_time)
t_idx_list = t_idx_list[::-1]
sigs = np.concatenate((
self.sig_v_pars['sig_v_in_lims'],
self.sig_v_pars['sig_v_out_lims']
))
if vmin is None:
vmin = np.min(sigs)
if vmax is None:
vmax = np.max(sigs)
kw_imshow = {'cmap':plt.cm.jet,
'vmin':vmin,
'vmax':vmax}
for t_idx in t_idx_list:
t = cube.ssps.par_cents[1][t_idx]
cube.imshow(self.sig_v[t_idx,:,:], **kw_imshow)
plt.gca().set_title(f't={t}')
plt.tight_layout()
plt.show()
class stream(component):
"""A stream with spatially varying kinematics but uniform enrichment.
The (mass-weighted) joint density of this component can be factorised as
p(t,x,v,z) = p(t) p(x) p(v|x) p(z|t)
where the factors are given by:
- p(t) : a beta distribution (see `set_p_t`),
- p(x) : a curved line with constant thickness (see `set_p_x`),
    - p(v|x) : Gaussian with mean varying along the stream and constant sigma
      (see `set_p_v_x`),
    - p(z|t) : single chemical evolution track with depletion timescale `t_dep`
      (see `set_p_z_t`).
    Args:
        cube: a pkm.mock_cube.mockCube.
        center (x0,y0): co-ordinates of the component center.
        rotation: angle (radians) between x-axes of component and cube.
"""
def __init__(self,
cube=None,
center=(0,0),
rotation=0.):
super(stream, self).__init__(
cube=cube,
center=center,
rotation=rotation)
def set_p_x(self,
theta_lims=[0., np.pi/2.],
mu_r_lims=[0.7, 0.1],
sig=0.03,
nsmp=75):
"""Define the stream track p(x)
        Defined in polar co-ordinates (theta,r). The stream extends between the
        angles in `theta_lims`, with radius varying linearly between the values
        in `mu_r_lims`. Density is constant with varying theta. The track has a
        constant width on the sky, `sig`.
Args:
theta_lims: (start, end) values of stream angle in radians. Must be
in -pi to pi. To cross negative x-axis, set non-zero rotation when
instantiating the stream component.
mu_r_lims: (start, end) values of stream distance from center.
sig (float): stream thickness.
nsmp (int): number of points to sample the angle theta.
"""
        assert np.min(theta_lims) >= -np.pi, "Angles must be in -pi<theta<pi"
        assert np.max(theta_lims) <= np.pi, "Angles must be in -pi<theta<pi"
self.theta_lims = theta_lims
cube = self.cube
theta0, theta1 = theta_lims
self.nsmp = nsmp
theta_smp = np.linspace(theta0, theta1, self.nsmp)
mu_r0, mu_r1 = mu_r_lims
tmp = (theta_smp - theta0)/(theta1 - theta0)
mu_r_smp = mu_r0 + (mu_r1 - mu_r0) * tmp
mu_x_smp = mu_r_smp * np.cos(theta_smp)
nrm_x = stats.norm(mu_x_smp, sig)
pdf_x = nrm_x.cdf(self.xxp[:,:,np.newaxis] + cube.dx/2.)
pdf_x -= nrm_x.cdf(self.xxp[:,:,np.newaxis] - cube.dx/2.)
mu_y_smp = mu_r_smp * np.sin(theta_smp)
nrm_y = stats.norm(mu_y_smp, sig)
pdf_y = nrm_y.cdf(self.yyp[:,:,np.newaxis] + cube.dy/2.)
pdf_y -= nrm_y.cdf(self.yyp[:,:,np.newaxis] - cube.dy/2.)
pdf = pdf_x * pdf_y
pdf = np.sum(pdf, -1)
pdf /= np.sum(pdf*cube.dx*cube.dy)
self.p_x_pars = dict(theta_lims=theta_lims,
mu_r_lims=mu_r_lims,
sig=sig,
nsmp=nsmp)
self.p_x = pdf
def get_p_x(self, density=True, light_weighted=False):
"""Get p(x)
Args:
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
        # since x is independent of (t,z), light- and mass-weighted densities are equal
p_x = self.p_x.copy()
if density is False:
p_x *= (self.cube.dx * self.cube.dy)
return p_x
def get_p_x_t(self, density=True, light_weighted=False):
"""Get p(x|t)
Args:
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_x = self.get_p_x(density=density, light_weighted=light_weighted)
        # since x is independent of t, p(x|t)=p(x)
nt = self.cube.ssps.par_dims[1]
p_x_t = np.broadcast_to(p_x[:,:,np.newaxis], p_x.shape+(nt,))
return p_x_t
def get_p_tx(self, density=True, light_weighted=False):
"""Get p(t,x)
Args:
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_x = self.get_p_x(density=density, light_weighted=light_weighted)
p_t = self.get_p_t(density=density, light_weighted=light_weighted)
# since x and t are independent p(t,x)=p(x)p(t)
na = np.newaxis
p_tx = p_t[:,na,na]*p_x[na,:,:]
return p_tx
    def set_p_z_t(self, t_dep=3.):
        """Set p(z|t) from a single chemical evolution track
        The track is taken from the pre-computed grid `z_t_interp_grid`,
        picking the entry whose depletion timescale is closest to `t_dep`,
        with a Gaussian spread in metallicity about the track.
        Args:
            t_dep (float): depletion timescale, must satisfy 0.1 < t_dep < 10
        """
        assert (t_dep > 0.1) and (t_dep < 10.0)
        self.t_dep = t_dep
del_t_dep = self.t_dep - self.z_t_interp_grid['t_dep']
abs_del_t_dep = np.abs(del_t_dep)
idx_t_dep = np.argmin(abs_del_t_dep, axis=-1)
self.idx_t_dep = idx_t_dep
idx_t_dep = np.ravel(idx_t_dep)
z_mu = self.z_t_interp_grid['z'][idx_t_dep]
z_mu = np.squeeze(z_mu)
self.z_mu = z_mu
z_sig2 = self.ahat * z_mu**self.bhat
log_z_edg = self.cube.ssps.par_edges[0]
del_log_z = log_z_edg[1:] - log_z_edg[:-1]
x_xsun = 1. # i.e. assuming galaxy has hydrogen mass fraction = solar
lin_z_edg = 10.**log_z_edg * x_xsun
nrm = stats.norm(loc=z_mu, scale=z_sig2**0.5)
lin_z_edg = lin_z_edg[:, np.newaxis]
cdf_z_tx = nrm.cdf(lin_z_edg)
p_z_t = cdf_z_tx[1:] - cdf_z_tx[:-1]
p_z_t /= np.sum(p_z_t, 0)
p_z_t /= del_log_z[:, np.newaxis]
self.p_z_t_pars = dict(t_dep=t_dep)
self.p_z_t = p_z_t
def get_p_z_t(self, density=True, light_weighted=False):
"""Get p(z|t)
Args:
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
if light_weighted is False:
p_z_t = self.p_z_t.copy()
if density is False:
dz = self.cube.ssps.delta_z
dz = dz[:, np.newaxis]
p_z_t *= dz
else:
p_tz = self.get_p_tz(density=False, light_weighted=True)
p_t = self.get_p_t(density=False, light_weighted=True)
p_z_t = p_tz.T/p_t
if density:
dz = self.cube.ssps.delta_z
dz = dz[:, np.newaxis]
p_z_t /= dz
return p_z_t
def get_p_tz(self, density=True, light_weighted=False):
"""Get p(t,z)
Args:
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_z_t = self.get_p_z_t(density=density, light_weighted=False)
p_t = self.get_p_t(density=density, light_weighted=False)
p_zt = p_z_t*p_t
p_tz = p_zt.T
if light_weighted:
ssps = self.cube.ssps
P_tz_mass_wtd = self.get_p_tz(density=False)
normalisation = np.sum(P_tz_mass_wtd*ssps.light_weights)
p_tz = p_tz*ssps.light_weights/normalisation
return p_tz
def get_p_z(self, density=True, light_weighted=False):
"""Get p(z)
Args:
density (bool): whether to return probabilty density (True) or the
volume-element weighted probabilty (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_tz = self.get_p_tz(density=False, light_weighted=light_weighted)
p_z = np.sum(p_tz, 0)
if density is True:
dz = self.cube.ssps.delta_z
p_z /= dz
return p_z
def get_p_z_tx(self, density=True, light_weighted=False):
"""Get p(z|t,x)
Args:
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_z_t = self.get_p_z_t(density=density, light_weighted=light_weighted)
p_z_tx = p_z_t[:,:,np.newaxis,np.newaxis]
cube_shape = (self.cube.nx, self.cube.ny)
p_z_tx = np.broadcast_to(p_z_tx, p_z_t.shape+cube_shape)
return p_z_tx
def get_p_txz(self, density=True, light_weighted=False):
"""Get p(t,x,z)
Args:
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_tz = self.get_p_tz(density=density, light_weighted=light_weighted)
p_x = self.get_p_x(density=density, light_weighted=light_weighted)
na = np.newaxis
p_txz = p_tz[:,na,na,:] * p_x[na,:,:,na]
return p_txz
def get_p_tz_x(self, density=True, light_weighted=False):
"""Get p(t,z|x)
Args:
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_tz = self.get_p_tz(density=density, light_weighted=light_weighted)
na = np.newaxis
cube_shape = (self.cube.nx, self.cube.ny)
p_tz_x = np.broadcast_to(p_tz[:,:,na,na], p_tz.shape+cube_shape)
return p_tz_x
def set_p_v_x(self,
mu_v_lims=[-100,100],
sig_v=100.):
"""Set parameters for p(v|x)
        p(v|x) = N(mu_v(x), sig_v) where mu_v(x) is linearly interpolated with
angle theta, between start/end values specified in `mu_v_lims`
Args:
mu_v_lims: (start, end) values of stream velocity
sig_v (float): constant std dev of velocity distribution
"""
th = np.arctan2(self.yyp, self.xxp)
mu_v = np.zeros_like(th)
if self.theta_lims[0]<self.theta_lims[1]:
mu_v_lo, mu_v_hi = mu_v_lims
else:
mu_v_hi, mu_v_lo = mu_v_lims
min_th, max_th = np.min(self.theta_lims), np.max(self.theta_lims)
idx = np.where(th <= min_th)
mu_v[idx] = mu_v_lo
idx = np.where(th >= max_th)
mu_v[idx] = mu_v_hi
idx = np.where((th > min_th) & (th < max_th))
mu_v[idx] = (th[idx]-min_th)/(max_th-min_th) * (mu_v_hi-mu_v_lo)
mu_v[idx] += mu_v_lo
self.p_v_x_pars = dict(mu_v_lims=mu_v_lims, sig_v=sig_v)
self.mu_v = mu_v
self.sig_v = np.zeros_like(mu_v) + sig_v
def get_p_v_x(self, v_edg, density=True, light_weighted=False):
"""Get p(v|x)
        Args:
            v_edg : array of velocity-bin edges to evaluate the quantity
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
norm = stats.norm(loc=self.mu_v, scale=self.sig_v)
v = (v_edg[:-1] + v_edg[1:])/2.
v = v[:, np.newaxis, np.newaxis]
p_v_x = norm.pdf(v)
if density is False:
dv = v_edg[1:] - v_edg[:-1]
dv = dv[:, np.newaxis, np.newaxis]
p_v_x *= dv
return p_v_x
def get_p_v(self, v_edg, density=True, light_weighted=False):
"""Get p(v)
        Args:
            v_edg : array of velocity-bin edges to evaluate the quantity
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_v_x = self.get_p_v_x(v_edg, density=density)
P_x = self.get_p_x(density=False)
p_v = np.sum(p_v_x*P_x, (1,2))
return p_v
def get_p_v_tx(self, v_edg, density=True, light_weighted=False):
"""Get p(v|t,x)
        Args:
            v_edg : array of velocity-bin edges to evaluate the quantity
            density (bool): whether to return probability density (True) or the
                volume-element weighted probability (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_v_x = self.get_p_v_x(v_edg, density=density)
nt = self.cube.ssps.par_dims[1]
shape = p_v_x.shape
shape = (shape[0], nt, shape[1], shape[2])
p_v_tx = np.broadcast_to(p_v_x[:,np.newaxis,:,:], shape)
return p_v_tx
def get_p_tvxz(self, v_edg, density=True, light_weighted=False):
"""Get p(v,t,x,z)
Args:
density (bool): whether to return probabilty density (True) or the
volume-element weighted probabilty (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_tz = self.get_p_tz(density=density, light_weighted=light_weighted)
p_v_x = self.get_p_v_x(v_edg, density=density)
p_x = self.get_p_x(density=density)
na = np.newaxis
p_vx = p_v_x * p_x[na,:,:]
p_tvxz = p_tz[:,na,na,na,:] * p_vx[na,:,:,:,na]
return p_tvxz
# end
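# A minimal usage sketch (illustrative only; the cube construction and the
# set_p_t signature below are assumptions, not taken from this module):
#
#   cube = pkm.mock_cube.mockCube(...)          # hypothetical cube instance
#   strm = stream(cube=cube, center=(0, 0), rotation=0.)
#   strm.set_p_t(...)                           # beta-distributed p(t)
#   strm.set_p_x(theta_lims=[0., np.pi/2.], mu_r_lims=[0.7, 0.1], sig=0.03)
#   strm.set_p_v_x(mu_v_lims=[-100, 100], sig_v=100.)
#   strm.set_p_z_t(t_dep=3.)
#   p_v = strm.get_p_v(v_edg=np.linspace(-300, 300, 61))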
| StarcoderdataPython |
174720 | # -*- coding: utf-8 -*-
"""
utilities.
"""
from __future__ import print_function, unicode_literals
import os
def make_dir(abspath):
"""
Make an empty directory.
"""
try:
os.mkdir(abspath)
print("Made: %s" % abspath)
except: # pragma: no cover
pass
def make_file(abspath, text):
"""
Make a file with utf-8 text.
"""
try:
with open(abspath, "wb") as f:
f.write(text.encode("utf-8"))
print("Made: %s" % abspath)
except: # pragma: no cover
pass
| StarcoderdataPython |
1770044 | <reponame>senavs/rsaEcryption
import random
def prime_number(number):
if number == 1:
return False
i = 2
while i * i <= number:
if number % i == 0:
return False
i += 1
return True
def random_prime_number(length):
while True:
n = random.randint(1 * pow(10, length - 1), 9 * pow(10, length - 1))
if prime_number(n):
return n
def mdc(a, b):
while b != 0:
rest = a % b
a = b
b = rest
return a
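# Small illustrative demo of the helpers above (a toy RSA-style setup; the
# 3-digit prime length and the starting exponent are arbitrary choices for
# the example, not part of the library):
if __name__ == '__main__':
    p = random_prime_number(3)
    q = random_prime_number(3)
    n = p * q
    phi = (p - 1) * (q - 1)
    # pick an odd public exponent coprime with phi
    e = 3
    while mdc(e, phi) != 1:
        e += 2
    print('p =', p, 'q =', q, 'n =', n, 'e =', e)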
| StarcoderdataPython |
3223043 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
name = "java-service-wrapper"
source = "https://aur.archlinux.org/java-service-wrapper.git"
| StarcoderdataPython |
3357411 | <reponame>smallrobots/Ev3TrackedExplorer_MarkII
#################################################################################################
# ev3_remoted.ev3_server class #
# Version 1.0 #
# #
# Happily shared under the MIT License (MIT) #
# #
# Copyright(c) 2017 SmallRobots.it #
# #
# Permission is hereby granted, free of charge, to any person obtaining #
# a copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #
# of the Software, and to permit persons to whom the Software is furnished to do so, #
# subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR #
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE #
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, #
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE #
# OR OTHER DEALINGS IN THE SOFTWARE. #
# #
# Visit http://www.smallrobots.it for tutorials and videos #
# #
# Credits #
# The Ev3TrackedExlpor3r is built with Lego Mindstorms Ev3 and Lego Technic Parts #
#################################################################################################
import socket
import json
import threading
import ev3_remoted
import ev3_remoted.ev3_robot_model
import ev3_remoted.ev3_sender
import ev3_remoted.ev3_receiver
class Ev3RemoteController(object):
"""Description of a remote controller"""
# Constants
__default_controller_name = "Default"
__default_controller_host_port = 60002
__default_controller_host_address = "127.0.0.1"
__default_is_active_controller = False
# Initialize a new ev3_remote_controller with attributes
def __init__(self,
name = __default_controller_name,
host_port = __default_controller_host_port,
host_address = __default_controller_host_address,
is_active_controller = __default_is_active_controller
):
"""Default constructor"""
self.__name = name
self.__address = host_address
self.__port = host_port
self.is_active_controller = is_active_controller
| StarcoderdataPython |
3272038 | import torch.nn as nn
from .single import ScaledDotProductAttention
class MultiHeadedAttention(nn.Module):
"""
Take in model size and number of heads.
"""
def __init__(self, h, d_in,d_out, dropout=0.3):
super().__init__()
assert d_out % h == 0
# We assume d_v always equals d_k
self.d_k = d_out // h
self.h = h
self.linear_layers = nn.ModuleList([nn.Linear(d_in, d_out) for _ in range(3)])
self.output_linear = nn.Linear(d_out, d_out)
self.attention = ScaledDotProductAttention(dropout)
self.dropout = dropout
def forward(self, query, key, value, mask=None):
batch_size = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [l(x).view(x.shape[0], -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linear_layers, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, attn = self.attention(query, key, value, mask=mask.unsqueeze(-2)) #for head axis
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
return self.output_linear(x)
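# Shape walk-through of forward() (B = batch size, L = sequence length); the
# mask shape (B, 1, L) is an assumption that makes mask.unsqueeze(-2) broadcast
# over the head axis, and the scores shape assumes the standard scaled
# dot-product attention implemented in .single:
#   query/key/value: (B, L, d_in) -> linear, view, transpose -> (B, h, L, d_k)
#   attention scores: (B, h, L, L); attended context x: (B, h, L, d_k)
#   transpose + view: (B, L, h*d_k) = (B, L, d_out); output_linear keeps (B, L, d_out)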
| StarcoderdataPython |
1635997 | import os, py
if os.name != 'nt':
py.test.skip('tests for win32 only')
from rpython.rlib import rwin32
from rpython.tool.udir import udir
def test_get_osfhandle():
fid = open(str(udir.join('validate_test.txt')), 'w')
fd = fid.fileno()
rwin32.get_osfhandle(fd)
fid.close()
py.test.raises(OSError, rwin32.get_osfhandle, fd)
rwin32.get_osfhandle(0)
def test_get_osfhandle_raising():
#try to test what kind of exception get_osfhandle raises w/out fd validation
py.test.skip('Crashes python')
fid = open(str(udir.join('validate_test.txt')), 'w')
fd = fid.fileno()
fid.close()
def validate_fd(fd):
return 1
_validate_fd = rwin32.validate_fd
rwin32.validate_fd = validate_fd
    py.test.raises(WindowsError, rwin32.get_osfhandle, fd)
rwin32.validate_fd = _validate_fd
def test_open_process():
pid = rwin32.GetCurrentProcessId()
assert pid != 0
handle = rwin32.OpenProcess(rwin32.PROCESS_QUERY_INFORMATION, False, pid)
rwin32.CloseHandle(handle)
py.test.raises(WindowsError, rwin32.OpenProcess, rwin32.PROCESS_TERMINATE, False, 0)
def test_terminate_process():
import subprocess, signal, sys
proc = subprocess.Popen([sys.executable, "-c",
"import time;"
"time.sleep(10)",
],
)
print proc.pid
handle = rwin32.OpenProcess(rwin32.PROCESS_ALL_ACCESS, False, proc.pid)
assert rwin32.TerminateProcess(handle, signal.SIGTERM) == 1
rwin32.CloseHandle(handle)
assert proc.wait() == signal.SIGTERM
| StarcoderdataPython |
1620404 | <reponame>OpenVessel/RedTinSaintBernard-for-BraTS2021-challenge
path_to_single = r"E:\Datasets\BraTS challenge\BraTS2021_00621"
path_to_BraTS2021 = "E:\Datasets\BraTS challenge\RSNA_ASNR_MICCAI_BraTS2021_TrainingData"
## What is flair
## What is seg
## What is t1
## What is t1ce
## What is t2?
## So we need to unzip the data points
from utils import unzip_all_files_folder_2, convert_nii_to_png
## function works for a single folder we can for loop all folders
# unzip_all_files_folder_2(path_to_single)
convert_nii_to_png(path_to_single)
## read in the NIfTI files into numpy arrays
import numpy as np
import nibabel as nib
import itk
# Packages
# itkwidgets
# nibabel
# img = nib.load(example_filename)
# a = np.array(img.dataobj)
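# Illustrative helper (not referenced elsewhere in this script) expanding on
# the commented snippet above; nibabel's get_fdata() returns the voxel data
# as a floating-point numpy array.
def load_nii_as_array(path_to_nii):
    """Load a single .nii / .nii.gz volume and return it as a numpy array."""
    img = nib.load(path_to_nii)
    return np.asarray(img.get_fdata())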
## visualize the data somehow
## Training set and the test set
## Apply Agglomerative Hierarchical clustering
## Apply Divisive Hierarchical clustering technique
## What I found interesting: how to calculate the similarity between two clusters?
# MIN, MAX, Group Average, Distance between Centroids, Ward's Method ## How can any of these be similar to
# Dice Similarity Coefficient or Hausdorff distance
# https://towardsdatascience.com/understanding-the-concept-of-hierarchical-clustering-technique-c6e8243758ec | StarcoderdataPython |
1760842 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.core.exceptions import ObjectDoesNotExist
from .models import Game
from .forms import GameForm, JoinGameForm
# Create your views here.
def home(request):
return render(request, "streams/home.html")
def create_game(request):
if request.method == "POST":
form = GameForm(request.POST)
if form.is_valid():
game = form.save(commit=False)
game.game_id = get_random_string(16)
game.save()
return HttpResponseRedirect(reverse("streams:game", kwargs={
"game_id": game.game_id,
"video_source": form.cleaned_data["video_source"],
"audio_source": form.cleaned_data["audio_source"]
}))
else:
form = GameForm()
context = {"form": form}
return render(request, "streams/create_game.html", context)
def join_game(request, game_id=None):
if "game_id" in request.GET and request.GET["game_id"]:
print(request.GET["video_source"] + "\n" + request.GET["audio_source"])
try:
game = Game.objects.get(game_id=request.GET["game_id"])
form = JoinGameForm(request.GET)
if form.is_valid():
video_source = form.cleaned_data["video_source"]
audio_source = form.cleaned_data["audio_source"]
return HttpResponseRedirect(reverse("streams:game", kwargs={
"game_id": game.game_id,
"video_source": video_source,
"audio_source": audio_source
}))
else:
print(form.errors)
except ObjectDoesNotExist:
game_id = request.GET["game_id"]
form = JoinGameForm()
context = {"game_id": game_id, "form": form}
return render(request, "streams/join_game.html", context)
def game(request, game_id, video_source=None, audio_source=None):
context = {"game_id": game_id, "video_source": video_source, "audio_source": audio_source}
return render(request, "streams/game.html", context) | StarcoderdataPython |
3307187 | <reponame>diogolopes18-cyber/MODSI
#!/usr/bin/env python3
from dotenv.main import load_dotenv
from flask import Flask, render_template, flash, request, redirect, url_for, send_from_directory, session, abort, Blueprint
import database_conn as db
# App context
orientador = Blueprint('orientador', __name__)
@orientador.route('/orientador', methods=['GET', 'POST'])
def orientador_page():
return render_template("orientador.html")
@orientador.route('/orientador/projects', methods=['GET', 'POST'])
def new_projects():
# Submit new project proposals
if(request.method == 'POST'):
suggestions = [
{
"sigla": request.form['sigla'],
"nome_projeto": request.form['name'],
"description": request.form['description']
}
]
db.connection_db(data=suggestions, query="insert", tablename="orientador_suggestions")
return render_template("project_suggestion.html")
@orientador.route('/orientador/projects/available', methods=['GET', 'POST'])
def available_projects():
projects = db.connection_db(query="select", tablename="orientador_suggestions")
return render_template("available_projects.html", data=projects)
@orientador.route('/orientador/submit_grade', methods=['GET', 'POST'])
def submit_grade():
if(request.method == 'POST'):
# Submit final grade
grade = {
"student": request.form['student'],
"project_name": request.form['project'],
"grade": request.form['note']
}
# Insert data into DB
db.connection_db(data=grade, query="insert", tablename="grades")
return render_template("final_grade.html")
| StarcoderdataPython |
1748628 | # Generated by Django 2.1.5 on 2019-02-15 08:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0009_employee_company_benifits'),
]
operations = [
migrations.AlterField(
model_name='employee',
name='position',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='departments.Position'),
),
]
| StarcoderdataPython |
3223321 | from .UNet_3D import UNet3D | StarcoderdataPython |
1615919 | import sand_python
from sand_python.sand_exceptions import SandError
from sand_python.sand_service import SandService
from django.http import JsonResponse
class SandMiddleware(object):
def __init__(self, get_response=None):
self.get_response = get_response
def process_request(self, request):
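        # NOTE: sand_client_id, sand_client_secret, sand_target_scopes,
        # sand_service_scopes and django_cache are assumed to be defined
        # elsewhere (e.g. imported from project settings); they are not
        # defined in this module.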
sand = SandService('http://sand-py-test', sand_client_id, sand_client_secret, sand_target_scopes, sand_service_scopes, django_cache)
is_valid = sand.validate_request(request, {"Authorization": request.META["HTTP_AUTHORIZATION"]})['allowed']
if is_valid != True:
return JsonResponse({"Message": 'Request not permitted'}, status=401)
else:
return None
| StarcoderdataPython |
1779948 | <gh_stars>1-10
# -----------------------------------------------------------------------------
def filter_list(values, excludes):
"""
Filter a list of values excluding all elements from excludes parameters and return the new list.
Arguments:
values : list
excludes : list
Returns:
list
"""
return list(x for x in values if x not in excludes)
# -----------------------------------------------------------------------------
def list_has_value(values, value):
"""
Check if a list of values has a specific value validating if both are valid.
Arguments:
values : list
value : any
Returns:
bool
"""
if not values or not value:
return False
if value in values:
return True
return False
# -----------------------------------------------------------------------------
def get_arg_list_value(arg_values, key):
"""
Check and return an argument value by key.
Arguments:
arg_values : list
key : str
Returns:
str
"""
for item in arg_values:
if item and item.startswith("{0}=".format(key)):
return item[(len(key) + 1) :]
return None
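# Example of the key=value parsing above (illustrative values):
#   get_arg_list_value(["--out=report.txt", "--v"], "--out") returns "report.txt"
#   get_arg_list_value(["--v"], "--out") returns None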
| StarcoderdataPython |
1771115 | def _LCG(a, z, c, m, n):
sequence = [z]
for _ in range(n):
number = (a * sequence[len(sequence) - 1] + c) % m
sequence.append(number)
return sequence[1:]
def LCG(cli, name):
cli.out(f'Вы выбрали [magenta]{name}[/magenta].')
a = cli.int('Введите множитель (a): ')
z = cli.int('Введите начальное значение (z): ')
c = cli.int('Введите значение приращения (c): ')
m = cli.int('Введите значение модуля (m): ')
n = cli.int('Введите количество генерируемых чисел (n): ')
cli.table(['Число'], _LCG(a, z, c, m, n),
autoheader='z(i)', autoformat='z({})')
if __name__ == "__main__":
a = 7 # Множитель
z = 7 # Начальное значение
c = 7 # Значение приращения
m = 8 # Значение модуля
n = 3 # Количесвто генерируемых чисел
print(_LCG(a, z, c, m, n))
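    # Worked recurrence for these parameters (z0=7, a=7, c=7, m=8):
    #   z1 = (7*7 + 7) % 8 = 56 % 8 = 0
    #   z2 = (7*0 + 7) % 8 = 7
    #   z3 = (7*7 + 7) % 8 = 0
    # so the printed sequence is [0, 7, 0].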
| StarcoderdataPython |
4823827 | <gh_stars>1-10
import play_video
import time
movie_1 = play_video.Play_vdo()
movie_1.play()
time.sleep(10)
movie_1.stop_play() | StarcoderdataPython |
3332509 | <gh_stars>1-10
from typing import List
from transmart_loader.collection_visitor import CollectionVisitor
from transmart_loader.console import Console
from transmart_loader.loader_exception import LoaderException
from transmart_loader.transmart import TreeNode, DataCollection, Observation, \
Patient, Visit, TrialVisit, Study, Concept, Modifier, Dimension, \
RelationType, Relation
class CollectionValidator(CollectionVisitor):
"""
Validation class for TranSMART data collections.
"""
def visit_relation(self, relation: Relation) -> None:
pass
def visit_relation_type(self, relation_type: RelationType) -> None:
pass
def visit_concept(self, concept: Concept) -> None:
pass
def visit_modifier(self, modifier: Modifier) -> None:
pass
def visit_dimension(self, dimension: Dimension) -> None:
pass
def visit_study(self, study: Study) -> None:
pass
def visit_trial_visit(self, trial_visit: TrialVisit) -> None:
pass
def visit_visit(self, visit: Visit) -> None:
pass
def visit_patient(self, patient: Patient) -> None:
pass
def visit_observation(self, observation: Observation) -> None:
pass
def visit_node(self, node: TreeNode) -> None:
if node.parent is not None:
self.errors.append(
'Node {} is not a root node'.format(node.name))
def __init__(self):
self.errors: List[str] = []
@staticmethod
def validate(collection: DataCollection):
validator = CollectionValidator()
validator.visit(collection)
        if len(validator.errors) != 0:
for error in validator.errors:
Console.error(error)
raise LoaderException('Invalid collection')
| StarcoderdataPython |
3368757 | from restfly.endpoint import APIEndpoint
from box import BoxList
class CloudSandboxAPI(APIEndpoint):
def get_quota(self):
"""
Returns the Cloud Sandbox API quota information for the organisation.
Returns:
:obj:`dict`: The Cloud Sandbox quota report.
Examples:
>>> pprint(zia.sandbox.get_quota())
"""
return self._get("sandbox/report/quota", box=BoxList)[0]
def get_report(self, md5_hash: str, report_details: str = "summary"):
"""
Returns the Cloud Sandbox Report for the provided hash.
Args:
md5_hash (str):
The MD5 hash of the file that was analysed by Cloud Sandbox.
report_details (str):
The type of report. Accepted values are 'full' or 'summary'. Defaults to 'summary'.
Returns:
:obj:`dict`: The cloud sandbox report.
Examples:
Get a summary report:
>>> zia.sandbox.get_report('8350dED6D39DF158E51D6CFBE36FB012')
Get a full report:
>>> zia.sandbox.get_report('8350dED6D39DF158E51D6CFBE36FB012', 'full')
"""
return self._get(f"sandbox/report/{md5_hash}?details={report_details}")
| StarcoderdataPython |
199230 | <reponame>frcl/jupytext<filename>tests/test_read_write_functions.py
from io import StringIO
from pathlib import Path
import nbformat
from nbformat.v4.nbbase import new_markdown_cell, new_notebook
import jupytext
from jupytext.compare import compare
def test_simple_hook(tmpdir):
nb_file = str(tmpdir.join("notebook.ipynb"))
md_file = str(tmpdir.join("notebook.md"))
nbformat.write(new_notebook(cells=[new_markdown_cell("Some text")]), nb_file)
nb = jupytext.read(nb_file)
jupytext.write(nb, md_file)
with open(md_file) as fp:
text = fp.read()
assert "Some text" in text.splitlines()
def test_simple_hook_with_explicit_format(tmpdir):
nb_file = str(tmpdir.join("notebook.ipynb"))
py_file = str(tmpdir.join("notebook.py"))
nbformat.write(new_notebook(cells=[new_markdown_cell("Some text")]), nb_file)
nb = jupytext.read(nb_file)
jupytext.write(nb, py_file, fmt="py:percent")
with open(py_file) as fp:
text = fp.read()
assert "# %% [markdown]" in text.splitlines()
assert "# Some text" in text.splitlines()
def test_no_error_on_path_object(tmpdir):
nb_file = Path(str(tmpdir.join("notebook.ipynb")))
md_file = nb_file.with_suffix(".md")
nbformat.write(new_notebook(cells=[new_markdown_cell("Some text")]), str(nb_file))
nb = jupytext.read(nb_file)
jupytext.write(nb, md_file)
def test_read_ipynb_from_stream():
def stream():
return StringIO(
u"""{
"cells": [
{
"cell_type": "code",
"metadata": {},
"source": [
"1 + 1"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 4
}
"""
)
nb = jupytext.read(stream())
nb2 = jupytext.read(stream(), fmt="ipynb")
compare(nb2, nb)
def test_read_py_percent_from_stream():
def stream():
return StringIO(
u"""# %%
1 + 1
"""
)
nb = jupytext.read(stream())
nb2 = jupytext.read(stream(), fmt="py:percent")
compare(nb2, nb)
| StarcoderdataPython |
4820550 | <filename>NU_20-21/4.py<gh_stars>0
#Be sure to upload your work today for your "attendance/participation" grade.
# I will not be grading your work in detail, simply 1 if submitted, 0 if not.
# After you finsh the problems below, please work on Assignment 1.
#I have provided 2 asserts for each already. You should uncomment those
# when you're done to make sure that they pass. Remember that if you
# run your code, with asserts uncommented, and don't see any output,
# that means that the test passed.
# Recall the slice notation list[start:end:step]
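#  e.g. [0, 1, 2, 3, 4, 5][1:5:2] == [1, 3] and [1, 2, 3][::-1] == [3, 2, 1]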
#Define a function called crop_list. It will take as input a list named lst of length greater than 4.
# It returns a new list, built from lst, but discarding the first and last two elements.
def crop_list(lst:list)->list:
newlst = lst[2:(len(lst)-2)]
return newlst
assert crop_list([1, 2, 3, 4, 5]) == [3], "cropping 12345"
assert crop_list(["North", "Western", "Computer", "Science", "Departmental", "Affairs"]) == ["Computer", "Science"], "cropping Comp Sci"
assert crop_list([1, 2, 3, 4, 5, 6]) == [3, 4], "cropping 123456"
assert crop_list([1, 2, 3, 4, 5, 6, 7]) == [3, 4, 5], "cropping 123456"
assert crop_list([]) == [], "empty"
#Define a function called backward_hop. It will take as input a list named code.
# It returns a new list, built from the original list, but starting backwards
# counting every second item. For example, backward_hop(['3','t','2','a','1','c'])
# will return ['c','a','t'].
def backward_hop(code:list)-> list:
    newlst = list(code)  # copy so the caller's list is not modified in place
newlst.reverse()
counter = 1
while counter < len(newlst):
newlst.pop(counter)
counter = counter + 1
return(newlst)
assert backward_hop(['3','t','2','a','1','c']) == ['c','a','t'], "backward hop of cat"
assert backward_hop(['e','i','t','u','a','y','g','t','r','r','e','e','t','w','a','q','w'])==['w','a','t','e','r','g','a','t','e'], "backward hop of watergate"
assert backward_hop([]) ==[], "backwards hop of empty list"
assert backward_hop([1]) ==[1], "backwards hop of one item list"
print("All asserts passed!!!!")
| StarcoderdataPython |
124198 | <filename>libs/tools/json.py<gh_stars>0
from functools import wraps
from flask import jsonify, request
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from werkzeug.exceptions import BadRequest
from importlib import import_module
import logging
def validate_schema(schema_name: str):
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
try:
schema = import_module('libs.schemas.' + schema_name, package=__name__)
validate(request.json, schema.schema)
except ValidationError as e:
logging.error(f'ValidationError: {e.message}, {request.data}')
return jsonify({
"error_code": 400,
"error_type": "ValidationError",
"error_message": e.message
}), 400
return f(*args, **kw)
return wrapper
return decorator
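# A minimal usage sketch (the route below and a schema module
# libs/schemas/create_user.py exposing a dict named `schema` are assumptions
# for illustration, inferred from the import_module call above):
#
#   @app.route("/users", methods=["POST"])
#   @validate_schema("create_user")
#   def create_user():
#       return jsonify(request.json), 201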
| StarcoderdataPython |
3376916 | <filename>smsAlert/__init__.py
# -*- coding: utf-8 -*-
__author__ = 'Prashant'
__version__ = '0.1.0'
from .smsAlert import smsAlertMsg | StarcoderdataPython |
194302 | # -*- coding: utf-8 -*-
##########################################################################
# pySAP - Copyright (C) CEA, 2017 - 2018
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Wavelet transform base module.
"""
# System import
from __future__ import division, print_function, absolute_import
from pprint import pprint
import uuid
import os
import warnings
# Package import
import pysap
from .utils import with_metaclass
from pysap.plotting import plot_transform
try:
import pysparse
except:
warnings.warn("Sparse2d python bindings not found, use binaries.")
pysparse = None
# Third party import
import numpy
class MetaRegister(type):
""" Simple Python metaclass registry pattern.
"""
REGISTRY = {}
def __new__(cls, name, bases, attrs):
""" Allocation.
Parameters
----------
name: str
the name of the class.
bases: tuple
the base classes.
attrs:
the attributes defined for the class.
"""
new_cls = type.__new__(cls, name, bases, attrs)
if name in cls.REGISTRY:
raise ValueError(
"'{0}' name already used in registry.".format(name))
if name not in ("WaveletTransformBase", "ISAPWaveletTransformBase"):
cls.REGISTRY[name] = new_cls
return new_cls
class WaveletTransformBase(with_metaclass(MetaRegister)):
""" Data structure representing a signal wavelet decomposition.
Available transforms are define in 'pysap.transform'.
"""
def __init__(self, nb_scale, verbose=0, **kwargs):
""" Initialize the WaveletTransformBase class.
Parameters
----------
data: ndarray
the input data.
nb_scale: int
the number of scale of the decomposition that includes the
approximation scale.
verbose: int, default 0
control the verbosity level
"""
# Wavelet transform parameters
self.nb_scale = nb_scale
self.name = None
self.bands_names = None
self.nb_band_per_scale = None
self.bands_lengths = None
self.bands_shapes = None
self.isap_transform_id = None
self.flatten_fct = None
self.unflatten_fct = None
self.is_decimated = None
self.scales_lengths = None
self.scales_padds = None
self.use_wrapping = pysparse is None
# Data that can be decalred afterward
self._data = None
self._image_metadata = {}
self._data_shape = None
self._iso_shape = None
self._analysis_data = None
self._analysis_shape = None
self._analysis_header = None
self._analysis_buffer_shape = None
self.verbose = verbose
# Transformation
if not self.use_wrapping:
kwargs["type_of_multiresolution_transform"] = (
self.__isap_transform_id__)
kwargs["number_of_scales"] = self.nb_scale
self.trf = pysparse.MRTransform(**kwargs)
else:
self.trf = None
def __getitem__(self, given):
""" Access the analysis designated scale/band coefficients.
Parameters
----------
given: int, slice or tuple
the scale and band indices.
Returns
-------
coeffs: ndarray or list of ndarray
the analysis coefficients.
"""
# Convert given index to generic scale/band index
if not isinstance(given, tuple):
given = (given, slice(None))
# Check that we have a valid given index
if len(given) != 2:
raise ValueError("Expect a scale/band int or 2-uplet index.")
# Check some data are stored in the structure
if self._analysis_data is None:
raise ValueError("Please specify first the decomposition "
"coefficients array.")
# Handle multi-dim slice object
if isinstance(given[0], slice):
start = given[0].start or 0
stop = given[0].stop or self.nb_scale
step = given[0].step or 1
coeffs = [self.__getitem__((index, given[1]))
for index in range(start, stop, step)]
elif isinstance(given[1], slice):
start = given[1].start or 0
stop = given[1].stop or self.nb_band_per_scale[given[0]]
step = given[1].step or 1
coeffs = [self.band_at(given[0], index)
for index in range(start, stop, step)]
else:
coeffs = [self.band_at(given[0], given[1])]
# Format output
if len(coeffs) == 1:
coeffs = coeffs[0]
return coeffs
def __setitem__(self, given, array):
""" Set the analysis designated scale/band coefficients.
Parameters
----------
given: tuple
the scale and band indices.
array: ndarray
the specific scale/band data as an array.
"""
# Check that we have a valid given index
if len(given) != 2:
raise ValueError("Expect a scale/band int or 2-uplet index.")
# Check given index
if isinstance(given[0], slice) or isinstance(given[1], slice):
raise ValueError("Expect a scale/band int index (no slice).")
# Check some data are stored in the structure
if self._analysis_data is None:
raise ValueError("Please specify first the decomposition "
"coefficients array.")
        # Set the band array (same flat indexing as in 'band_at')
        index = numpy.sum(self.nb_band_per_scale[:given[0]]).astype(int)
        index += given[1]
        self._analysis_data[index] = array
##########################################################################
# Properties
##########################################################################
def _set_data(self, data):
""" Set the input data array.
Parameters
----------
data: nd-array or pysap.Image
input data/signal.
"""
if self.verbose > 0 and self._data is not None:
print("[info] Replacing existing input data array.")
if not all([e == data.shape[0] for e in data.shape]):
raise ValueError("Expect a square shape data.")
if data.ndim != 2:
raise ValueError("Expect a two-dim data array.")
if self.is_decimated and not (data.shape[0] // 2**(self.nb_scale) > 0):
raise ValueError("Can't decimate the data with the specified "
"number of scales.")
if isinstance(data, pysap.Image):
self._data = data.data
self._image_metadata = data.metadata
else:
self._data = data
self._data_shape = self._data.shape
self._iso_shape = self._data_shape[0]
if self.use_wrapping:
self._set_transformation_parameters()
self._compute_transformation_parameters()
def _get_data(self):
""" Get the input data array.
Returns
-------
data: nd-array
input data/signal.
"""
return self._data
def _set_analysis_data(self, analysis_data):
""" Set the decomposition coefficients array.
Parameters
----------
        analysis_data: list of nd-array
decomposition coefficients array.
"""
if self.verbose > 0 and self._analysis_data is not None:
print("[info] Replacing existing decomposition coefficients "
"array.")
if len(analysis_data) != sum(self.nb_band_per_scale):
raise ValueError("The wavelet coefficients do not correspond to "
"the wavelet transform parameters.")
self._analysis_data = analysis_data
def _get_analysis_data(self):
""" Get the decomposition coefficients array.
Returns
-------
analysis_data: nd-array
decomposition coefficients array.
"""
return self._analysis_data
def _set_analysis_header(self, analysis_header):
""" Set the decomposition coefficients header.
Parameters
----------
analysis_header: dict
decomposition coefficients array.
"""
if self.verbose > 0 and self._analysis_header is not None:
print("[info] Replacing existing decomposition coefficients "
"header.")
self._analysis_header = analysis_header
def _get_analysis_header(self):
""" Get the decomposition coefficients header.
Returns
-------
analysis_header: dict
decomposition coefficients header.
"""
return self._analysis_header
def _get_info(self):
""" Return the transformation information. This iformation is only
available when using the Python bindings.
"""
if not self.use_wrapping:
self.trf.info()
data = property(_get_data, _set_data)
analysis_data = property(_get_analysis_data, _set_analysis_data)
analysis_header = property(_get_analysis_header, _set_analysis_header)
info = property(_get_info)
##########################################################################
# Public members
##########################################################################
@classmethod
def bands_shapes(cls, bands_lengths, ratio=None):
""" Return the different bands associated shapes given there lengths.
Parameters
----------
bands_lengths: ndarray (<nb_scale>, max(<nb_band_per_scale>, 0))
array holding the length between two bands of the data
vector per scale.
ratio: ndarray, default None
            an array containing ratios for each scale and each band.
Returns
-------
bands_shapes: list of list of 2-uplet (<nb_scale>, <nb_band_per_scale>)
structure holding the shape of each bands at each scale.
"""
if ratio is None:
ratio = numpy.ones_like(bands_lengths)
bands_shapes = []
        for scale_number, scale_data in enumerate(bands_lengths):
            scale_shapes = []
            for band_number, band_length in enumerate(scale_data):
                shape = (
                    int(numpy.sqrt(
                        band_length * ratio[scale_number, band_number])),
                    int(numpy.sqrt(
                        band_length / ratio[scale_number, band_number])))
scale_shapes.append(shape)
bands_shapes.append(scale_shapes)
return bands_shapes
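    # For example, with bands_lengths=[[64, 64, 64], [16, 0, 0]] and the
    # default ratio of ones, bands_shapes returns
    # [[(8, 8), (8, 8), (8, 8)], [(4, 4), (0, 0), (0, 0)]]:
    # each flat band length is turned into the square 2D shape it unflattens to.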
def show(self):
""" Display the different bands at the different decomposition scales.
"""
plot_transform(self)
def analysis(self, **kwargs):
""" Decompose a real or complex signal using ISAP.
Fill the instance 'analysis_data' and 'analysis_header' parameters.
Parameters
----------
kwargs: dict (optional)
the parameters that will be passed to
'pysap.extensions.mr_tansform'.
"""
# Checks
if self._data is None:
raise ValueError("Please specify first the input data.")
# Update ISAP parameters
kwargs["type_of_multiresolution_transform"] = self.isap_transform_id
kwargs["number_of_scales"] = self.nb_scale
# Analysis
if numpy.iscomplexobj(self._data):
analysis_data_real, self.analysis_header = self._analysis(
self._data.real, **kwargs)
analysis_data_imag, _ = self._analysis(
self._data.imag, **kwargs)
if isinstance(analysis_data_real, numpy.ndarray):
self._analysis_data = (
analysis_data_real + 1.j * analysis_data_imag)
else:
self._analysis_data = [
re + 1.j * ima
for re, ima in zip(analysis_data_real, analysis_data_imag)]
else:
self._analysis_data, self._analysis_header = self._analysis(
self._data, **kwargs)
def synthesis(self):
""" Reconstruct a real or complex signal from the wavelet coefficients
using ISAP.
Returns
-------
data: pysap.Image
the reconstructed data/signal.
"""
# Checks
if self._analysis_data is None:
raise ValueError("Please specify first the decomposition "
"coefficients array.")
if self.use_wrapping and self._analysis_header is None:
raise ValueError("Please specify first the decomposition "
"coefficients header.")
# Message
if self.verbose > 1:
print("[info] Synthesis header:")
pprint(self._analysis_header)
        # Reorganize the coefficients with ISAP convention
# TODO: do not backup the list of bands
if self.use_wrapping:
analysis_buffer = numpy.zeros(
self._analysis_buffer_shape, dtype=self.analysis_data[0].dtype)
for scale, nb_bands in enumerate(self.nb_band_per_scale):
for band in range(nb_bands):
self._set_linear_band(scale, band, analysis_buffer,
self.band_at(scale, band))
_saved_analysis_data = self._analysis_data
self._analysis_data = analysis_buffer
self._analysis_data = [self.unflatten_fct(self)]
# Synthesis
if numpy.iscomplexobj(self._analysis_data[0]):
data_real = self._synthesis(
[arr.real for arr in self._analysis_data],
self._analysis_header)
data_imag = self._synthesis(
[arr.imag for arr in self._analysis_data],
self._analysis_header)
data = data_real + 1.j * data_imag
else:
data = self._synthesis(
self._analysis_data, self._analysis_header)
# TODO: remove this code asap
if self.use_wrapping:
self._analysis_data = _saved_analysis_data
return pysap.Image(data=data, metadata=self._image_metadata)
def band_at(self, scale, band):
""" Get the band at a specific scale.
Parameters
----------
scale: int
index of the scale.
band: int
index of the band.
Returns
-------
band_data: nd-arry
the requested band data array.
"""
# Message
if self.verbose > 1:
print("[info] Accessing scale '{0}' and band '{1}'...".format(
scale, band))
# Get the band array
index = numpy.sum(self.nb_band_per_scale[:scale]).astype(int) + band
band_data = self.analysis_data[index]
return band_data
##########################################################################
# Private members
##########################################################################
def _get_linear_band(self, scale, band, analysis_data):
""" Access the desired band data from a 1D linear analysis buffer.
Parameters
----------
scale: int
index of the scale.
band: int
index of the band.
analysis_data: nd-array (N, )
the analysis buffer.
Returns
-------
        band_data: nd-array (M, )
the requested band buffer.
"""
# Compute selected scale/band start/stop indices
start_scale_padd = self.scales_padds[scale]
start_band_padd = (
self.bands_lengths[scale, :band + 1].sum() -
self.bands_lengths[scale, band])
start_padd = start_scale_padd + start_band_padd
stop_padd = start_padd + self.bands_lengths[scale, band]
# Get the band array
band_data = analysis_data[start_padd: stop_padd].reshape(
self.bands_shapes[scale][band])
return band_data
def _set_linear_band(self, scale, band, analysis_data, band_data):
""" Set the desired band data in a 1D linear analysis buffer.
Parameters
----------
scale: int
index of the scale.
band: int
index of the band.
analysis_data: nd-array (N, )
the analysis buffer.
band_data: nd-array (M, M)
the band data to be added in the analysis buffer.
Returns
-------
        analysis_data: nd-array (N, )
the updated analysis buffer.
"""
# Compute selected scale/band start/stop indices
start_scale_padd = self.scales_padds[scale]
start_band_padd = (
self.bands_lengths[scale, :band + 1].sum() -
self.bands_lengths[scale, band])
start_padd = start_scale_padd + start_band_padd
stop_padd = start_padd + self.bands_lengths[scale, band]
# Get the band array
analysis_data[start_padd: stop_padd] = band_data.flatten()
return analysis_data
def _set_transformation_parameters(self):
""" Define the transformation class parameters.
Attributes
----------
name: str
the name of the decomposition.
bands_names: list of str
the name of the different bands.
flatten_fct: callable
a function used to reorganize the ISAP decomposition coefficients,
see 'pysap/extensions/formating.py' module for more details.
unflatten_fct: callable
a function used to reorganize the decomposition coefficients using
ISAP convention, see 'pysap/extensions/formating.py' module for
more details.
is_decimated: bool
True if the decomposition include a decimation of the
band number of coefficients.
nb_band_per_scale: ndarray (<nb_scale>, )
vector of int holding the number of band per scale.
bands_lengths: ndarray (<nb_scale>, max(<nb_band_per_scale>, 0))
array holding the length between two bands of the data
vector per scale.
bands_shapes: list of list of 2-uplet (<nb_scale>, <nb_band_per_scale>)
structure holding the shape of each bands at each scale.
isap_transform_id: int
the label of the ISAP transformation.
"""
raise NotImplementedError("Abstract method should not be declared "
"in derivate classes.")
def _compute_transformation_parameters(self):
""" Compute information in order to split scale/band flatten data.
Attributes
----------
scales_lengths: ndarray (<nb_scale>, )
the length of each band.
scales_padds: ndarray (<nb_scale> + 1, )
the index of the data associated to each scale.
"""
if self.bands_lengths is None:
raise ValueError(
"The transformation parameters have not been set.")
self.scales_lengths = self.bands_lengths.sum(axis=1)
self.scales_padds = numpy.zeros((self.nb_scale + 1, ), dtype=int)
self.scales_padds[1:] = self.scales_lengths.cumsum()
def _analysis(self, data, **kwargs):
""" Decompose a real signal using ISAP.
Parameters
----------
data: nd-array
a real array to be decomposed.
kwargs: dict (optional)
the parameters that will be passed to
'pysap.extensions.mr_tansform'.
Returns
-------
analysis_data: nd_array
the decomposition coefficients.
analysis_header: dict
the decomposition associated information.
"""
# Use subprocess to execute binaries
if self.use_wrapping:
kwargs["verbose"] = self.verbose > 0
with pysap.TempDir(isap=True) as tmpdir:
in_image = os.path.join(tmpdir, "in.fits")
out_mr_file = os.path.join(tmpdir, "cube.mr")
pysap.io.save(data, in_image)
pysap.extensions.mr_transform(in_image, out_mr_file, **kwargs)
image = pysap.io.load(out_mr_file)
analysis_data = image.data
analysis_header = image.metadata
            # Reorganize the generated coefficients
self._analysis_shape = analysis_data.shape
analysis_buffer = self.flatten_fct(analysis_data, self)
self._analysis_buffer_shape = analysis_buffer.shape
if not isinstance(self.nb_band_per_scale, list):
self.nb_band_per_scale = (
self.nb_band_per_scale.squeeze().tolist())
analysis_data = []
for scale, nb_bands in enumerate(self.nb_band_per_scale):
for band in range(nb_bands):
analysis_data.append(self._get_linear_band(
scale, band, analysis_buffer))
# Use Python bindings
else:
analysis_data, self.nb_band_per_scale = self.trf.transform(
data.astype(numpy.double), save=False)
analysis_header = None
return analysis_data, analysis_header
def _synthesis(self, analysis_data, analysis_header):
""" Reconstruct a real signal from the wavelet coefficients using ISAP.
Parameters
----------
analysis_data: list of nd-array
the wavelet coefficients array.
analysis_header: dict
the wavelet decomposition parameters.
Returns
-------
data: nd-array
the reconstructed data array.
"""
# Use subprocess to execute binaries
if self.use_wrapping:
cube = pysap.Image(data=analysis_data[0], metadata=analysis_header)
with pysap.TempDir(isap=True) as tmpdir:
in_mr_file = os.path.join(tmpdir, "cube.mr")
out_image = os.path.join(tmpdir, "out.fits")
pysap.io.save(cube, in_mr_file)
pysap.extensions.mr_recons(
in_mr_file, out_image, verbose=(self.verbose > 0))
data = pysap.io.load(out_image).data
# Use Python bindings
else:
data = self.trf.reconstruct(analysis_data)
return data
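# Typical round trip with a concrete subclass (illustrative; the subclass name
# is a placeholder -- the available transforms are defined in pysap.transform):
#
#   transform = SomeWaveletTransform(nb_scale=4)
#   transform.data = image          # square 2D ndarray or pysap.Image
#   transform.analysis()            # fills transform.analysis_data
#   band = transform[0, 0]          # coefficients of scale 0, band 0
#   rec = transform.synthesis()     # pysap.Image reconstruction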
| StarcoderdataPython |
1773459 | <filename>mayan/apps/events/tests/test_views.py
from django.contrib.contenttypes.models import ContentType
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.documents.tests.base import GenericDocumentViewTestCase
from mayan.apps.messaging.events import event_message_created
from mayan.apps.messaging.models import Message
from mayan.apps.testing.tests.base import GenericViewTestCase
from mayan.apps.storage.events import event_download_file_created
from mayan.apps.storage.models import DownloadFile
from ..events import event_events_exported
from ..models import Notification
from ..permissions import permission_events_export, permission_events_view
from .mixins import (
EventsExportViewTestMixin, EventTypeTestMixin, EventViewTestMixin,
NotificationTestMixin, NotificationViewTestMixin, UserEventViewsTestMixin
)
class EventsViewTestCase(
EventTypeTestMixin, EventViewTestMixin, GenericDocumentViewTestCase
):
auto_upload_test_document = False
def setUp(self):
super().setUp()
self._create_test_event_type()
self._create_test_user()
self.test_object = self.test_document_type
content_type = ContentType.objects.get_for_model(model=self.test_object)
self.view_arguments = {
'app_label': content_type.app_label,
'model_name': content_type.model,
'object_id': self.test_object.pk
}
def create_test_event(self, **kwargs):
self.test_action = self.test_event_type.commit(**kwargs)
self.test_actions.append(self.test_action)
def test_event_list_view_no_permission(self):
self.create_test_event(target=self.test_object)
response = self._request_test_events_list_view()
self.assertNotContains(
response=response, status_code=200, text=str(self.test_event_type)
)
def test_event_list_view_with_access(self):
self.create_test_event(target=self.test_object)
self.grant_access(
obj=self.test_object, permission=permission_events_view
)
response = self._request_test_events_list_view()
self.assertContains(
response=response, status_code=200, text=str(self.test_event_type)
)
def test_events_for_object_view_no_permission(self):
self.create_test_event(
actor=self.test_user, action_object=self.test_object
)
response = self._request_events_for_object_view()
self.assertNotContains(
response=response, text=str(self.test_event_type), status_code=200
)
def test_events_for_object_view_with_access(self):
self.create_test_event(
actor=self.test_user, action_object=self.test_object
)
self.grant_access(
obj=self.test_object, permission=permission_events_view
)
response = self._request_events_for_object_view()
self.assertContains(
response=response, text=str(self.test_event_type), status_code=200
)
def test_events_by_verb_view_no_permission(self):
self.create_test_event(
actor=self.test_user, action_object=self.test_object
)
response = self._request_test_events_by_verb_view()
self.assertContains(
count=3,
response=response, text=str(self.test_event_type), status_code=200
)
def test_events_by_verb_view_with_access(self):
self.create_test_event(
actor=self.test_user, action_object=self.test_object
)
self.grant_access(
obj=self.test_object, permission=permission_events_view
)
response = self._request_test_events_by_verb_view()
self.assertContains(
count=4,
response=response, text=str(self.test_event_type), status_code=200
)
def test_current_user_events_view_no_permission(self):
self.create_test_event(
actor=self._test_case_user, action_object=self.test_object
)
response = self._request_test_current_user_events_view()
self.assertNotContains(
response=response, text=str(self.test_event_type), status_code=200
)
def test_current_user_events_view_with_access(self):
self.create_test_event(
actor=self._test_case_user, action_object=self.test_object
)
self.grant_access(
obj=self.test_object, permission=permission_events_view
)
response = self._request_test_current_user_events_view()
self.assertContains(
response=response, text=str(self.test_event_type), status_code=200
)
class EventExportViewTestCase(
EventTypeTestMixin, EventsExportViewTestMixin, GenericDocumentViewTestCase
):
auto_upload_test_document = False
def setUp(self):
super().setUp()
self._create_test_event_type()
self._create_test_user()
self.test_object = self.test_document_type
content_type = ContentType.objects.get_for_model(model=self.test_object)
self.view_arguments = {
'app_label': content_type.app_label,
'model_name': content_type.model,
'object_id': self.test_object.pk
}
ModelPermission.register(
model=self.test_object._meta.model, permissions=(
permission_events_export,
)
)
self._clear_events()
def create_test_event(self, **kwargs):
self.test_action = self.test_event_type.commit(**kwargs)
self.test_actions.append(self.test_action)
def test_events_list_export_view_no_permission(self):
self.create_test_event(target=self.test_object)
response = self._request_test_events_list_export_view()
self.assertEqual(response.status_code, 302)
test_download_file = DownloadFile.objects.first()
test_message = Message.objects.first()
events = self._get_test_events()
# Test object creation + 3 for the test
self.assertEqual(events.count(), 4)
self.assertEqual(events[1].action_object, None)
self.assertEqual(events[1].actor, self._test_case_user)
self.assertEqual(events[1].target, test_download_file)
self.assertEqual(events[1].verb, event_download_file_created.id)
self.assertEqual(events[2].action_object, None)
self.assertEqual(events[2].actor, self._test_case_user)
self.assertEqual(events[2].target, test_download_file)
self.assertEqual(events[2].verb, event_events_exported.id)
self.assertEqual(events[3].action_object, None)
self.assertEqual(events[3].actor, test_message)
self.assertEqual(events[3].target, test_message)
self.assertEqual(events[3].verb, event_message_created.id)
with test_download_file.open() as file_object:
self.assertTrue(
str(self.test_object).encode() not in file_object.read()
)
def test_events_list_export_view_with_access(self):
self.create_test_event(target=self.test_object)
self.grant_access(
obj=self.test_object, permission=permission_events_export
)
response = self._request_test_events_list_export_view()
self.assertEqual(response.status_code, 302)
test_download_file = DownloadFile.objects.first()
test_message = Message.objects.first()
events = self._get_test_events()
# Test object creation + access grant + 3 for the test
self.assertEqual(events.count(), 5)
self.assertEqual(events[2].action_object, None)
self.assertEqual(events[2].actor, self._test_case_user)
self.assertEqual(events[2].target, test_download_file)
self.assertEqual(events[2].verb, event_download_file_created.id)
self.assertEqual(events[3].action_object, None)
self.assertEqual(events[3].actor, self._test_case_user)
self.assertEqual(events[3].target, test_download_file)
self.assertEqual(events[3].verb, event_events_exported.id)
self.assertEqual(events[4].action_object, None)
self.assertEqual(events[4].actor, test_message)
self.assertEqual(events[4].target, test_message)
self.assertEqual(events[4].verb, event_message_created.id)
with test_download_file.open() as file_object:
self.assertTrue(
str(self.test_object).encode() in file_object.read()
)
def test_events_for_object_export_view_no_permission(self):
self.create_test_event(
actor=self.test_user, action_object=self.test_object
)
response = self._request_events_for_object_export_view()
self.assertEqual(response.status_code, 302)
test_download_file = DownloadFile.objects.first()
test_message = Message.objects.first()
events = self._get_test_events()
# Test object creation + 3 for the test
self.assertEqual(events.count(), 4)
self.assertEqual(events[1].action_object, None)
self.assertEqual(events[1].actor, self._test_case_user)
self.assertEqual(events[1].target, test_download_file)
self.assertEqual(events[1].verb, event_download_file_created.id)
self.assertEqual(events[2].action_object, None)
self.assertEqual(events[2].actor, self._test_case_user)
self.assertEqual(events[2].target, test_download_file)
self.assertEqual(events[2].verb, event_events_exported.id)
self.assertEqual(events[3].action_object, None)
self.assertEqual(events[3].actor, test_message)
self.assertEqual(events[3].target, test_message)
self.assertEqual(events[3].verb, event_message_created.id)
with test_download_file.open() as file_object:
self.assertTrue(
str(self.test_object).encode() not in file_object.read()
)
def test_events_for_object_export_view_with_access(self):
self.create_test_event(
actor=self.test_user, action_object=self.test_object
)
self.grant_access(
obj=self.test_object, permission=permission_events_export
)
response = self._request_events_for_object_export_view()
self.assertEqual(response.status_code, 302)
test_download_file = DownloadFile.objects.first()
test_message = Message.objects.first()
events = self._get_test_events()
# Test object creation + access grant + 3 for the test
self.assertEqual(events.count(), 5)
self.assertEqual(events[2].action_object, None)
self.assertEqual(events[2].actor, self._test_case_user)
self.assertEqual(events[2].target, test_download_file)
self.assertEqual(events[2].verb, event_download_file_created.id)
self.assertEqual(events[3].action_object, None)
self.assertEqual(events[3].actor, self._test_case_user)
self.assertEqual(events[3].target, test_download_file)
self.assertEqual(events[3].verb, event_events_exported.id)
self.assertEqual(events[4].action_object, None)
self.assertEqual(events[4].actor, test_message)
self.assertEqual(events[4].target, test_message)
self.assertEqual(events[4].verb, event_message_created.id)
with test_download_file.open() as file_object:
self.assertTrue(
str(self.test_object).encode() in file_object.read()
)
def test_events_by_verb_export_view_no_permission(self):
self.create_test_event(
actor=self.test_user, action_object=self.test_object
)
response = self._request_test_events_by_verb_export_view()
self.assertEqual(response.status_code, 302)
test_download_file = DownloadFile.objects.first()
test_message = Message.objects.first()
events = self._get_test_events()
# Test object creation + 3 for the test
self.assertEqual(events.count(), 4)
self.assertEqual(events[1].action_object, None)
self.assertEqual(events[1].actor, self._test_case_user)
self.assertEqual(events[1].target, test_download_file)
self.assertEqual(events[1].verb, event_download_file_created.id)
self.assertEqual(events[2].action_object, None)
self.assertEqual(events[2].actor, self._test_case_user)
self.assertEqual(events[2].target, test_download_file)
self.assertEqual(events[2].verb, event_events_exported.id)
self.assertEqual(events[3].action_object, None)
self.assertEqual(events[3].actor, test_message)
self.assertEqual(events[3].target, test_message)
self.assertEqual(events[3].verb, event_message_created.id)
with test_download_file.open() as file_object:
self.assertTrue(
str(self.test_object).encode() not in file_object.read()
)
def test_events_by_verb_view_export_with_access(self):
self.create_test_event(
actor=self.test_user, action_object=self.test_object
)
self.grant_access(
obj=self.test_object, permission=permission_events_export
)
response = self._request_test_events_by_verb_export_view()
self.assertEqual(response.status_code, 302)
test_download_file = DownloadFile.objects.first()
test_message = Message.objects.first()
events = self._get_test_events()
# Test object creation + access grant + 3 for the test
self.assertEqual(events.count(), 5)
self.assertEqual(events[2].action_object, None)
self.assertEqual(events[2].actor, self._test_case_user)
self.assertEqual(events[2].target, test_download_file)
self.assertEqual(events[2].verb, event_download_file_created.id)
self.assertEqual(events[3].action_object, None)
self.assertEqual(events[3].actor, self._test_case_user)
self.assertEqual(events[3].target, test_download_file)
self.assertEqual(events[3].verb, event_events_exported.id)
self.assertEqual(events[4].action_object, None)
self.assertEqual(events[4].actor, test_message)
self.assertEqual(events[4].target, test_message)
self.assertEqual(events[4].verb, event_message_created.id)
with test_download_file.open() as file_object:
self.assertTrue(
str(self.test_object).encode() in file_object.read()
)
def test_current_user_events_export_view_no_permission(self):
self.create_test_event(
actor=self._test_case_user, action_object=self.test_object
)
response = self._request_test_current_user_events_export_view()
self.assertNotContains(
response=response, text=str(self.test_event_type), status_code=302
)
test_download_file = DownloadFile.objects.first()
test_message = Message.objects.first()
events = self._get_test_events()
# Test object creation + 3 for the test
self.assertEqual(events.count(), 4)
self.assertEqual(events[1].action_object, None)
self.assertEqual(events[1].actor, self._test_case_user)
self.assertEqual(events[1].target, test_download_file)
self.assertEqual(events[1].verb, event_download_file_created.id)
self.assertEqual(events[2].action_object, None)
self.assertEqual(events[2].actor, self._test_case_user)
self.assertEqual(events[2].target, test_download_file)
self.assertEqual(events[2].verb, event_events_exported.id)
self.assertEqual(events[3].action_object, None)
self.assertEqual(events[3].actor, test_message)
self.assertEqual(events[3].target, test_message)
self.assertEqual(events[3].verb, event_message_created.id)
with test_download_file.open() as file_object:
self.assertTrue(
str(self.test_object).encode() not in file_object.read()
)
def test_current_user_events_export_view_with_access(self):
self.create_test_event(
actor=self._test_case_user, action_object=self.test_object
)
self.grant_access(
obj=self.test_object, permission=permission_events_export
)
response = self._request_test_current_user_events_export_view()
self.assertEqual(response.status_code, 302)
test_download_file = DownloadFile.objects.first()
test_message = Message.objects.first()
events = self._get_test_events()
# Test object creation + access grant + 3 for the test
self.assertEqual(events.count(), 5)
self.assertEqual(events[2].action_object, None)
self.assertEqual(events[2].actor, self._test_case_user)
self.assertEqual(events[2].target, test_download_file)
self.assertEqual(events[2].verb, event_download_file_created.id)
self.assertEqual(events[3].action_object, None)
self.assertEqual(events[3].actor, self._test_case_user)
self.assertEqual(events[3].target, test_download_file)
self.assertEqual(events[3].verb, event_events_exported.id)
self.assertEqual(events[4].action_object, None)
self.assertEqual(events[4].actor, test_message)
self.assertEqual(events[4].target, test_message)
self.assertEqual(events[4].verb, event_message_created.id)
with test_download_file.open() as file_object:
self.assertTrue(
str(self.test_object).encode() in file_object.read()
)
class NotificationViewTestCase(
NotificationTestMixin, NotificationViewTestMixin,
GenericDocumentViewTestCase
):
auto_upload_test_document = False
def setUp(self):
super().setUp()
self._create_test_event_type()
self._create_test_user()
self.test_event_type.commit(
actor=self.test_user, action_object=self.test_document_type
)
def test_notification_list_view(self):
response = self._request_test_notification_list_view()
self.assertEqual(response.status_code, 200)
def test_notification_mark_read_all_view(self):
self._create_test_notification()
notification_count = Notification.objects.get_unread().count()
response = self._request_test_notification_mark_read_all_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
Notification.objects.get_unread().count(),
notification_count - 1
)
def test_notification_mark_read_view(self):
self._create_test_notification()
notification_count = Notification.objects.get_unread().count()
response = self._request_test_notification_mark_read()
self.assertEqual(response.status_code, 302)
self.assertEqual(
Notification.objects.get_unread().count(),
notification_count - 1
)
class UserEventViewsTestCase(UserEventViewsTestMixin, GenericViewTestCase):
def test_user_event_type_subscription_list_view(self):
response = self._request_test_user_event_type_subscription_list_view()
self.assertEqual(response.status_code, 200)
| StarcoderdataPython |
81988 | <reponame>simone-pignotti/DnaChisel
from .NoSolutionError import NoSolutionError
from .DnaOptimizationProblem import DnaOptimizationProblem
from .CircularDnaOptimizationProblem import CircularDnaOptimizationProblem
__all__ = [
"NoSolutionError",
"DnaOptimizationProblem",
"CircularDnaOptimizationProblem"
]
| StarcoderdataPython |
1760780 | <gh_stars>1-10
import os
import subprocess as sp
from shlex import split
from pathlib import Path
__version__ = '0.3'
GITHUB_EVENT_NAME = os.environ['GITHUB_EVENT_NAME']
# Set repository
CURRENT_REPOSITORY = os.environ.get('GITHUB_REPOSITORY', '')
# TODO: How about PRs from forks?
TARGET_REPO = os.environ.get('INPUT_TARGET_REPOSITORY', '')
TARGET_REPOSITORY = TARGET_REPO if TARGET_REPO != '' else CURRENT_REPOSITORY
PULL_REQUEST_REPOSITORY = os.environ.get('INPUT_PULL_REQUEST_REPOSITORY', TARGET_REPOSITORY)
REPOSITORY = PULL_REQUEST_REPOSITORY if GITHUB_EVENT_NAME == 'pull_request' else TARGET_REPOSITORY
# Set branches
GITHUB_REF = os.environ['GITHUB_REF']
GITHUB_HEAD_REF = os.environ['GITHUB_HEAD_REF']
GITHUB_BASE_REF = os.environ['GITHUB_BASE_REF']
CURRENT_BRANCH = GITHUB_HEAD_REF if GITHUB_HEAD_REF != '' else GITHUB_REF.rsplit('/', 1)[-1]
TARGET_BRANCH = os.environ.get('INPUT_TARGET_BRANCH', '')
PULL_REQUEST_BRANCH = os.environ.get('INPUT_PULL_REQUEST_BRANCH', GITHUB_BASE_REF)
BRANCH = PULL_REQUEST_BRANCH if GITHUB_EVENT_NAME == 'pull_request' else TARGET_BRANCH
# Branch vars (eg, BRANCH, TARGET_BRANCH) can be empty if no cfg branch
CAN_COMMIT = TARGET_BRANCH != ''
GITHUB_ACTOR = os.environ['GITHUB_ACTOR']
GITHUB_REPOSITORY_OWNER = os.environ['GITHUB_REPOSITORY_OWNER']
GITHUB_TOKEN = os.environ['INPUT_GITHUB_TOKEN']
# default values
LC_EXTENSIONS = [
".c", ".c++", ".cc", ".cpp", ".cxx",
".h", ".h++", ".hh", ".hpp", ".hxx",
".j", ".jav", ".java",
]
UC_EXTENSIONS = [ext.upper() for ext in LC_EXTENSIONS]
# command related inputs
DO_COMMIT = os.environ.get('INPUT_COMMIT_REPORT', False)  # note: environment values are strings, so any non-empty value (even "false") is truthy here
FILE_EXTENSIONS = os.environ.get('INPUT_FILE_EXTENSIONS', "").split()
SOURCE_DIRS = os.environ.get('INPUT_SOURCE_DIRS', "").split()
if FILE_EXTENSIONS == []:
FILE_EXTENSIONS = LC_EXTENSIONS + UC_EXTENSIONS
if SOURCE_DIRS == []:
SOURCE_DIRS = ["."]
LANGUAGE = os.environ.get('INPUT_LANGUAGE', "")
OUTPUT_DIR = os.environ.get('INPUT_OUTPUT_DIR', 'metrics')
REPORT_TYPE = os.environ.get('INPUT_REPORT_TYPE', 'html')
command = ""
def prepare_command():
global command
command = command + "cccc"
command = command + " --outdir=" + OUTPUT_DIR
if LANGUAGE != "":
command = command + " --lang=" + LANGUAGE
source_dirs = SOURCE_DIRS
file_exts = FILE_EXTENSIONS
src_files = []
print(f'File extensions: {file_exts}')
print(f'Source directories: {source_dirs}')
print(f'Source language: {LANGUAGE}')
for srcdir in source_dirs:
files = [f for ext in file_exts
for f in Path(srcdir).glob(f'**/*{ext}')]
src_files += files
print(f'Source files: {src_files}')
print(f'Output directory: {OUTPUT_DIR}')
file_arg = ""
for fname in src_files:
file_arg = file_arg + " " + str(fname)
command = command + "{}".format(file_arg)
print(f'Full command line string: {command}')
print(f'Full command line list: {split(command)}')
print(f'Can we commit the report: {CAN_COMMIT}')
def run_cccc():
sp.check_call(split(command))
def commit_changes():
"""Commits changes.
"""
set_email = 'git config --local user.email "cccc-action@main"'
set_user = 'git config --local user.name "cccc-action"'
sp.check_call(split(set_email))
sp.check_call(split(set_user))
print(f'Base ref var: {GITHUB_BASE_REF}')
print(f'PR branch var: {BRANCH}')
print(f'Current branch: {CURRENT_BRANCH}')
print(f'Target branch: {TARGET_BRANCH}')
print(f'Target repository: {TARGET_REPOSITORY}')
git_checkout = f'git checkout {TARGET_BRANCH}'
git_add = f'git add {OUTPUT_DIR}'
git_commit = 'git commit -m "cccc report added"'
if not DO_COMMIT:
git_commit = 'git commit --dry-run -m "commit report, dry-run only"'
print(f'Committing {OUTPUT_DIR}')
sp.check_call(split(git_checkout))
sp.check_call(split(git_add))
sp.check_call(split(git_commit))
def push_changes():
"""Pushes commit.
"""
set_url = f'git remote set-url origin https://x-access-token:{GITHUB_TOKEN}@github.com/{TARGET_REPOSITORY}'
git_push = f'git push origin {TARGET_BRANCH}'
sp.check_call(split(set_url))
sp.check_call(split(git_push))
def main():
prepare_command()
run_cccc()
if CAN_COMMIT:
commit_changes()
push_changes()
if __name__ == '__main__':
main()
| StarcoderdataPython |
60981 | import json
import psycopg2
import os
from psycopg2._psycopg import IntegrityError
from psycopg2.errorcodes import UNIQUE_VIOLATION
from logging import getLogger
def create_db_connection():
return psycopg2.connect(os.environ['DB_CONNECTION_STRING'])
class RunInTransaction:
def __init__(self, connection):
self.__connection = connection
def __enter__(self):
return self.__connection.cursor()
def __exit__(self, type, value, traceback):
self.__connection.commit()
def write_to_database(event, db_connection):
try:
with RunInTransaction(db_connection) as cursor:
cursor.execute("""
INSERT INTO events
(event_id, event_type, timestamp, details)
VALUES
(%s, %s, %s, %s);
""", [
event.event_id,
event.event_type,
event.timestamp,
json.dumps(event.details)
])
except IntegrityError as integrityError:
if integrityError.pgcode == UNIQUE_VIOLATION:
# The event has already been recorded - don't throw an exception (no need to retry this message), just
# log a notification and move on.
getLogger('event-recorder').warning('Failed to store message. The Event ID {0} already exists in the database'.format(event.event_id))
else:
raise integrityError
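# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Shows how write_to_database might be driven end to end. Any object exposing
# event_id / event_type / timestamp / details attributes works, so a namedtuple
# stands in for the real event class. Assumes DB_CONNECTION_STRING is set and
# an `events` table exists; all sample values below are illustrative only.
if __name__ == '__main__':
    from collections import namedtuple
    from datetime import datetime, timezone
    from uuid import uuid4
    Event = namedtuple('Event', ['event_id', 'event_type', 'timestamp', 'details'])
    sample_event = Event(
        event_id=str(uuid4()),
        event_type='session_started',
        timestamp=datetime.now(timezone.utc),
        details={'requester': 'example'}
    )
    connection = create_db_connection()
    try:
        # A second call with the same event_id would hit the UNIQUE_VIOLATION
        # branch above and only log a warning instead of raising.
        write_to_database(sample_event, connection)
    finally:
        connection.close()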
| StarcoderdataPython |
3312885 | #!/usr/bin/env python
import unittest
import boostertest
class TestForestDelete(boostertest.BoosterTestCase):
""" Test the forest-delete action """
def setUp(self):
""" Set the action and other commonly used fixture data """
self.params = {}
self.params['action'] = "forest-delete"
self.params['forest-name'] = "pinecone-a"
self.params['delete-data'] = "true"
# collect forest names for later teardown
self.teardown_forests = []
def tearDown(self):
""" Remove items from server created during tests """
params = {}
params['action'] = "forest-delete"
params['delete-data'] = "true"
for forest in self.teardown_forests:
params['forest-name'] = forest
response, body = self.booster.request(params)
self.assertTrue(response.status in (404, 200))
def test_basic_forest_deletion_results_in_200(self):
""" A successful forest deletion should result in 200 """
# create the forest
params = {}
params['action'] = "forest-create"
params['forest-name'] = "firs"
params['host-name'] = "localhost"
params['data-directory'] = ""
self.teardown_forests.append(params['forest-name'])
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 201)
self.assertEqual(err, "none")
# delete and assert
params = self.params
params['forest-name'] = "firs"
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 200)
self.assertEqual(err, "none")
def test_delete_nonexistent_forest_results_in_404(self):
""" Attempting to delete a non-existent forest should return 404 """
params = self.params
params['forest-name'] = "no-such-forest-exists-here"
response, body = self.booster.request(params)
err = response.get("x-booster-error", "")
self.assertEqual(response.status, 404)
self.assertTrue(err.find("does not exist") != 1)
def test_empty_forest_name_results_in_404(self):
""" A forest-delete with empty forest-name value should result in 404 """
params = self.params
params['forest-name'] = ""
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 404)
self.assertTrue(err.find("Forest '' does not exist") != 1)
def test_delete_forest_with_no_forest_name_results_in_400(self):
""" A forest-delete with missing forest-name should result in 400 """
params = self.params
del params['forest-name']
response, body = self.booster.request(self.params)
err = response.get("x-booster-error", "")
self.assertEqual(response.status, 400)
self.assertTrue(err.find("valid set of arguments was not provided") != 1)
if __name__=="__main__":
unittest.main()
| StarcoderdataPython |
3326454 | <filename>dns/qcloud.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import time
import urllib
import base64
import hashlib
import hmac
import json
if sys.version_info < (3,0):
import urllib2
import urllib
else:
import urllib.request as urllib2
import urllib.parse as urllib
root_path = os.path.sep.join([os.path.split(os.path.realpath(__file__))[0], '..'])
sys.path.append(root_path)
from lib import Logger
class Qcloud:
__host = 'cns.api.qcloud.com'
__path = '/v2/index.php'
def __init__(self, secret_id, secret_key):
self.secret_id = secret_id
self.secret_key = secret_key
# @example qcloud.add_domain_record("example.com", "_acme-challenge", "123456", "TXT")
def add_domain_record(self, domain, rr, value, _type = 'TXT'):
params = {
'Action' : 'RecordCreate',
'domain' : domain,
'subDomain' : rr,
'recordType' : _type,
            'recordLine' : '默认',  # the API expects the literal '默认' ("default") record line
'value' : value
}
self.__request(params)
# @example qcloud.delete_domain_record("example.com", "_acme-challenge", "123456")
def delete_domain_record(self, domain, rr, value, _type = 'TXT'):
result = self.get_domain_records(domain, rr, _type)
result = json.loads(result)
for record in result['data']['records']:
self.delete_domain_record_by_id(domain, record['id'])
def delete_domain_record_by_id(self, domain, _id):
params = {
'Action' : 'RecordDelete',
'domain' : domain,
'recordId' : _id
}
self.__request(params)
def get_domain_records(self, domain, rr, _type = 'TXT'):
params = {
'Action' : 'RecordList',
'domain' : domain,
'subDomain' : rr,
'recordType' : _type
}
return self.__request(params)
def to_string(self):
return 'qcloud[secret_id=%s, secret_key=%s]' % (self.secret_id, self.secret_key)
def __request(self, params):
url = self.__compose_url(params)
Logger.info('Request URL: ' + url)
request = urllib2.Request(url)
try:
f = urllib2.urlopen(request, timeout=45)
response = f.read().decode('utf-8')
Logger.info(response)
return response
except urllib2.HTTPError as e:
            Logger.error('qcloud#__request raised urllib2.HTTPError: ' + str(e))
raise SystemExit(e)
def __compose_url(self, params):
common_params = {
'SecretId' : self.secret_id,
'SignatureMethod' : 'HmacSHA1',
'Nonce' : int(round(time.time() * 1000)),
'Timestamp' : int(time.time())
}
final_params = common_params.copy()
final_params.update(params)
final_params['Signature'] = self.__compute_signature(final_params)
Logger.info('Signature ' + str(final_params['Signature']))
url = 'https://%s%s?%s' % (self.__host, self.__path, urllib.urlencode(final_params))
return url
def __compute_signature(self, params):
sorted_params = sorted(params.items(), key=lambda params: params[0])
query_string = ''
for (k, v) in sorted_params:
query_string += '&' + self.__percent_encode(k) + '=' + str(v)
string_to_sign = 'GET' + self.__host + self.__path + '?' + query_string[1:]
try:
if sys.version_info < (3,0):
digest = hmac.new(str(self.secret_key), str(string_to_sign), hashlib.sha1).digest()
else:
digest = hmac.new(self.secret_key.encode(encoding="utf-8"), string_to_sign.encode(encoding="utf-8"), hashlib.sha1).digest()
except Exception as e:
Logger.error(e)
if sys.version_info < (3,1):
signature = base64.encodestring(digest).strip()
else:
signature = base64.encodebytes(digest).strip()
return signature
def __percent_encode(self, string):
return string.replace('_', '.')
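# --- Hedged illustration (editor addition, not part of the original module) ---
# A standalone sketch of the signing scheme implemented by __compute_signature
# above: sort the parameters, build "GET" + host + path + "?" + query, HMAC-SHA1
# the string with the secret key, then base64-encode. The key and parameter
# values are made up, the key-name percent-encoding step is skipped because no
# key here contains an underscore, and the snippet targets Python 3 only.
def _signature_demo():
    demo_secret_key = 'example-secret-key'          # hypothetical credential
    demo_params = {
        'Action': 'RecordList',
        'domain': 'example.com',
        'Nonce': 12345,
        'Timestamp': 1700000000,
        'SecretId': 'example-secret-id',            # hypothetical credential
        'SignatureMethod': 'HmacSHA1',
    }
    query = '&'.join('%s=%s' % (k, demo_params[k]) for k in sorted(demo_params))
    string_to_sign = 'GET' + 'cns.api.qcloud.com' + '/v2/index.php' + '?' + query
    digest = hmac.new(demo_secret_key.encode('utf-8'),
                      string_to_sign.encode('utf-8'), hashlib.sha1).digest()
    return base64.b64encode(digest).strip()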
if __name__ == '__main__':
    Logger.info('Start calling the Tencent Cloud DNS API')
Logger.info(' '.join(sys.argv))
_, action, certbot_domain, acme_challenge, certbot_validation, secret_id, secret_key = sys.argv
qcloud = Qcloud(secret_id, secret_key)
if 'add' == action:
qcloud.add_domain_record(certbot_domain, acme_challenge, certbot_validation)
elif 'delete' == action:
qcloud.delete_domain_record(certbot_domain, acme_challenge, certbot_validation)
    Logger.info('Finished calling the Tencent Cloud DNS API') | StarcoderdataPython |
186181 | <filename>examples/vision/utils.py
import os
import torch
import time
import pickle
import logging
import lmdb
from contextlib import contextmanager
from io import StringIO
from constants import _STALE_GRAD_SORT_, _ZEROTH_ORDER_SORT_, _FRESH_GRAD_SORT_, _MNIST_
import torch.utils.data as data
from qmcorder.sort.utils import compute_avg_grad_error
# six and PIL are used by ImageFolderLMDB below but were missing from the imports.
import six
from PIL import Image
def build_task_name(args):
task_name = 'MODEL-' + args.model + \
'_DATA-' + args.dataset + \
'_SFTYPE-' + args.shuffle_type + \
'_SEED-' + str(args.seed)
if args.use_qmc_da:
task_name = 'QMCDA' + task_name
if args.shuffle_type == 'ZO':
task_name = task_name + '_ZOBSZ-' + str(args.zo_batch_size)
if args.shuffle_type == 'fresh':
task_name = task_name + '_proj-' + str(args.zo_batch_size)
if args.shuffle_type == 'greedy' and args.use_random_proj:
task_name = task_name + '_proj-' + str(args.proj_target)
return task_name
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def train(args,
loader,
model,
criterion,
optimizer,
epoch,
tb_logger,
timer=None,
sorter=None):
losses = AverageMeter()
top1 = AverageMeter()
model.train()
train_batches = list(enumerate(loader))
if sorter is not None:
with timer("sorting", epoch=epoch):
if args.shuffle_type == _STALE_GRAD_SORT_:
orders = sorter.sort(epoch)
elif args.shuffle_type == _FRESH_GRAD_SORT_:
orders = sorter.sort(epoch=epoch,
model=model,
train_batches=train_batches,
optimizer=optimizer,
oracle_type='cv')
elif args.shuffle_type == _ZEROTH_ORDER_SORT_:
orders = sorter.sort(epoch=epoch,
model=model,
train_batches=train_batches,
oracle_type='cv')
else:
raise NotImplementedError
else:
orders = {i:0 for i in range(len(train_batches))}
if args.log_metric:
compute_avg_grad_error(args,
model,
train_batches,
optimizer,
epoch,
tb_logger,
oracle_type='cv',
orders=orders)
logging.warning(f"Logging the average gradient error. \
This is only for monitoring and will slow down training, \
please remove --log_metric for full-speed training.")
for i in orders.keys():
_, batch = train_batches[i]
with timer("forward pass", epoch=epoch):
loss, prec1, cur_batch_size = model(batch)
with timer("backward pass", epoch=epoch):
optimizer.zero_grad()
loss.backward()
if sorter is not None and args.shuffle_type == _STALE_GRAD_SORT_:
with timer("sorting", epoch=epoch):
sorter.update_stale_grad(optimizer=optimizer,
batch_idx=i,
epoch=epoch)
logging.info(f"Storing the staled gradient used in StaleGradGreedySort method.")
with timer("backward pass", epoch=epoch):
optimizer.step()
loss = loss.float()
# measure accuracy and record loss
losses.update(loss.item(), cur_batch_size)
top1.update(prec1.item(), cur_batch_size)
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(loader), loss=losses, top1=top1))
if args.use_tensorboard:
tb_logger.add_scalar('train/accuracy', top1.avg, epoch)
tb_logger.add_scalar('train/loss', losses.avg, epoch)
total_time = timer.totals["forward pass"] + timer.totals["backward pass"]
if sorter is not None:
total_time += timer.totals["sorting"]
tb_logger.add_scalar('train_time/accuracy', top1.avg, total_time)
tb_logger.add_scalar('train_time/loss', losses.avg, total_time)
return
def validate(args, loader, model, criterion, epoch, tb_logger):
"""
Run evaluation
"""
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
for i, batch in enumerate(loader):
loss, prec1, cur_batch_size = model(batch)
loss = loss.float()
# measure accuracy and record loss
losses.update(loss.item(), cur_batch_size)
top1.update(prec1.item(), cur_batch_size)
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(loader), loss=losses,
top1=top1))
if args.use_tensorboard:
tb_logger.add_scalar('test/accuracy', top1.avg, epoch)
tb_logger.add_scalar('test/loss', losses.avg, epoch)
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
return
class Timer:
"""
Timer for PyTorch code
Comes in the form of a contextmanager:
Example:
>>> timer = Timer()
... for i in range(10):
... with timer("expensive operation"):
... x = torch.randn(100)
... print(timer.summary())
"""
def __init__(self, verbosity_level=1, skip_first=True, use_cuda=True):
self.verbosity_level = verbosity_level
#self.log_fn = log_fn if log_fn is not None else self._default_log_fn
self.skip_first = skip_first
self.cuda_available = torch.cuda.is_available() and use_cuda
self.reset()
def reset(self):
"""Reset the timer"""
self.totals = {} # Total time per label
self.first_time = {} # First occurrence of a label (start time)
self.last_time = {} # Last occurence of a label (end time)
self.call_counts = {} # Number of times a label occurred
@contextmanager
def __call__(self, label, epoch=-1.0, verbosity=1):
# Don't measure this if the verbosity level is too high
if verbosity > self.verbosity_level:
yield
return
# Measure the time
self._cuda_sync()
start = time.time()
yield
self._cuda_sync()
end = time.time()
# Update first and last occurrence of this label
if label not in self.first_time:
self.first_time[label] = start
self.last_time[label] = end
# Update the totals and call counts
if label not in self.totals and self.skip_first:
self.totals[label] = 0.0
del self.first_time[label]
self.call_counts[label] = 0
elif label not in self.totals and not self.skip_first:
self.totals[label] = end - start
self.call_counts[label] = 1
else:
self.totals[label] += end - start
self.call_counts[label] += 1
#if self.call_counts[label] > 0:
# # We will reduce the probability of logging a timing
# # linearly with the number of time we have seen it.
# # It will always be recorded in the totals, though.
# if np.random.rand() < 1 / self.call_counts[label]:
# self.log_fn(
# "timer", {"epoch": epoch, "value": end - start}, {"event": label}
# )
def summary(self):
"""
Return a summary in string-form of all the timings recorded so far
"""
if len(self.totals) > 0:
with StringIO() as buffer:
total_avg_time = 0
print("--- Timer summary ------------------------", file=buffer)
print(" Event | Count | Average time | Frac.", file=buffer)
for event_label in sorted(self.totals):
total = self.totals[event_label]
count = self.call_counts[event_label]
if count == 0:
continue
avg_duration = total / count
total_runtime = (
self.last_time[event_label] - self.first_time[event_label]
)
runtime_percentage = 100 * total / total_runtime
total_avg_time += avg_duration if "." not in event_label else 0
print(
f"- {event_label:30s} | {count:6d} | {avg_duration:11.5f}s | {runtime_percentage:5.1f}%",
file=buffer,
)
print("-------------------------------------------", file=buffer)
event_label = "total_averaged_time"
print(
f"- {event_label:30s}| {count:6d} | {total_avg_time:11.5f}s |",
file=buffer,
)
print("-------------------------------------------", file=buffer)
return buffer.getvalue()
def _cuda_sync(self):
"""Finish all asynchronous GPU computations to get correct timings"""
if self.cuda_available:
torch.cuda.synchronize()
def _default_log_fn(self, _, values, tags):
label = tags["label"]
epoch = values["epoch"]
duration = values["value"]
print(f"Timer: {label:30s} @ {epoch:4.1f} - {duration:8.5f}s")
def raw_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
return bin_data
def dumps_data(obj):
    """
    Serialize an object.
    Returns:
        Implementation-dependent bytes-like object
    """
    return pickle.dumps(obj)
def loads_data(buf):
    """
    De-serialize a bytes-like object produced by dumps_data.
    Defined here because ImageFolderLMDB below calls loads_data, which was missing.
    """
    return pickle.loads(buf)
## Helper functions for ImageNet
def folder2lmdb(spath, dpath, name="train", write_frequency=5000):
directory = os.path.expanduser(os.path.join(spath, name))
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
dataset = ImageFolder(directory, loader=raw_reader)
data_loader = DataLoader(dataset, num_workers=16, collate_fn=lambda x: x)
lmdb_path = os.path.join(dpath, "%s.lmdb" % name)
isdir = os.path.isdir(lmdb_path)
db = lmdb.open(lmdb_path, subdir=isdir,
map_size=1099511627776 * 2, readonly=False,
meminit=False, map_async=True)
txn = db.begin(write=True)
for idx, data in enumerate(data_loader):
image, label = data[0]
txn.put(u'{}'.format(idx).encode('ascii'), dumps_data((image, label)))
if idx % write_frequency == 0:
print("[%d/%d]" % (idx, len(data_loader)))
txn.commit()
txn = db.begin(write=True)
# finish iterating through dataset
txn.commit()
keys = [u'{}'.format(k).encode('ascii') for k in range(idx + 1)]
with db.begin(write=True) as txn:
txn.put(b'__keys__', dumps_data(keys))
txn.put(b'__len__', dumps_data(len(keys)))
print("Flushing database ...")
db.sync()
db.close()
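# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Illustrates how folder2lmdb and the ImageFolderLMDB dataset defined below are
# meant to be combined: convert an ImageFolder-style directory once, then read
# the LMDB back through a regular DataLoader. The paths and transform are
# placeholders and the helper is never called automatically.
def _lmdb_roundtrip_demo(imagenet_root='/data/imagenet', lmdb_root='/data/imagenet-lmdb'):
    from torch.utils.data import DataLoader
    import torchvision.transforms as transforms
    # One-off conversion of the raw "train" folder into train.lmdb.
    folder2lmdb(imagenet_root, lmdb_root, name='train')
    # Read the LMDB back as an ordinary PyTorch dataset.
    dataset = ImageFolderLMDB(
        os.path.join(lmdb_root, 'train.lmdb'),
        transform=transforms.Compose([transforms.Resize(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor()])
    )
    loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
    images, targets = next(iter(loader))
    return images.shape, targets.shape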
class ImageFolderLMDB(data.Dataset):
def __init__(self, db_path, transform=None, target_transform=None):
self.db_path = db_path
self.env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.length = loads_data(txn.get(b'__len__'))
self.keys = loads_data(txn.get(b'__keys__'))
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(self.keys[index])
unpacked = loads_data(byteflow)
# load img
imgbuf = unpacked[0]
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
img = Image.open(buf).convert('RGB')
# load label
target = unpacked[1]
if self.transform is not None:
img = self.transform(img)
# im2arr = np.array(img)
# im2arr = torch.from_numpy(im2arr)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
# return im2arr, target
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')' | StarcoderdataPython |
1716206 | # -*- coding: utf-8 -*-
"""
Created on Mon May 28 20:21:27 2018
@author: Administrator
"""
import numpy as np
from MyLibrary import *
FPS=120
screenwidth=288
screenheight=512
fontsize=30
player_filename="players.png"
player_frame_width=48
player_frame_height=48
player_frame_num=4
base_filename="base.png"
base_frame_width=80
base_frame_height=15
base_frame_num=1
base_velocity_y=np.arange(-3,-8,-0.5)
stage=0
level=0
maxlevel=len(base_velocity_y)-1
timer_tick=30
interval=90
player_velocity_y=6
final_color=0,0,0
game_over=False
player_moving=False
FPSclock=pygame.time.Clock()
# Create the sprite groups for the player and the platforms
player_group=pygame.sprite.Group()
base_group=pygame.sprite.Group()
# Spawn a platform at a random horizontal position below the screen
def getRandomBase(filename,framewidth,frameheight,frameamount,velocity_y=-3,distance=100):
base=MySprite()
base.load(filename,framewidth,frameheight,frameamount)
base.position=random.randint(0,screenwidth-base.frame_width),distance+screenheight
base.velocity.y=velocity_y
base_group.add(base)
# When a platform scrolls off the top of the screen, recycle it to below the screen
def chBase():
global stage
for base in base_group:
if base.Y<-base.frame_height:
stage+=1
base.Y=screenheight+interval
base.X=random.randint(0,screenwidth-base.frame_width)
# Work out the difficulty level, which selects the platform speed
def calcLevel():
global level
old_level=level
present_level=stage//20
if present_level>=old_level and level<maxlevel:
level+=1
return base_velocity_y[level]
# Compute the player's velocity from the movement direction
def calcVelocity(direction,vel=1.0):
velocity=Point(0,0)
if direction==0:#not move
velocity.x=0
elif direction==1:#to the left
velocity.x=-vel
elif direction==2:#to the right
velocity.x=vel
return velocity
# Switch the sprite frame when the player presses the left/right keys
def frameChange():
    if player.direction==0:  # idle
player.first_frame=3
player.last_frame=3
    elif player.direction==1:  # facing left
player.first_frame=0
player.last_frame=0
    elif player.direction==2:  # facing right
player.first_frame=2
player.last_frame=2
if player.frame<player.first_frame:
player.frame=player.first_frame
# Build a per-pixel transparency (hit) mask for an image
def getHitmasks(image):
mask = []
for x in range(image.get_width()):
mask.append([])
for y in range(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
# Return True on collision
def checkCrash(player,base,hitmasks):
    # Player touched the bottom of the screen
if player.Y + player.frame_height >= screenheight - 1:
return True
else:
player_rect = pygame.Rect(player.X, player.Y, player.frame_width, player.frame_height)
for base in base_group:
base_rect = pygame.Rect(base.X, base.Y, base.frame_width, base.frame_height)
player_hit_mask = hitmasks['player']
base_hit_mask = hitmasks['base']
            # Check for a pixel-level collision
collide = pixelCollision(player_rect, base_rect, player_hit_mask, base_hit_mask)
if collide:
return True
return False
# Pixel-level collision test between two rects using their hit masks
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in range(rect.width):
for y in range(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
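# --- Hedged illustration (editor addition, not used by the game itself) ---
# A tiny self-check of getHitmasks/pixelCollision using two in-memory surfaces
# instead of the game sprites: two opaque 8x8 squares whose rects overlap by a
# 4x4 patch should collide, while fully transparent surfaces should not.
# Sizes and offsets are arbitrary and the helper is never called automatically.
def _pixel_collision_demo():
    opaque = pygame.Surface((8, 8), pygame.SRCALPHA)
    opaque.fill((255, 0, 0, 255))                           # every pixel solid
    transparent = pygame.Surface((8, 8), pygame.SRCALPHA)   # alpha stays 0
    rect_a = pygame.Rect(0, 0, 8, 8)
    rect_b = pygame.Rect(4, 4, 8, 8)                        # overlaps rect_a by 4x4
    solid_mask = getHitmasks(opaque)
    empty_mask = getHitmasks(transparent)
    hit = pixelCollision(rect_a, rect_b, solid_mask, solid_mask)    # True
    miss = pixelCollision(rect_a, rect_b, empty_mask, empty_mask)   # False
    return hit, miss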
class Game():
def __init__(self,screen0):
global screen, timer, player, font,player_group,base_group
pygame.init()
player_group = pygame.sprite.Group()
base_group = pygame.sprite.Group()
screen = screen0
timer = pygame.time.Clock()
player = MySprite()
player.load(player_filename, player_frame_width, player_frame_height, player_frame_num)
player.position = screenwidth // 3, screenheight // 3
player_group.add(player)
        self.reset_score = True  # this flag defers the score reset to the next frame
for i in np.arange(0, 501, 100):
getRandomBase(base_filename, base_frame_width, base_frame_height, base_frame_num, base_velocity_y[0], i)
def frameStep(self,input_actions):
if self.reset_score:
self.score=0
self.reset_score=False
pygame.event.pump()
reward=0.2
self.score+=1
terminal=False
timer.tick(timer_tick)
ticks = pygame.time.get_ticks()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
pygame.quit()
sys.exit()
if(input_actions[0]==1):
player_moving = True
player.direction = 1
elif input_actions[2]==1:
player_moving = True
player.direction = 2
else:
player.direction = 0
global game_over,level
if game_over:
reward=-5
level = 0
terminal=True
global screen
self.__init__(screen)
game_over=False
else:
if player.direction:
player.velocity = calcVelocity(player.direction, 5)
            # Change the frame image
frameChange()
            # Update the sprite images
player_group.update(ticks, 50)
            # Check for collisions to determine the player's velocity
player_moving = True
for base in base_group:
# player={}
Hitmasks = {}
Hitmasks['base'] = (getHitmasks(base.image))
Hitmasks['player'] = (getHitmasks(player.image))
iscrash = checkCrash(player, base,Hitmasks)
if iscrash:
if player.Y + player.frame_height >= base.Y and player.Y < base.Y:
if player.Y + player.frame_height < base.Y + 15:
player.Y = base.Y - player.frame_height + 2
player.velocity.y = base.velocity.y
elif player.X + player.frame_width >= base.X and player.X < base.X:
player.X = base.X - player.frame_width
player.velocity.y = player_velocity_y
elif player.X <= base.X + base.frame_width and base.X < player.X:
player.X = base.X + base.frame_width
player.velocity.y = player_velocity_y
break
else:
player.velocity.y = player_velocity_y
if player_moving:
player.X += player.velocity.x
player.Y += player.velocity.y
if player.X < 0:
player.X = 0
elif player.X > screenwidth - player.frame_width:
player.X = screenwidth - player.frame_width
if player.Y < 0 or player.Y > screenheight - player.frame_height:
game_over = True
            # Move the platforms
calcLevel()
for base in base_group:
base.velocity.y = base_velocity_y[level]
base.Y += base.velocity.y
            # Reposition the platforms if they hit the boundary
chBase()
screen.fill(final_color)
base_group.draw(screen)
player_group.draw(screen)
image_data = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
FPSclock.tick(FPS)
return image_data,reward,terminal,self.score | StarcoderdataPython |
3263197 | from classes import ELF
path = "/tmp/file.elf64"
elf = ELF(path)
print(elf.executable_header.__dict__) | StarcoderdataPython |
3229799 | # Generated by Django 3.2.4 on 2021-07-29 21:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Hike',
new_name='HikeModel',
),
]
| StarcoderdataPython |
3319155 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-14 20:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import prosopography.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('letters', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AKA',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nomina', models.CharField(max_length=255)),
('notes', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nomina', models.CharField(max_length=255)),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='M', max_length=1)),
('citizen', prosopography.models.SocialField(blank=True, choices=[('Y', 'Yes'), ('M', 'Maybe'), ('N', 'No'), ('U', 'Unknown')], db_index=True, default='N', max_length=1)),
('equestrian', prosopography.models.SocialField(blank=True, choices=[('Y', 'Yes'), ('M', 'Maybe'), ('N', 'No'), ('U', 'Unknown')], db_index=True, default='N', max_length=1)),
('senatorial', prosopography.models.SocialField(blank=True, choices=[('Y', 'Yes'), ('M', 'Maybe'), ('N', 'No'), ('U', 'Unknown')], db_index=True, default='N', max_length=1)),
('consular', prosopography.models.SocialField(blank=True, choices=[('Y', 'Yes'), ('M', 'Maybe'), ('N', 'No'), ('U', 'Unknown')], db_index=True, default='N', max_length=1)),
('birth', models.PositiveSmallIntegerField(blank=True, null=True)),
('death', models.PositiveSmallIntegerField(blank=True, null=True)),
('cos', models.PositiveSmallIntegerField(blank=True, null=True)),
('floruit', models.PositiveSmallIntegerField(blank=True, null=True)),
('certainty_of_id', models.PositiveSmallIntegerField(default=5, validators=[prosopography.models.valid_range])),
('notes', models.TextField(blank=True)),
('letters_to', models.ManyToManyField(blank=True, related_name='letters_to', to='letters.Letter', verbose_name='letter to')),
('mentioned_in', models.ManyToManyField(blank=True, related_name='mentioned_in', to='letters.Letter')),
],
),
migrations.CreateModel(
name='Relationship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('relationship_type', models.CharField(choices=[('anc', 'ancestor'), ('des', 'descendant'), ('sib', 'sibling'), ('par', 'parent'), ('chi', 'child'), ('fam', 'member of same familia'), ('ami', 'amicus'), ('oth', 'otherwise related')], db_index=True, max_length=4)),
('from_person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_person', to='prosopography.Person')),
('to_person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='prosopography.Person')),
],
),
migrations.AddField(
model_name='person',
name='related_to',
field=models.ManyToManyField(through='prosopography.Relationship', to='prosopography.Person'),
),
migrations.AddField(
model_name='aka',
name='person',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='prosopography.Person'),
),
]
| StarcoderdataPython |
1668866 | <filename>presqt/targets/gitlab/utilities/delete_gitlab_project.py<gh_stars>1-10
import requests
def delete_gitlab_project(project_id, token):
"""
Delete the given project from Gitlab.
Parameters
----------
project_id: str
The ID of the project to delete.
token: str
The user's GitLab token
"""
headers = {"Private-Token": "{}".format(token)}
requests.delete("https://gitlab.com/api/v4/projects/{}".format(project_id), headers=headers) | StarcoderdataPython |
199190 | LIST_WORKFLOWS_GQL = '''
query workflowList {
workflowList {
edges{
node {
id
name
objectType
initialPrefetch
initialState {
id
name
}
initialTransition {
id
name
}
}
}
}
}
'''
LIST_STATES_GQL = '''
query stateList {
stateList {
edges{
node {
id
name
active
initial
workflow {
id
name
}
}
}
}
}
'''
MUTATE_WORKFLOW_GRAPH_GQL = '''
mutation workflowMutation($param: WorkflowMutationInput!) {
workflowMutation(input:$param) {
id
name
initialPrefetch
objectType
errors {
messages
}
}
}
'''
MUTATE_STATE_GRAPH_GQL = '''
mutation stateMutation($param: StateMutationInput!) {
stateMutation(input:$param) {
id
name
initial
active
workflow
errors {
messages
}
}
}
'''
LIST_TRANSITIONS_GQL = '''
query transitionList($param: ID) {
transitionList(workflow_Id:$param) {
edges{
node {
id
name
initialState {
id
name
active
initial
variableDefinitions {
edges {
node {
id
name
}
}
}
}
finalState {
id
name
active
initial
variableDefinitions {
edges {
node {
id
name
}
}
}
}
conditionSet {
edges {
node {
id
conditionType
functionSet {
edges {
node {
id
functionModule
functionName
parameters{
edges {
node {
id
name
value
}
}
}
}
}
}
}
}
}
}
}
}
}
'''
LIST_WORKFLOW_STATES_GQL = '''
query stateList($param: ID) {
stateList(workflow_Id:$param) {
edges{
node {
id
name
active
initial
workflow {
id
name
}
}
}
}
}
'''
LIST_WORKFLOW_GRAPH_GQL = '''
query workflowList($param: String) {
workflowList(name:$param) {
edges{
node {
id
name
graph
}
}
}
}
''' | StarcoderdataPython |
104167 | <filename>GA_FeatureSelection.py
import numpy
import matplotlib.pyplot
import sklearn.svm
"""
Reference:
This module is adapted from a GA feature-selection library:
ahmedfgad/FeatureReductionGenetic
Credit to the original author.
Github Link: https://github.com/ahmedfgad/FeatureReductionGenetic.git
Original Author: <NAME>
Email: <EMAIL>
"""
def feture_reduction(feature_input, label_input):
num_samples = feature_input.shape[0]
num_feature_elements = feature_input.shape[1]
train_indices = numpy.arange(1, num_samples, 4)
test_indices = numpy.arange(0, num_samples, 4)
# print("Number of training samples: ", train_indices.shape[0])
# print("Number of test samples: ", test_indices.shape[0])
"""
Genetic algorithm parameters:
Population size
Mating pool size
Number of mutations
"""
sol_per_pop = 20 # Population size.
num_parents_mating = 4 # Number of parents inside the mating pool.
num_mutations = 3 # Number of elements to mutate.
# Defining the population shape.
pop_shape = (sol_per_pop, num_feature_elements)
# Creating the initial population.
new_population = numpy.random.randint(low=0, high=2, size=pop_shape)
# print(new_population.shape)
best_outputs = []
num_generations = 10
for generation in range(num_generations):
# print("Generation : ", generation)
# Measuring the fitness of each chromosome in the population.
fitness = cal_pop_fitness(new_population, feature_input, label_input, train_indices, test_indices)
best_outputs.append(numpy.max(fitness))
# The best result in the current iteration.
# print("Best result : ", best_outputs[-1])
# Selecting the best parents in the population for mating.
parents = select_mating_pool(new_population, fitness, num_parents_mating)
# Generating next generation using crossover.
offspring_crossover = crossover(parents,
offspring_size=(pop_shape[0] - parents.shape[0], num_feature_elements))
# Adding some variations to the offspring using mutation.
offspring_mutation = mutation(offspring_crossover, num_mutations=num_mutations)
# Creating the new population based on the parents and offspring.
new_population[0:parents.shape[0], :] = parents
new_population[parents.shape[0]:, :] = offspring_mutation
    # Getting the best solution after finishing all generations.
# At first, the fitness is calculated for each solution in the final generation.
fitness = cal_pop_fitness(new_population, feature_input, label_input, train_indices, test_indices)
# Then return the index of that solution corresponding to the best fitness.
best_match_idx = numpy.where(fitness == numpy.max(fitness))[0]
best_match_idx = best_match_idx[0]
best_solution = new_population[best_match_idx, :]
best_solution_indices = numpy.where(best_solution == 1)[0]
best_solution_num_elements = best_solution_indices.shape[0]
best_solution_fitness = fitness[best_match_idx]
# print("best_match_idx : ", best_match_idx)
# print("best_solution : ", best_solution)
# print("Selected indices : ", best_solution_indices)
# print("Number of selected elements : ", best_solution_num_elements)
# print("Best solution fitness : ", best_solution_fitness)
# Plot Result
# matplotlib.pyplot.plot(best_outputs)
# matplotlib.pyplot.xlabel("Iteration")
# matplotlib.pyplot.ylabel("Fitness")
# matplotlib.pyplot.show()
# produce features to be deleted
reduced_features = numpy.delete(numpy.arange(num_feature_elements), best_solution_indices)
return reduced_features
def reduce_features(solution, features):
selected_elements_indices = numpy.where(solution == 1)[0]
reduced_features = features[:, selected_elements_indices]
return reduced_features
def classification_accuracy(labels, predictions):
correct = numpy.where(labels == predictions)[0]
accuracy = correct.shape[0]/labels.shape[0]
return accuracy
def cal_pop_fitness(pop, features, labels, train_indices, test_indices):
accuracies = numpy.zeros(pop.shape[0])
idx = 0
for curr_solution in pop:
reduced_features = reduce_features(curr_solution, features)
train_data = reduced_features[train_indices, :]
test_data = reduced_features[test_indices, :]
train_labels = labels[train_indices]
test_labels = labels[test_indices]
SV_classifier = sklearn.svm.SVC(gamma='scale')
SV_classifier.fit(X=train_data, y=train_labels)
predictions = SV_classifier.predict(test_data)
accuracies[idx] = classification_accuracy(test_labels, predictions)
idx = idx + 1
return accuracies
def select_mating_pool(pop, fitness, num_parents):
# Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.
parents = numpy.empty((num_parents, pop.shape[1]))
for parent_num in range(num_parents):
max_fitness_idx = numpy.where(fitness == numpy.max(fitness))
max_fitness_idx = max_fitness_idx[0][0]
parents[parent_num, :] = pop[max_fitness_idx, :]
fitness[max_fitness_idx] = -99999999999
return parents
def crossover(parents, offspring_size):
offspring = numpy.empty(offspring_size)
# The point at which crossover takes place between two parents. Usually, it is at the center.
crossover_point = numpy.uint8(offspring_size[1]/2)
for k in range(offspring_size[0]):
# Index of the first parent to mate.
parent1_idx = k%parents.shape[0]
# Index of the second parent to mate.
parent2_idx = (k+1)%parents.shape[0]
# The new offspring will have its first half of its genes taken from the first parent.
offspring[k, 0:crossover_point] = parents[parent1_idx, 0:crossover_point]
# The new offspring will have its second half of its genes taken from the second parent.
offspring[k, crossover_point:] = parents[parent2_idx, crossover_point:]
return offspring
def mutation(offspring_crossover, num_mutations=2):
mutation_idx = numpy.random.randint(low=0, high=offspring_crossover.shape[1], size=num_mutations)
# Mutation changes a single gene in each offspring randomly.
for idx in range(offspring_crossover.shape[0]):
# The random value to be added to the gene.
offspring_crossover[idx, mutation_idx] = 1 - offspring_crossover[idx, mutation_idx]
return offspring_crossover
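# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Shows how feture_reduction might be invoked on a small synthetic problem:
# 200 samples with 20 features where only the first 5 carry signal. The return
# value lists the feature indices the GA suggests dropping. All numbers here
# are illustrative and not taken from the original project.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    demo_features = rng.rand(200, 20)
    # Labels depend only on the first five features, so the GA should tend to keep them.
    demo_labels = (demo_features[:, :5].sum(axis=1) > 2.5).astype(int)
    dropped = feture_reduction(demo_features, demo_labels)
    print("Feature indices suggested for removal:", dropped)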
| StarcoderdataPython |
1668058 | """WxPython-based implementation of the Eelbrain ui functions."""
from ..._wxgui import wx, get_app
def ask_saveas(title, message, filetypes, defaultDir, defaultFile):
"""See eelbrain.ui documentation"""
app = get_app()
return app.ask_saveas(title, message, filetypes, defaultDir, defaultFile)
def ask_dir(title="Select Folder", message="Please Pick a Folder", must_exist=True):
app = get_app()
return app.ask_for_dir(title, message, must_exist)
def ask_file(title, message, filetypes, directory, mult):
app = get_app()
return app.ask_for_file(title, message, filetypes, directory, mult)
def ask(title="Overwrite File?",
message="Duplicate filename. Do you want to overwrite?",
cancel=False,
default=True, # True=YES, False=NO, None=Nothing
):
style = wx.YES_NO | wx.ICON_QUESTION
if cancel:
style = style | wx.CANCEL
if default:
style = style | wx.YES_DEFAULT
elif default == False:
style = style | wx.NO_DEFAULT
dialog = wx.MessageDialog(None, message, title, style)
answer = dialog.ShowModal()
if answer == wx.ID_NO:
return False
elif answer == wx.ID_YES:
return True
elif answer == wx.ID_CANCEL:
return None
def ask_color(default=(0, 0, 0)):
dlg = wx.ColourDialog(None)
dlg.GetColourData().SetChooseFull(True)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetColourData()
out = data.GetColour().Get()
out = tuple([o / 255. for o in out])
else:
out = False
dlg.Destroy()
return out
def ask_str(message, title, default=''):
app = get_app()
return app.ask_for_string(title, message, default)
def message(title, message="", icon='i'):
style = wx.OK
if icon == 'i':
style = style | wx.ICON_INFORMATION
elif icon == '?':
style = style | wx.ICON_QUESTION
elif icon == '!':
style = style | wx.ICON_EXCLAMATION
elif icon == 'error':
style = style | wx.ICON_ERROR
elif icon is None:
pass
else:
raise ValueError("Invalid icon argument: %r" % icon)
dlg = wx.MessageDialog(None, message, title, style)
dlg.ShowModal()
def copy_file(path):
if wx.TheClipboard.Open():
try:
data_object = wx.FileDataObject()
data_object.AddFile(path)
wx.TheClipboard.SetData(data_object)
except:
wx.TheClipboard.Close()
raise
else:
wx.TheClipboard.Close()
def copy_text(text):
if wx.TheClipboard.Open():
try:
data_object = wx.TextDataObject(text)
wx.TheClipboard.SetData(data_object)
except:
wx.TheClipboard.Close()
raise
else:
wx.TheClipboard.Close()
| StarcoderdataPython |
3204090 | x="Hello"
y="World"
z="!"
print(x + " " + y + " " + z)
| StarcoderdataPython |
3329951 | # http://stackoverflow.com/questions/14061195/how-to-get-transcript-in-youtube-api-v3
# http://video.google.com/timedtext?lang={LANG}&v={VIDEOID}
import config
import requests
import untangle
from datetime import datetime
import time
import pymysql.cursors
import sys
def printDateNicely(timestamp):
reg_format_date = timestamp.strftime("%d %B %Y %I:%M:%S %p")
return reg_format_date
connection = pymysql.connect(host='localhost',
user='root',
password=config.MYSQL_SERVER_PASSWORD,
db='youtubeProjectDB',
charset='utf8mb4', # deals with the exotic emojis
cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor1:
sql = "SELECT DISTINCT(videoId) FROM search_api WHERE videoID NOT IN (SELECT videoId FROM captions);"
cursor1.execute(sql)
videoIdsDicts = cursor1.fetchall()
videoIds = [d.get('videoId') for d in videoIdsDicts]
# For testing try just the first 10 first:
# videoIds = videoIds[:100]
time.sleep(2)
with connection.cursor() as cursor2:
for videoId in videoIds:
time.sleep(0.3)
#videoId = 'xthIqXnHL_8'
language = "en"
payload = {'lang':language,
'v':videoId}
queryMethod = 'http://video.google.com/timedtext'
r = requests.get(queryMethod, params=payload)
subTitle_xml = r.text
if len(subTitle_xml) > 5:
print(".", end="") # so you know it's alive and looping
try:
obj = untangle.parse(subTitle_xml)
a = obj.transcript.text
# https://github.com/stchris/untangle/issues/14
# b = a[5]
# b._name
# b._attributes
justText = []
for line in obj.transcript.text:
justText.append(line.cdata)
jt = ""
for line in obj.transcript.text:
jt += line.cdata + " " # need a space at the end of line or words crushed together!
except:
jt = ""
queriedAt = printDateNicely(datetime.now())
captionsText = jt
captionsXML = subTitle_xml
captionsFileFormat = ".xml"
try:
sql = "INSERT INTO captions (videoId, captionsText, captionsFile, language, captionsFileFormat, queryMethod, queriedAt) VALUES (%s,%s,%s,%s,%s,%s,%s)"
cursor2.execute(sql, (videoId, captionsText, captionsXML, language, captionsFileFormat, queryMethod, queriedAt))
connection.commit() # safer here otherwise might lose hours of work that this program does
except:
print("\n", "Oops!",sys.exc_info()[0],"occured with videoId", videoId)
else:
print("X", end="") # looping but not so healthy
#connection.rollback()
connection.close()
| StarcoderdataPython |
98131 | <filename>aiomodrinth/models/utils.py
from datetime import datetime
from abc import ABC, abstractmethod
def string_to_datetime(date: str, format_: str = None) -> datetime:
if format_ is None:
format_ = "%Y/%m/%d %H:%M:%S.%f"
dt = datetime.strptime(date.replace('-', '/').replace('T', ' ').replace('Z', ''), format_)
return dt
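# Example (sketch): Modrinth-style timestamps such as "2021-04-05T12:30:00.123456Z"
# are normalised to "2021/04/05 12:30:00.123456" before parsing, so
#   string_to_datetime("2021-04-05T12:30:00.123456Z")
# yields datetime(2021, 4, 5, 12, 30, 0, 123456).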
| StarcoderdataPython |
172472 | <filename>utils/profiling.py
# -*- coding: utf-8 -*-
import sys
import time
import torch
from functools import wraps
import numpy as np
def get_gpumem():
return torch.cuda.memory_allocated() / 1024. / 1024.
def get_cputime():
return time.perf_counter()
def seqstat(arr):
a = np.array(arr)
return '[ {:.3f} / {:.3f} / {:.3f} / {:.3f} ]'.format(
np.mean(a), np.min(a), np.max(a), np.std(a))
t0 = 0
def report_time(msg=''):
global t0
t1 = get_cputime()
fr = sys._getframe(1)
print ("CPU Time: {} {} @ {} : {:.3f} dt: {:.3f} sec".format(
msg.center(20,' '), fr.f_code.co_name, fr.f_lineno, t1, t1 - t0))
t0 = t1
m0 = 0
def report_mem(msg=''):
global m0
m1 = get_gpumem()
fr = sys._getframe(1)
print ("GPU Mem: {} {} @ {} : {:.3f} dt: {:.3f} MB".format(
msg.center(20,' '), fr.f_code.co_name, fr.f_lineno, m1, m1 - m0))
m0 = m1
mtable = {}
def profile_mem(function):
@wraps(function)
def gpu_mem_profiler(*args, **kwargs):
m1 = get_gpumem()
result = function(*args, **kwargs)
m2 = get_gpumem()
fp = m2 - m1
fname = function.__name__
if fname in mtable:
mtable[fname].append(fp)
else:
mtable[fname] = [fp]
print ("GPU Mem: {}: {:.3f} / {:.3f} / {:.3f} / {} MB".format(
fname.center(20,' '), m1, m2, fp, seqstat(mtable[fname])))
return result
return gpu_mem_profiler
ttable = {}
def profile_time(function):
@wraps(function)
def function_timer(*args, **kwargs):
t1 = get_cputime()
result = function(*args, **kwargs)
t2 = get_cputime()
lat = t2 - t1
fname = function.__name__
if fname in ttable:
ttable[fname].append(lat)
else:
ttable[fname] = [lat]
print ("CPU Time: {}: {:.3f} / {:.3f} / {:.3f} / {} sec".format(
fname.center(20,' '), t1, t2, lat, seqstat(ttable[fname])))
return result
return function_timer
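# Usage sketch: the decorators stack on any function whose CPU time / GPU memory
# you want to track (the function name below is only illustrative):
#
#   @profile_time
#   @profile_mem
#   def forward_pass(x):
#       ...
#
# Each call prints the per-call value plus mean/min/max/std over all calls so far.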
class profile_ctx():
def __init__(self, name):
self.name = name
def __enter__(self):
self.m0 = get_gpumem()
self.t0 = get_cputime()
    def __exit__(self, exc_type, exc_value, traceback):
        self.t1 = get_cputime()
        self.m1 = get_gpumem()
        lat = self.t1 - self.t0
        mem = self.m1 - self.m0
        fname = self.name
        if fname in ttable:
            ttable[fname].append(lat)
        else:
            ttable[fname] = [lat]
        if fname in mtable:
            mtable[fname].append(mem)
        else:
            mtable[fname] = [mem]
print ("CPU Time: {}: {:.3f} / {:.3f} / {:.3f} / {} sec".format(
fname.center(20,' '), self.t0, self.t1, lat, seqstat(ttable[fname])))
print ("GPU Mem: {}: {:.3f} / {:.3f} / {:.3f} / {} MB".format(
fname.center(20,' '), self.m0, self.m1, mem, seqstat(mtable[fname])))
def report(self):
t1 = get_cputime()
fr = sys._getframe(1)
print ("CPU Time: {} {} @ {} : {:.3f} dt: {:.3f} sec".format(
self.name.center(20,' '), fr.f_code.co_name, fr.f_lineno, t1, t1 - self.t0))
m1 = get_gpumem()
print ("GPU Mem: {} {} @ {} : {:.3f} dt: {:.3f} MB".format(
self.name.center(20,' '), fr.f_code.co_name, fr.f_lineno, m1, m1 - self.m0))
class TimeProfiler(object):
def __init__(self):
self.table = {}
self.acc_table = {}
self.offset = 0
self.timer_start('ofs')
self.timer_stop('ofs')
self.offset = self.table['ofs'][0]
def timer_start(self, id):
t0 = time.perf_counter()
if not id in self.table:
self.table[id] = np.array([-t0])
else:
arr = self.table[id]
self.table[id] = np.append(arr, -t0)
def timer_stop(self, id):
t1 = time.perf_counter()
self.table[id][-1] += t1 - self.offset
def print_stat(self, id):
if not id in self.table: return
arr = self.table[id]
avg = np.mean(arr)
tmin = np.min(arr)
tmax = np.max(arr)
std = np.std(arr)
print('Time {}: {} / {:.3f} / {:.3f} / {:.3f} / {:.3f} / {:.3f}'.format(
id.center(10,' '), len(arr), arr[-1], avg, tmin, tmax, std))
def stat_all(self):
for i in self.table:
self.print_stat(i)
def begin_acc_item(self, cid):
if not cid in self.acc_table:
self.acc_table[cid] = np.array([0.])
else:
arr = self.acc_table[cid]
self.acc_table[cid] = np.append(arr, [0.])
def add_acc_item(self, cid, id):
arr = self.acc_table[cid]
item = self.table[id]
arr[-1] += item[-1]
    def clear_acc_item(self, cid):
        # numpy arrays have no clear(); reset the accumulator to an empty array
        self.acc_table[cid] = np.array([])
def stat_acc(self, cid):
if not cid in self.acc_table: return
arr = self.acc_table[cid]
tsum = np.sum(arr)
avg = np.mean(arr)
tmin = np.min(arr)
tmax = np.max(arr)
std = np.std(arr)
print('Acc Time {} : {} / {:.3f} / {:.3f} / {:.3f} / {:.3f} / {:.3f}'.format(
cid.center(10,' '), len(arr), arr[-1], avg, tmax, tmin, std))
def avg(self, id):
return 0 if not id in self.table else np.mean(self.table[id])
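# Usage sketch for the module-level instance created below:
#
#   tprof.timer_start('fwd')
#   ...                      # code being measured
#   tprof.timer_stop('fwd')
#   tprof.print_stat('fwd')  # count / last / mean / min / max / std
#
# begin_acc_item/add_acc_item additionally accumulate several named timers into
# one per-iteration total (e.g. per training step).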
tprof = TimeProfiler() | StarcoderdataPython |
1786902 | <filename>src/buildstream/_options/optionflags.py
#
# Copyright (C) 2017 Codethink Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# <NAME> <<EMAIL>>
from .._exceptions import LoadError
from ..exceptions import LoadErrorReason
from .option import Option, OPTION_SYMBOLS
# OptionFlags
#
# A flags project option
#
class OptionFlags(Option):
OPTION_TYPE = "flags"
def __init__(self, name, definition, pool):
self.values = None
super().__init__(name, definition, pool)
def load(self, node):
self.load_special(node)
def load_special(self, node, allow_value_definitions=True):
super().load(node)
valid_symbols = OPTION_SYMBOLS + ["default"]
if allow_value_definitions:
valid_symbols += ["values"]
node.validate_keys(valid_symbols)
# Allow subclass to define the valid values
self.values = self.load_valid_values(node)
if not self.values:
raise LoadError(
"{}: No values specified for {} option '{}'".format(
node.get_provenance(), self.OPTION_TYPE, self.name
),
LoadErrorReason.INVALID_DATA,
)
value_node = node.get_sequence("default", default=[])
self.value = value_node.as_str_list()
self.validate(self.value, value_node)
def load_value(self, node):
value_node = node.get_sequence(self.name)
self.value = sorted(value_node.as_str_list())
self.validate(self.value, value_node)
def set_value(self, value):
# Strip out all whitespace, allowing: "value1, value2 , value3"
stripped = "".join(value.split())
# Get the comma separated values
list_value = stripped.split(",")
self.validate(list_value)
self.value = sorted(list_value)
def get_value(self):
return ",".join(self.value)
def validate(self, value, node=None):
for flag in value:
if flag not in self.values:
if node is not None:
provenance = node.get_provenance()
prefix = "{}: ".format(provenance)
else:
prefix = ""
raise LoadError(
"{}Invalid value for flags option '{}': {}\n".format(prefix, self.name, value)
+ "Valid values: {}".format(", ".join(self.values)),
LoadErrorReason.INVALID_DATA,
)
def load_valid_values(self, node):
# Allow the more descriptive error to raise when no values
# exist rather than bailing out here (by specifying default_value)
return node.get_str_list("values", default=[])
| StarcoderdataPython |
1743899 | <gh_stars>0
from dal import autocomplete
from dal_select2.widgets import Select2Multiple
from dal_select2_taggit.widgets import TaggitSelect2
from django.db import models
from django.utils import timezone
from django.utils.text import slugify
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalManyToManyField, ParentalKey
from taggit.models import TaggedItemBase, TagBase
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel
from charsleft_widget.widgets import CharsLeftArea
from flis_horison_scanning.abstract_models import GenericElement, SourcesMixin, CategoryBase, \
SortableCategoryWithDescriptionBase
from pages.abstract_models import FlisPage, Orderable
from pages.widgets import FlisListModelSelect2
from .choices import TREND_TYPES, UNCERTAINTY_TYPES
class Source(GenericElement):
title_original = models.CharField(max_length=512, null=True, blank=True)
published_year = models.CharField(max_length=4)
legacy_file = models.CharField(max_length=512, null=True, blank=True)
summary = models.TextField(null=True, blank=True)
author = models.CharField(max_length=512)
    def __str__(self):
return self.name
class Figure(GenericElement, SourcesMixin):
legacy_file = models.CharField(max_length=512, blank=True, null=True)
theme = models.ForeignKey('flis_metadata.EnvironmentalTheme')
class SteepCategory(CategoryBase):
class Meta:
verbose_name_plural = 'Steep Categories'
class ImpactType(CategoryBase):
pass
class Impact(GenericElement, SourcesMixin):
impact_type = models.ForeignKey('ImpactType', related_name='impact_type', blank=True, null=True)
steep_category = models.ForeignKey('SteepCategory', related_name='impact_category', blank=True, null=True)
description = models.TextField()
class Implication(GenericElement, SourcesMixin):
AREA_POLICY = (
('mock_policy', 'Mock policy'),
)
policy_area = models.CharField(
max_length=64,
choices=AREA_POLICY,
default=0,
blank=True,
null=True,
)
description = models.TextField(max_length=2048)
    def __str__(self):
return self.name
class Indicator(GenericElement, SourcesMixin):
theme = models.ForeignKey('flis_metadata.EnvironmentalTheme')
start_date = models.DateField(editable=True, null=True, blank=True)
end_date = models.DateField(editable=True)
assessment = models.TextField(null=True, blank=True)
assessment_author = models.CharField(max_length=64, null=True, blank=True)
class DriverOfChange(FlisPage):
is_creatable = False
image = models.ForeignKey(
'pages.FlisImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
short_title = models.CharField(max_length=64)
geographical_scope = models.ForeignKey(
'flis_metadata.GeographicalScope',
null=True,
blank=True,
on_delete=models.SET_NULL
)
country = models.ForeignKey(
'flis_metadata.Country',
null=True,
blank=True,
on_delete=models.SET_NULL
)
steep_category = models.ForeignKey(
'SteepCategory',
null=True,
blank=True,
on_delete=models.SET_NULL
)
time_horizon = models.ForeignKey(
'TimeHorizon',
null=True,
blank=True,
on_delete=models.SET_NULL
)
summary = RichTextField(null=True, blank=True)
impacts = ParentalManyToManyField(
'Impact',
blank=True,
)
implications = ParentalManyToManyField(
'Implication',
blank=True,
)
indicators = ParentalManyToManyField(
'Indicator',
blank=True,
)
sources = ParentalManyToManyField(
'Source',
blank=True,
)
figures = ParentalManyToManyField(
'Figure',
blank=True,
)
content_panels = Page.content_panels + [
ImageChooserPanel('image'),
FieldPanel('short_title'),
SnippetChooserPanel('geographical_scope'),
SnippetChooserPanel('country'),
SnippetChooserPanel('steep_category'),
SnippetChooserPanel('time_horizon'),
FieldPanel('summary'),
FieldPanel('impacts', widget=autocomplete.ModelSelect2Multiple(url='impacts-autocomplete')),
FieldPanel('implications', widget=autocomplete.ModelSelect2Multiple(url='implications-autocomplete')),
FieldPanel('indicators', widget=autocomplete.ModelSelect2Multiple(url='indicators-autocomplete')),
FieldPanel('sources', widget=autocomplete.ModelSelect2Multiple(url='sources-autocomplete')),
FieldPanel('figures', widget=autocomplete.ModelSelect2Multiple(url='figures-autocomplete')),
]
class Meta:
verbose_name = 'Signal of Change'
verbose_name_plural = 'Signals of Change'
class Trend(DriverOfChange):
parent_page_types = ['pages.StaticIndex']
trend_type = models.IntegerField(choices=TREND_TYPES, default=1)
content_panels2 = DriverOfChange.content_panels + [
SnippetChooserPanel('trend_type')
]
class Uncertainty(DriverOfChange):
parent_page_types = ['pages.StaticIndex']
uncertainty_type = models.IntegerField(choices=UNCERTAINTY_TYPES, default=1)
content_panels2 = DriverOfChange.content_panels + [
SnippetChooserPanel('uncertainty_type')
]
class Meta:
verbose_name_plural = 'Uncertainties'
class WeakSignal(DriverOfChange):
parent_page_types = ['pages.StaticIndex']
class WildCard(DriverOfChange):
parent_page_types = ['pages.StaticIndex']
class SignalTagItem(TagBase):
class Meta:
ordering = ('name',)
class SignalTag(TaggedItemBase):
content_object = ParentalKey('Signal', related_name='tagged_signals')
tag = models.ForeignKey(SignalTagItem,
related_name="%(app_label)s_%(class)s_items")
class Signal(FlisPage):
class Meta:
ordering = ('-first_published_at',)
SIGNAL_TYPES = (
('megatrend', 'Megatrend'),
('trend', 'Trend'),
('weak_signal', 'Weak Signal'),
('wild_card', 'Wild Card'),
('other', 'Other'),
)
parent_page_types = ['pages.StaticIndex']
short_title = models.CharField(max_length=256)
type_of_signal = models.ForeignKey(
'TypeOfSignal',
null=True,
blank=False,
on_delete=models.SET_NULL
)
cover_image = models.ForeignKey(
'pages.FlisImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
geographical_scope = models.ForeignKey(
'GeographicalScope',
null=True,
blank=True,
on_delete=models.SET_NULL
)
headline = models.TextField(max_length=256)
description = RichTextField()
origin_of_signal = models.ForeignKey(
'OriginOfSignal',
null=True,
blank=True,
on_delete=models.SET_NULL
)
time_horizon = models.ForeignKey(
'TimeHorizon',
null=True,
blank=True,
on_delete=models.SET_NULL
)
overall_impact = models.ForeignKey(
'OverallImpact',
null=True,
blank=True,
on_delete=models.SET_NULL
)
impact_description = RichTextField(blank=True, null=True)
implications = RichTextField(blank=True, null=True)
strategies = ParentalManyToManyField(
'EUStrategy',
blank=True,
)
date_of_signal_detection = models.DateField(null=True)
date_of_last_modification = models.DateField(blank=True, null=True,
verbose_name='Date of last modification to the signal')
likelihood = models.ForeignKey(
'RelevanceOfSignalLikelihood',
null=True,
blank=True,
on_delete=models.SET_NULL
)
severity = models.ForeignKey(
'RelevanceOfSignalSeverity',
null=True,
blank=True,
on_delete=models.SET_NULL
)
keywords = ClusterTaggableManager(through=SignalTag, blank=True)
content_panels = [
FieldPanel('short_title'),
FieldPanel('type_of_signal', widget=FlisListModelSelect2),
FieldPanel('title'),
ImageChooserPanel('cover_image'),
FieldPanel('geographical_scope', widget=FlisListModelSelect2),
FieldPanel('headline', widget=CharsLeftArea),
FieldPanel('description'),
InlinePanel('images', label='Images'),
FieldPanel('origin_of_signal', widget=FlisListModelSelect2),
FieldPanel('time_horizon', widget=FlisListModelSelect2),
FieldPanel('overall_impact', widget=FlisListModelSelect2),
FieldPanel('impact_description'),
FieldPanel('implications'),
InlinePanel('signal_sources', label='Source'),
InlinePanel('eea_indicators', label='Related EEA Indicator'),
FieldPanel('overall_impact', widget=FlisListModelSelect2),
FieldPanel('strategies', widget=Select2Multiple),
FieldPanel('date_of_signal_detection'),
FieldPanel('date_of_last_modification'),
MultiFieldPanel([
FieldPanel('likelihood'),
FieldPanel('severity'),
], heading='Relevance of the signal'),
FieldPanel('keywords', widget=TaggitSelect2(url='tags-autocomplete')),
]
def save(self, *args, **kwargs):
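        # Intent (inferred): bump date_of_last_modification automatically unless
        # the editor changed it explicitly in this save.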
original = self.__class__.objects.get(pk=self.pk) if self.pk else None
if any(
[
original and self.date_of_last_modification == original.date_of_last_modification,
self.date_of_last_modification is None
]
):
self.date_of_last_modification = timezone.now()
super().save(*args, **kwargs)
Signal._meta.get_field('title').verbose_name = 'Long title'
class SignalImage(models.Model):
signal = ParentalKey('Signal', related_name='images')
image = models.ForeignKey(
'pages.FlisImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
caption = models.TextField(blank=True, null=True)
panels = [
ImageChooserPanel('image'),
FieldPanel('caption'),
]
class Meta:
ordering = ('pk',)
class SignalSource(models.Model):
signal = ParentalKey('Signal', related_name='signal_sources')
source = RichTextField(blank=False)
class Meta:
ordering = ('pk',)
class EEAIndicator(models.Model):
signal = ParentalKey('Signal', related_name='eea_indicators')
title = models.CharField(max_length=512)
url = models.URLField(max_length=512)
class Meta:
ordering = ('pk',)
class OriginOfSignal(SortableCategoryWithDescriptionBase):
class Meta:
verbose_name = 'Origin of Signal'
verbose_name_plural = 'Origins of Signals'
class OverallImpact(SortableCategoryWithDescriptionBase):
pass
class TimeHorizon(SortableCategoryWithDescriptionBase):
pass
class GeographicalScope(SortableCategoryWithDescriptionBase):
pass
class TypeOfSignal(SortableCategoryWithDescriptionBase):
slug = models.SlugField(max_length=50, primary_key=True, editable=False)
def save(self, *args, **kwargs):
if self.slug is None or len(self.slug) == 0:
self.slug = slugify(self.title)
super(TypeOfSignal, self).save(*args, **kwargs)
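    # Example (sketch): a TypeOfSignal titled "Weak Signal" gets the slug
    # "weak-signal" on first save; a non-empty slug is left untouched.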
class Meta:
verbose_name_plural = 'Types of Signals'
class EUStrategy(models.Model):
title = models.CharField(max_length=255)
def __str__(self):
return self.title
class Meta:
verbose_name_plural = 'EU Strategies'
class RelevanceOfSignalLikelihood(SortableCategoryWithDescriptionBase):
class Meta:
verbose_name = 'Relevance of Signal: Likelihood'
verbose_name_plural = 'Relevance of Signal: Likelihoods'
class RelevanceOfSignalSeverity(SortableCategoryWithDescriptionBase):
class Meta:
verbose_name = 'Relevance of Signal: Severity'
verbose_name_plural = 'Relevance of Signal: Severities'
| StarcoderdataPython |
1603113 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# #*** <License> ************************************************************#
# This module is part of the program FFW.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
import sys, os
import re
import uuid
import pickle
from datetime import datetime, tzinfo, timedelta
from rsclib.IP_Address import IP4_Address, IP6_Address
from rsclib.Phone import Phone
from rsclib.sqlparser import make_naive, SQL_Parser
from _GTW import GTW
from _TFL import TFL
from _TFL.pyk import pyk
from _CNDB import CNDB
import _CNDB._OMP
from _GTW._OMP._PAP import PAP
from _GTW._OMP._Auth import Auth
from _MOM.import_MOM import Q
from ff_olsr.parser import get_olsr_container
from ff_spider.parser import Guess
from ff_spider.common import unroutable, Interface, Inet4, WLAN_Config
import _TFL.CAO
import Command
def ip_mask_key (x) :
""" Key for sorting IPs (as key of a dict iter) """
return (x [0].mask, x [0], x [1:])
# end def ip_mask_key
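# Example (sketch): items of a {IP4_Address: comment} dict sort so that larger
# networks (smaller mask) come first, as used by Convert.reserve_net below:
#   sorted (pyk.iteritems (nets), key = ip_mask_key)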
class Consolidated_Interface (object) :
""" An interface built from several redeemer devices using
information from the OLSR MID table and the spider data.
Originally each redeemer device will create a
Consolidated_Device for each IP-Address it has. If a redeemer
device has more than one IP-Address we issue a warning (this is
not supported via the user interface of redeemer).
These interfaces can later be merged using spider info (from the
olsr mid table we don't know if these belong to the same or
different interfaces of the same device).
The original ip address is used as identity of this object, it
is used to store the interface in a Consolidated_Device.
"""
wl_modes = WLAN_Config.modes
def __init__ (self, convert, device, ip, idx) :
self.convert = convert
self.debug = convert.debug
self.device = device
self.idxdev = device
self.ip = ip.ip
self.idx = idx
self.ips = { ip.ip : ip }
self.merged_ifs = []
self.merged = None
self.debug = convert.debug
self.is_wlan = False
self.wlan_info = None
self.names = []
self.spider_ip = None
self.verbose = convert.verbose
assert self.device
# end def __init__
def create (self) :
assert not self.merged
dev = self.device.net_device
if self.debug :
print ("device: %s ip: %s" % (self.device, self.ip))
ffw = self.convert.ffw
desc = []
if self.names :
desc.append ('Spider Interfaces: %s' % ', '.join (self.names))
if self.spider_ip :
desc.append ('Spider IP: %s' % self.spider_ip)
desc = '\n'.join (desc) or None
if self.is_wlan :
iface = self.net_interface = ffw.Wireless_Interface \
(left = dev, name = self.ifname, desc = desc, raw = True)
if self.wlan_info :
std = None
if self.wlan_info.standard is not None :
std = ffw.Wireless_Standard.instance \
(name = self.wlan_info.standard, raw = True)
mode = None
if self.wlan_info.mode :
mode = self.wlan_info.mode.lower ()
mode = self.wl_modes [mode]
bsid = self.wlan_info.bssid
ssid = self.wlan_info.ssid
if bsid is not None and len (bsid.split (':')) != 6 :
print ("INFO: Ignoring bssid: %s" % bsid)
bsid = None
if ssid is not None :
ssid = ssid.replace (r'\x09', '\x09')
if len (ssid) > 32 :
print ("WARN: Ignoring long ssid %s" % ssid)
ssid = None
iface.set_raw \
( mode = mode
, essid = ssid
, bssid = bsid
, standard = std
)
if self.wlan_info.channel is not None :
chan = ffw.Wireless_Channel.instance \
(std, self.wlan_info.channel, raw = True)
ffw.Wireless_Interface_uses_Wireless_Channel (iface, chan)
else :
iface = self.net_interface = ffw.Wired_Interface \
(left = dev, name = self.ifname, desc = desc, raw = True)
manager = dev.node.manager
scope = self.convert.scope
for ip in pyk.itervalues (self.ips) :
if self.verbose :
print \
( "Adding IP %s to iface: %s/%s (of dev %s)"
% (ip.ip, self.name, self.idxdev.name, self.device.name)
)
assert not ip.done
ip.set_done ()
net = IP4_Address (ip.ip, ip.cidr)
network = ffw.IP4_Network.instance (net)
netadr = network.reserve (ip.ip, manager)
ffw.Net_Interface_in_IP4_Network \
(iface, netadr, mask_len = 32, name = self.ipname)
if len (scope.uncommitted_changes) > 10 :
scope.commit ()
# end def create
@property
def ifname (self) :
if self.names :
return self.names [0]
return self.ipname
# end def ifname
@property
def ipname (self) :
if self.idxdev.if_idx > 1 :
return "%s-%s" % (self.name, self.idx)
return self.name
# end def ipname
def merge (self, other) :
""" Merge other interface into this one """
assert other.device == self.device
assert not other.merged
if self.debug :
print ("Merge: %s\n -> %s" % (other, self))
print ("Merge: dev: %s" % self.device)
self.ips.update (other.ips)
del other.device.interfaces [other.ip]
self.merged_ifs.append (other)
other.merged = self
# end def merge
def __getattr__ (self, name) :
if not hasattr (self, 'idxdev') :
raise AttributeError ("device info gone")
r = getattr (self.idxdev, name)
setattr (self, name, r)
return r
# end def __getattr__
def __repr__ (self) :
return "%s (ip = %s)" % (self.__class__.__name__, self.ip)
# end def __repr__
__str__ = __repr__
# end class Consolidated_Interface
class Consolidated_Device (object) :
""" A device built from several redeemer devices using information
from the OLSR MID table and the spider data.
Initially we have a single interface with our devid.
"""
def __init__ (self, convert, redeemer_dev) :
self.convert = convert
self.debug = convert.debug
self.devid = redeemer_dev.id
self.redeemer_devs = {}
self.interfaces = {}
self.merged = None
self.merged_devs = []
self.mid_ip = None
self.debug = convert.debug
self.if_idx = 0
self.net_device = None
self.hna = False
self.redeemer_devs [self.devid] = redeemer_dev
self.node = convert.node_by_id [self.id_nodes]
# end def __init__
@property
def ffw_node (self) :
return self.convert.ffw_node_by_id [self.id_nodes]
# end def ffw_node
def add_redeemer_ip (self, ip) :
""" Add redeemer ip address. """
assert not ip.id_nodes
if ip.id_members or ip.id_members != 1 :
print \
( "WARN: IP %s %s has member ID %s" \
% (ip.ip, ip.id, ip.id_members)
)
assert not self.merged_devs
assert ip.ip not in self.interfaces
self.interfaces [ip.ip] = \
Consolidated_Interface (self.convert, self, ip, self.if_idx)
self.if_idx += 1
# end def add_redeemer_ip
def create (self) :
""" Create device in database """
assert self.net_device is None
assert not self.merged
ffw = self.convert.ffw
if self.debug :
print ('dev:', self.id, self.name)
if self.if_idx > 1 :
print \
( "WARN: dev %s.%s has %d ips in redeemer" \
% (self.node.name, self.name, self.if_idx)
)
for d in self.merged_devs :
if d.if_idx > 1 :
print \
( "WARN: dev %s.%s has %d ips in redeemer" \
                        % (d.node.name, d.name, d.if_idx)
)
# FIXME: We want correct info from nodes directly
# looks like most firmware can give us this info
devtype = ffw.Net_Device_Type.instance (name = 'Generic')
comments = dict \
( hardware = 'Hardware'
, antenna = 'Antenne'
, comment = 'Kommentar'
)
d = self.redeemer_devs [self.devid]
desc = '\n'.join \
(': '.join ((v, d [k])) for k, v in pyk.iteritems (comments) if d [k])
dev = self.net_device = ffw.Net_Device \
( left = devtype
, node = self.ffw_node
, name = self.shortest_name
, desc = desc
, raw = True
)
self.convert.set_last_change (dev, self.changed, self.created)
# no member info in DB:
assert not self.id_members
for iface in pyk.itervalues (self.interfaces) :
iface.create ()
self.convert.scope.commit ()
return dev
# end def create
def ip_iter (self) :
for ip in pyk.iterkeys (self.interfaces) :
yield ip
# end def ip_iter
def merge (self, other) :
""" Merge other device into this one """
self.redeemer_devs.update (other.redeemer_devs)
self.interfaces.update (other.interfaces)
if self.debug :
print ("Merge: %s\n -> %s" % (other, self))
#assert not other.merged
if other.merged :
msg = "Merge: already merged to %s" % other.merged
print (msg)
raise ValueError (msg)
for ifc in pyk.itervalues (other.interfaces) :
ifc.device = self
other.merged = self
other.set_done ()
self.merged_devs.append (other)
assert self.devid in self.redeemer_devs
assert other.devid in self.redeemer_devs
# end def merge
@property
def shortest_name (self) :
""" Shortest name of all merged devices """
sn = self.name
for d in self.merged_devs :
if len (d.name) < len (sn) :
sn = d.name
return sn
# end def shortest_name
def __getattr__ (self, name) :
if not hasattr (self, 'redeemer_devs') or not hasattr (self, 'devid') :
raise AttributeError ("redeemer dev info gone")
r = getattr (self.redeemer_devs [self.devid], name)
setattr (self, name, r)
return r
# end def __getattr__
def __repr__ (self) :
ndev = self.net_device
if ndev :
ndev = ndev.name
return "%s (devid=%s, net_device=%s, merged=%s)" \
% (self.__class__.__name__, self.devid, ndev, bool (self.merged))
# end def __repr__
__str__ = __repr__
# end class Consolidated_Device
class Convert (object) :
def __init__ (self, cmd, scope, debug = False) :
self.debug = debug
self.verbose = cmd.verbose
self.anonymize = cmd.anonymize
if len (cmd.argv) > 0 :
f = open (cmd.argv [0])
else :
f = sys.stdin
self.ip4nets = {}
self.ip6nets = {}
if cmd.network :
for n in cmd.network :
ip, comment = n.split (';', 1)
if ':' in ip :
ip = IP6_Address (ip)
self.ip6nets [ip] = comment
else :
ip = IP4_Address (ip)
self.ip4nets [ip] = comment
self.spider_ignore_ip = {}
if cmd.spider_ignore_ip :
for i in cmd.spider_ignore_ip :
ip_dev, ip = i.split (':')
if ip_dev not in self.spider_ignore_ip :
self.spider_ignore_ip [ip_dev] = []
self.spider_ignore_ip [ip_dev].append (ip)
olsr = get_olsr_container (cmd.olsr_file)
self.olsr_nodes = {}
for t in pyk.iterkeys (olsr.topo.forward) :
self.olsr_nodes [t] = True
for t in pyk.iterkeys (olsr.topo.reverse) :
self.olsr_nodes [t] = True
self.olsr_mid = olsr.mid.by_ip
self.olsr_hna = olsr.hna
self.rev_mid = {}
for k, v in pyk.iteritems (self.olsr_mid) :
if k not in self.olsr_nodes :
print ("WARN: MIB %s: not in OLSR Topology" % k)
#assert k in self.olsr_nodes
for mid in v :
assert mid not in self.rev_mid
self.rev_mid [mid] = True
self.spider_info = pickle.load (open (cmd.spider_dump, 'rb'))
self.spider_devs = {}
self.spider_iface = {}
for ip, dev in pyk.iteritems (self.spider_info) :
if self.verbose :
print ("IP:", ip)
ignore = {}
if str (ip) in self.spider_ignore_ip :
ignore = dict.fromkeys (self.spider_ignore_ip [str (ip)])
# ignore spider errors
if not isinstance (dev, Guess) :
continue
dev.mainip = ip
dev.done = False
for iface in pyk.itervalues (dev.interfaces) :
iface.done = False
for ip4 in iface.inet4 :
i4 = ip4.ip
# ignore rfc1918, link local, localnet
if unroutable (i4) :
continue
# ignore explicitly specified ips
if str (i4) in ignore :
print ("INFO: Ignoring %s/%s" % (ip, i4))
continue
if ( i4 in self.spider_devs
and self.spider_devs [i4] != dev
) :
print ("WARN: Device %s/%s not equal:" % (ip, i4))
print ("=" * 60)
print (dev.verbose_repr ())
print ("-" * 60)
print (self.spider_devs [i4].verbose_repr ())
print ("=" * 60)
continue
elif ( i4 in self.spider_iface
and self.spider_iface [i4] != iface
) :
assert dev == self.spider_devs [i4]
spif = self.spider_iface [i4]
print \
( "WARN: Interfaces %s/%s of dev-ip %s share ip %s"
% (iface.name, spif.name, ip, i4)
)
spif.names.append (iface.name)
if iface.is_wlan :
spif.is_wlan = iface.is_wlan
spif.wlan_info = getattr (iface, 'wlan_info', None)
if self.verbose :
print ("=" * 60)
print (iface)
print (spif)
print ("-" * 60)
print (dev.verbose_repr ())
print ("=" * 60)
iface = spif
self.spider_devs [i4] = dev
self.spider_iface [i4] = iface
iface.device = dev
if ip not in self.spider_devs :
print ("WARN: ip %s not in dev" % ip)
if self.verbose :
print ("=" * 60)
print (dev.verbose_repr ())
print ("=" * 60)
name = 'unknown'
assert name not in dev.interfaces
iface = Interface (4711, name)
iface.done = False
dev.interfaces [name] = iface
iface.device = dev
iface.append_inet4 (Inet4 (ip, None, None, iface = name))
self.spider_iface [ip] = iface
self.spider_devs [ip] = dev
self.scope = scope
self.ffw = self.scope.CNDB
self.pap = self.scope.GTW.OMP.PAP
self.mentor = {}
self.rsrvd_nets = {}
self.ffw_node_by_id = {}
self.node_by_id = {}
self.ip_by_ip = {}
self.email_ids = {}
self.manager_by_id = {}
self.phone_ids = {}
self.person_by_id = {}
self.member_by_id = {}
self.dev_by_node = {}
self.cons_dev = {}
self.parser = SQL_Parser \
(verbose = False, fix_double_encode = True)
self.parser.parse (f)
self.contents = self.parser.contents
self.tables = self.parser.tables
# end def __init__
def set_last_change (self, obj, change_time, create_time) :
change_time = make_naive (change_time)
create_time = make_naive (create_time)
self.scope.ems.convert_creation_change \
(obj.pid, c_time = create_time, time = change_time or create_time)
#print (obj, obj.creation_date, obj.last_changed)
# end def set_last_change
def create_nodes (self) :
scope = self.scope
for n in self.contents ['nodes'] :
if n.id < 0 and n.id != -803 :
print ("WARN: Ignoring Node %s/%s" % (n.name, n.id))
continue
if n.name == '-803' :
n.name = 'n-803'
print ("Processing Node: %s" % n.name)
if len (scope.uncommitted_changes) > 100 :
scope.commit ()
gps = None
#print ("LAT:", n.gps_lat_deg, n.gps_lat_min, n.gps_lat_sec)
#print ("LON:", n.gps_lon_deg, n.gps_lon_min, n.gps_lon_sec)
if n.gps_lat_deg is None :
assert n.gps_lat_min is None
assert n.gps_lat_sec is None
assert n.gps_lon_deg is None
assert n.gps_lon_min is None
assert n.gps_lon_sec is None
elif n.gps_lat_min is None :
assert n.gps_lat_sec is None
assert n.gps_lon_min is None
assert n.gps_lon_sec is None
if self.anonymize :
lat = "%2.2f" % n.gps_lat_deg
lon = "%2.2f" % n.gps_lon_deg
else :
lat = "%f" % n.gps_lat_deg
lon = "%f" % n.gps_lon_deg
gps = dict (lat = lat, lon = lon)
elif n.gps_lat_min == 0 and n.gps_lat_sec == 0 :
assert not n.gps_lon_min
assert not n.gps_lon_sec
if self.anonymize :
lat = "%2.2f" % n.gps_lat_deg
lon = "%2.2f" % n.gps_lon_deg
else :
lat = "%f" % n.gps_lat_deg
lon = "%f" % n.gps_lon_deg
gps = dict (lat = lat, lon = lon)
else :
assert n.gps_lat_deg == int (n.gps_lat_deg)
assert n.gps_lat_min == int (n.gps_lat_min)
assert n.gps_lon_deg == int (n.gps_lon_deg)
assert n.gps_lon_min == int (n.gps_lon_min)
lat = "%d d %d m" % (int (n.gps_lat_deg), int (n.gps_lat_min))
lon = "%d d %d m" % (int (n.gps_lon_deg), int (n.gps_lon_min))
if n.gps_lat_sec is not None :
lat = lat + " %f s" % n.gps_lat_sec
if n.gps_lon_sec is not None :
lon = lon + " %f s" % n.gps_lon_sec
gps = dict (lat = lat, lon = lon)
if self.anonymize :
lat = \
( n.gps_lat_deg
+ (n.gps_lat_min or 0) / 60.
+ (n.gps_lat_sec or 0) / 3600.
)
lon = \
( n.gps_lon_deg
+ (n.gps_lon_min or 0) / 60.
+ (n.gps_lon_sec or 0) / 3600.
)
gps = dict (lat = "%2.2f" % lat, lon = "%2.2f" % lon)
id = self.person_dupes.get (n.id_members, n.id_members)
owner = self.person_by_id.get (id)
if self.anonymize :
manager = owner
elif not isinstance (owner, self.pap.Person) :
manager = self.manager_by_id [id]
elif n.id_tech_c and n.id_tech_c != n.id_members :
tid = self.person_dupes.get (n.id_tech_c, n.id_tech_c)
manager = self.person_by_id.get (tid)
assert (manager)
if not isinstance (manager, self.pap.Person) :
manager = self.manager_by_id [tid]
print ("INFO: Tech contact found: %s" % n.id_tech_c)
else :
manager = owner
# node with missing manager has devices, use 0xff admin as owner
if not owner and n.id in self.dev_by_node :
owner = self.person_by_id.get (1)
manager = self.manager_by_id [1]
print \
( "WARN: Node %s: member %s not found, using 1"
% (n.id, n.id_members)
)
if owner :
node = self.ffw.Node \
( name = n.name
, position = gps
, show_in_map = n.map
, manager = manager
, owner = owner
, raw = True
)
self.set_last_change (node, n.changed, n.created)
assert (node)
self.ffw_node_by_id [n.id] = node
else :
print \
( "ERR: Node %s: member %s not found"
% (n.id, n.id_members)
)
# end def create_nodes
# first id is the one to remove, the second one is the correct one
person_dupes = dict (( (373, 551) # checked, real dupe
, (338, 109) # checked, real dupe
, (189, 281) # checked, 189 contains almost no data
# and 189 has no nodes
, (285, 284) # checked, real dupe
, (299, 297) # checked, real dupe
, (300, 462) # checked, real dupe
, (542, 586) # checked, real dupe
, (251, 344) # checked, real dupe
, (188, 614) # checked, real dupe
, (177, 421) # checked, real dupe
, (432, 433) # checked, real dupe, merge addresses
, ( 26, 480) # probably: almost same nick, merge adrs
, ( 90, 499) # FIXME: same person? merge adrs?
# 90 has node 1110
# 499 has node 1105 and 812
# two accounts, one for HTL, one private?
# maybe create company?
# make company owner of 1110 and
# 499 tech-c of all nodes?
, (505, 507) # checked, real dupe
, (410, 547) # checked, real dupe
, (712, 680) # checked, real dupe
, (230, 729) # checked, real dupe
, (375, 743) # checked, real dupe
, (755, 175) # checked, real dupe
, (219, 759) # Probably same (nick similar), merge adr
, (453, 454) # checked, real dupe
, (803, 804) # checked, real dupe
, (295, 556) # same gmx address, merge adr
, (697, 814) # checked, real dupe
, (476, 854) # checked, real dupe
, (312, 307) # checked, real dupe
, (351, 355) # checked, real dupe
, (401, 309) # checked, real dupe
, (871, 870) # checked, real dupe
, (580, 898) # checked, real dupe
, (894, 896) # checked, real dupe
, (910, 766) # checked, real dupe
, (926, 927) # checked, real dupe
, (938, 939) # checked, real dupe
, (584, 939) # not entirely sure but all
# lowercase in both records
# indicates same person
, (756, 758) # checked, real dupe
, ( 0, 1) # ignore <NAME>
, (442,1019) # checked, old address listed in whois
, (1082, 1084) # checked, same email and phone
, (1096, 1094) # checked, same attributes
, (1113, 1114) # checked
))
rev_person_dupes = dict ((v, k) for k, v in pyk.iteritems (person_dupes))
merge_adr = dict.fromkeys ((432, 26, 759, 295))
phone_bogus = dict.fromkeys \
(( '01111111'
, '1234567'
, '0048334961656'
, '001123456789'
, '+972 1234567'
, '003468110524227'
, '1234'
, '0'
, '-'
, '+49 1 35738755'
, '974 5517 9729'
, '0525001340'
, '59780'
, '1013'
, '\\t'
))
companies = dict.fromkeys ((112, ))
associations = dict.fromkeys ((146, 176, 318, 438, 737, 809))
company_actor = {134 : 37}
association_actor = { 1 : 15, 838 : 671}
person_disable = dict.fromkeys ((263, 385, 612, 621))
person_remove = dict.fromkeys ((549, 608))
def try_insert_phone (self, person, m, x, c) :
if x :
x = x.strip ()
if x :
p = None
if x in self.phone_bogus :
return
try :
p = Phone (x, m.town, c)
except ValueError as err :
if str (err).startswith ('WARN') :
print (err)
return
if not p :
return
t = self.pap.Phone.instance (* p)
k = str (p)
if t :
eid = self.phone_ids [k]
prs = self.person_by_id [eid]
if ( prs.pid == person.pid
or self.pap.Subject_has_Phone.instance (person, t)
) :
return # don't insert twice
print \
( "WARN: %s/%s %s/%s: Duplicate phone: %s"
% (eid, prs.pid, m.id, person.pid, x)
)
else :
t = self.pap.Phone (* p)
self.phone_ids [k] = m.id
self.pap.Subject_has_Phone (person, t)
# end def try_insert_phone
def try_insert_email (self, person, m, attr = 'email', second = False) :
mail = getattr (m, attr)
email = self.pap.Email.instance (address = mail)
if email :
if mail.lower () in (e.address for e in person.emails) :
return
eid = self.email_ids [mail.lower ()]
prs = self.person_by_id [eid]
print \
( "WARN: %s/%s %s/%s: Duplicate email: %s"
% (eid, prs.pid, m.id, person.pid, mail)
)
else :
desc = None
if second :
desc = "von 2. Account"
print \
( "INFO: Second email for %s/%s: %s"
% (m.id, person.pid, mail)
)
self.email_ids [mail.lower ()] = m.id
email = self.pap.Email (address = mail, desc = desc)
self.pap.Subject_has_Email (person, email)
if ( m.id not in self.company_actor
and m.id not in self.association_actor
) :
# Some accounts are in fixtures
auth = self.scope.Auth.Account.instance (mail)
if not auth :
auth = self.scope.Auth.Account.create_new_account_x \
( mail
, enabled = True
, suspended = True
, password = <PASSWORD>
)
self.pap.Person_has_Account (person, auth)
# end def try_insert_email
def try_insert_url (self, m, person) :
hp = m.homepage
if not hp.startswith ('http') :
hp = 'http://' + hp
url = self.pap.Url.instance (hp, raw = True)
assert url is None or url.value == hp.lower ()
if url :
return
url = self.pap.Url (hp, desc = 'Homepage', raw = True)
self.pap.Subject_has_Url (person, url)
# end def try_insert_url
im_hash = re.compile (r"^[0-9a-f]{32}$")
def try_insert_im (self, person, m) :
if m.instant_messenger_nick.endswith ('@aon.at') :
self.try_insert_email (person, m, attr = 'instant_messenger_nick')
return
if m.instant_messenger_nick.startswith ('alt/falsch') :
return
if m.instant_messenger_nick.startswith ('housing') :
return
if self.im_hash.match (m.instant_messenger_nick) :
print ("WARN: Got hash in nick: %s" % m.instant_messenger_nick)
return
if m.instant_messenger_nick.startswith ('Wohnadresse:') :
adr = m.instant_messenger_nick.split (':', 1) [1].strip ()
# delimiter is a literal backslash followed by n
street, r = adr.split (r'\n')
plz, ort = r.split ()
address = self.pap.Address.instance_or_new \
( street = street
, zip = plz
, city = ort
, country = pyk.decoded ('Austria', 'utf-8')
)
self.pap.Subject_has_Address (person, address)
return
print \
("INFO: Instant messenger nickname: %s" % m.instant_messenger_nick)
im = self.pap.IM_Handle (address = m.instant_messenger_nick)
self.pap.Subject_has_IM_Handle (person, im)
# end def try_insert_im
phone_types = dict \
( telephone = 'Festnetz'
, mobilephone = 'Mobil'
, fax = 'Fax'
)
def try_insert_address (self, m, person) :
street = ' '.join (x for x in (m.street, m.housenumber) if x)
if street or m.town or m.zip :
country = pyk.decoded ('Austria', 'utf-8')
if not m.town :
print \
( 'INFO: no city (setting to "Wien"): %s/%s'
% (m.id, person.pid)
)
m ['town'] = 'Wien'
if not m.zip :
if m.id == 653 :
m ['zip'] = 'USA'
elif m.id == 787 :
m ['zip'] = '2351'
elif m.id == 836 :
m ['zip'] = '1160'
else :
print ("INFO: no zip: %s/%s" % (m.id, person.pid))
elif m.zip.startswith ('I-') :
m ['zip'] = m.zip [2:]
country = pyk.decoded ('Italy', 'utf-8')
if not street and not m.zip and m.town == 'Wien' :
return
if not street :
print ("INFO: no street: %s/%s" % (m.id, person.pid))
return
address = self.pap.Address.instance_or_new \
( street = street
, zip = m.zip
, city = m.town
, country = country
)
self.pap.Subject_has_Address (person, address)
# end def try_insert_address
def create_persons (self) :
# FIXME: Set role for person so that person can edit only their
# personal data, see self.person_disable
scope = self.scope
# ignore person dupes that have meanwhile been removed
known_ids = {}
for m in self.contents ['members'] :
known_ids [m.id] = True
        for d_id, m_id in list (self.person_dupes.items ()) :
if m_id not in known_ids or d_id not in known_ids :
del self.person_dupes [d_id]
del self.rev_person_dupes [m_id]
        for id, act in list (self.company_actor.items ()) :
if id not in known_ids or act not in known_ids :
del self.company_actor [id]
        for id, act in list (self.association_actor.items ()) :
if id not in known_ids or act not in known_ids :
del self.association_actor [id]
for m in sorted (self.contents ['members'], key = lambda x : x.id) :
if len (scope.uncommitted_changes) > 10 :
scope.commit ()
self.member_by_id [m.id] = m
if m.id == 309 and m.street.startswith ("'") :
m.street = m.street [1:]
if m.id in self.person_remove :
print \
( "INFO: removing person %s %s %s"
% (m.id, m.firstname, m.lastname)
)
continue
if m.id in self.person_dupes :
print \
( "INFO: skipping person %s (duplicate of %s)"
% (m.id, self.person_dupes [m.id])
)
continue
if not m.firstname and not m.lastname :
print ("WARN: skipping person, no name:", m.id)
continue
if not m.lastname :
print ("WARN: skipping person, no lastname: %s" % m.id)
continue
if m.firstname.startswith ('Armin"/><script') :
m.firstname = 'Armin'
cls = self.pap.Person
name = ' '.join ((m.firstname, m.lastname))
pd = dict (name = name)
if m.id in self.company_actor :
cls = self.pap.Company
elif m.id in self.association_actor :
cls = self.pap.Association
else :
pd = dict (first_name = m.firstname, last_name = m.lastname)
if self.anonymize :
cls = self.pap.Person
pd = dict (first_name = m.id, last_name = 'Funkfeuer')
if self.verbose :
typ = cls._etype.__name__.lower ()
print ( "Creating %s: %s" % (typ, repr (name)))
person = cls (raw = True, ** pd)
if m.id == 1 :
self.ff_subject = person
if m.id not in self.rev_person_dupes :
self.set_last_change (person, m.changed, m.created)
self.person_by_id [m.id] = person
if self.anonymize :
continue
self.try_insert_address (m, person)
if m.email :
self.try_insert_email (person, m)
if m.fax and '@' in m.fax :
self.try_insert_email (person, m, attr = 'fax')
print \
("INFO: Using email %s in fax field as email" % m.fax)
if m.instant_messenger_nick :
self.try_insert_im (person, m)
for a, c in pyk.iteritems (self.phone_types) :
x = getattr (m, a)
self.try_insert_phone (person, m, x, c)
if m.mentor_id and m.mentor_id != m.id :
self.mentor [m.id] = m.mentor_id
if m.nickname :
nick = self.pap.Nickname (m.nickname, raw = True)
self.pap.Subject_has_Nickname (person, nick)
if m.homepage :
self.try_insert_url (m, person)
if m.id in self.companies or m.id in self.associations :
if m.id in self.companies :
cls = self.pap.Company
if m.id in self.associations :
cls = self.pap.Association
name = ' '.join ((m.firstname, m.lastname))
typ = cls._etype.__name__.lower ()
print ( "Creating %s: %s" % (typ, repr (name)))
legal = cls (name = name, raw = True)
# copy property links over
q = self.pap.Subject_has_Property.query
for p in q (left = person).all () :
self.pap.Subject_has_Property (legal, p.right)
self.pap.Person_in_Group (person, legal)
self.manager_by_id [m.id] = person
if self.anonymize :
return
x = dict (self.company_actor)
x.update (self.association_actor)
for l_id, p_id in pyk.iteritems (x) :
person = self.person_by_id [p_id]
legal = self.person_by_id [l_id]
self.pap.Person_in_Group (person, legal)
self.manager_by_id [l_id] = person
# Retrieve info from dupe account
for dupe, id in pyk.iteritems (self.person_dupes) :
# older version of db or dupe removed:
if id not in self.person_by_id :
continue
d = self.member_by_id [dupe]
m = self.member_by_id [id]
print \
( "Handling dupe: %s->%s %s %s" \
% (dupe, id, d.firstname, d.lastname)
)
person = self.person_by_id [id]
changed = max \
(d for d in (m.changed, d.changed, m.created, d.created) if d)
created = min (m.created, d.created)
self.set_last_change (person, changed, created)
if d.email :
self.try_insert_email (person, d, second = True)
for a, c in pyk.iteritems (self.phone_types) :
x = getattr (d, a)
self.try_insert_phone (person, d, x, c)
if ( d.mentor_id is not None
and d.mentor_id != d.id
and d.mentor_id != id
and d.mentor_id != 305
) :
print ("WARN mentor: %s->%s %s" % (d.id, id, d.mentor_id))
assert (False)
if d.mentor_id is not None and d.mentor_id != d.id :
if m.id not in self.mentor :
self.mentor [m.id] = d.mentor_id
if d.nickname :
nick = self.pap.Nickname (d.nickname, raw = True)
self.pap.Subject_has_Nickname (person, nick)
if d.homepage :
self.try_insert_url (d, person)
if d.instant_messenger_nick :
self.try_insert_im (person, d)
if dupe in self.merge_adr :
self.try_insert_address (d, person)
for mentor_id, person_id in pyk.iteritems (self.mentor) :
# can happen if a duplicate inserted this:
if mentor_id == person_id :
continue
mentor_id = self.person_dupes.get (mentor_id, mentor_id)
person_id = self.person_dupes.get (person_id, person_id)
mentor = self.person_by_id [mentor_id]
person = self.person_by_id [person_id]
actors = (self.company_actor, self.association_actor)
for a in actors :
if mentor_id in a :
mentor = self.person_by_id [a [mentor_id]]
break
if ( person_id in self.company_actor
or person_id in self.association_actor
) :
self.ffw.Person_acts_for_Legal_Entity.instance_or_new \
(mentor, person)
else :
self.ffw.Person_mentors_Person (mentor, person)
# end def create_persons
def check_spider_dev (self, sdev, in4, ips, nodeid) :
i4 = in4.ip
nodename = self.ffw_node_by_id [nodeid].name
if not routable (i4) :
return
ip4 = IP4_Address (i4)
if i4 not in ips :
print \
( "WARN: IP %s of spidered device %s not in mid dev for node %s"
% (i4, sdev.mainip, nodename)
)
if ip4 not in self.ip_by_ip :
print \
( "WARN: IP %s of spidered device %s not in ips"
% (i4, sdev.mainip)
)
else :
d = self.ip_by_ip [ip4].id_devices
if d :
dev = self.dev_by_id [d]
nid = dev.id_nodes
node = self.ffw_node_by_id [dev.id_nodes]
print \
( "WARN: IP %s of spidered device %s"
" belongs to dev %s node %s"
% (i4, sdev.mainip, dev.name, node.name)
)
else :
print \
( "WARN: IP %s of spidered device %s has no device"
% (i4, sdev.mainip)
)
# end def check_spider_dev
def create_ips_and_devices (self) :
# devices and reserved nets from hna table
for ip4 in pyk.iterkeys (self.olsr_hna.by_dest) :
for n in pyk.iterkeys (self.ip4nets) :
if ip4 in n :
break
else :
# only subnets of one of our ip4nets
if self.verbose :
print ("HNA: %s not in our networks" % ip4)
continue
if ip4.mask == 32 :
if ip4 not in self.olsr_nodes :
if ip4 not in self.ip_by_ip :
print ("WARN: IP %s not in DB" % ip4)
else :
ip = self.ip_by_ip [ip4]
if ip.id_devices :
d = self.cons_dev [ip.id_devices]
d.hna = True
else :
# FIXME: Reserve network in database
self.rsrvd_nets [ip4] = True
else :
# FIXME: Reserve network in database
self.rsrvd_nets [ip4] = True
for i in ip4 :
if i in self.olsr_nodes :
print \
( "WARN: IP %s from hna-range %s also in olsr nodes"
% (i, ip4)
)
assert i not in self.rev_mid
if self.verbose :
for k in pyk.iterkeys (self.rsrvd_nets) :
print ("HNA route to: %s" % k)
if self.debug :
for ip4 in self.olsr_hna.by_dest :
for nw in pyk.iterkeys (self.ip4nets) :
if ip4 in nw :
print ("HNA: %s" % ip4)
for dev in pyk.itervalues (self.cons_dev) :
if dev.merged :
continue
dev.create ()
# end def create_ips_and_devices
def reserve_net (self, nets, typ) :
for net, comment in sorted (pyk.iteritems (nets), key = ip_mask_key) :
if self.verbose :
print (net, comment)
r = typ.query \
( Q.net_address.CONTAINS (net)
, sort_key = TFL.Sorted_By ("-net_address.mask_len")
).first ()
reserver = r.reserve if r else typ
network = reserver (net, owner = self.ff_subject)
if isinstance (comment, type ('')) :
network.set_raw (desc = comment [:80])
# end def reserve_net
def build_device_structure (self) :
for n in self.contents ['nodes'] :
self.node_by_id [n.id] = n
for d in self.contents ['devices'] :
if d.id_nodes not in self.dev_by_node :
self.dev_by_node [d.id_nodes] = []
self.dev_by_node [d.id_nodes].append (d)
self.cons_dev [d.id] = Consolidated_Device (self, d)
for ip in self.contents ['ips'] :
self.ip_by_ip [IP4_Address (ip.ip)] = ip
if ip.id_devices :
did = ip.id_devices
self.cons_dev [did].add_redeemer_ip (ip)
net = IP4_Address (ip.ip, ip.cidr)
if net not in self.ip4nets :
print ("WARN: Adding network reservation: %s" % net)
self.ip4nets [net] = True
# consistency check of olsr data against redeemer db
# check nodes from topology
        for ip4 in list (self.olsr_nodes) :
if ip4 not in self.ip_by_ip :
print ("WARN: ip %s from olsr topo not in ips" % ip4)
del self.olsr_nodes [ip4]
# check mid table
midkey = []
midtbl = {}
for ip4, aliases in pyk.iteritems (self.olsr_mid) :
if ip4 not in self.ip_by_ip :
print ("WARN: key ip %s from olsr mid not in ips" % ip4)
midkey.append (ip4)
for a in aliases :
if a not in self.ip_by_ip :
print ("WARN: ip %s from olsr mid not in ips" % a)
if ip4 not in midtbl :
midtbl [ip4] = []
midtbl [ip4].append (a)
assert not midkey
for k, v in pyk.iteritems (midtbl) :
x = dict.fromkeys (self.olsr_mid [k])
for ip4 in v :
del x [ip4]
self.olsr_mid [k] = x.keys ()
# consolidate devices using information from spider data
node_by_sdev = {}
for mainip, sdev in sorted (pyk.iteritems (self.spider_devs)) :
if sdev.done :
continue
sdev.done = True
seen_ip = {}
nodeid = None
for sif in sorted (pyk.itervalues (sdev.interfaces)) :
assert not sif.done
sif.done = True
for in4 in sorted (sif.inet4) :
if unroutable (in4.ip) :
continue
seen_ip [in4.ip] = 1
i4 = IP4_Address (in4.ip)
ip = self.ip_by_ip.get (i4)
if not ip :
print \
("WARN: ip %s from spider not in redeemer" % i4)
continue
if not ip.id_devices :
print ("ERR: ip %s from spider has no device" % i4)
continue
d = self.cons_dev [ip.id_devices]
if sdev not in node_by_sdev :
node_by_sdev [sdev] = {}
if d.id_nodes not in node_by_sdev [sdev] :
node_by_sdev [sdev] [d.id_nodes] = {}
if d.id not in node_by_sdev [sdev] [d.id_nodes] :
node_by_sdev [sdev] [d.id_nodes] [d.id] = {}
node_by_sdev [sdev] [d.id_nodes] [d.id] [in4.ip] = True
assert mainip in seen_ip
for sdev, nodes in sorted (pyk.iteritems (node_by_sdev)) :
if len (nodes) > 1 :
print \
( "WARN: spider device %s expands to %s nodes: %s"
% ( sdev.mainip
, len (nodes)
, ', '.join
(self.node_by_id [n].name for n in pyk.iterkeys (nodes))
)
)
for n, devs in sorted (pyk.iteritems (nodes)) :
sdevs = {}
sifs = {}
dev1 = None
err = False
for devid, ips in sorted (pyk.iteritems (devs)) :
d = self.cons_dev [devid]
if d.merged :
print \
("ERR: %s already merged to %s" % (d, d.merged))
err = True
continue
if dev1 and d.id != dev1.id :
if self.verbose :
print \
( "Spider %-15s: Merging device %s.%s to %s.%s"
% ( sdev.mainip
, d.node.name
, d.name
, dev1.node.name
, dev1.name
)
)
assert dev1 != d
assert not dev1.merged
dev1.merge (d)
else :
dev1 = d
for ip in sorted (pyk.iterkeys (ips)) :
sdevs [self.spider_devs [ip]] = True
if self.spider_iface [ip] not in sifs :
sifs [self.spider_iface [ip]] = {}
sifs [self.spider_iface [ip]] [ip] = True
if not err :
assert len (sdevs) == 1
if sdev not in sdevs :
print ("ERR: Merged interface differ:")
print ("------------------------------")
                        print (list (sdevs.keys ()) [0].verbose_repr ())
print ("------------------------------")
print (sdev.verbose_repr ())
print ("------------------------------")
assert len (sifs) >= 1
assert dev1
for sif, ips in sorted (pyk.iteritems (sifs)) :
l = len (ips)
assert l >= 1
ifaces = {}
for ip in ips :
ifaces [ip] = dev1.interfaces [ip]
assert len (ifaces) == len (ips)
if1 = ip1 = None
for ip, ifc in sorted (pyk.iteritems (ifaces)) :
if if1 :
print \
( "Spider %-15s: "
"Merging iface %s.%s:%s to %s.%s:%s"
% ( sdev.mainip
, d.node.name
, d.name
, ip
, dev1.node.name
, dev1.name
, ip1
)
)
if1.merge (ifc)
else :
if1 = ifc
ip1 = ip
if sif.is_wlan :
if1.is_wlan = True
if1.wlan_info = getattr (sif, 'wlan_info', None)
if1.names = sif.names
if1.spider_ip = sif.device.mainip
# compound devices from mid table
# We index nodes by mid-table entry (by the mid key-ip address)
# for each mid entry there can be several nodes (config bug)
for ip4, aliases in sorted (pyk.iteritems (self.olsr_mid)) :
nodes = {}
ip = self.ip_by_ip [ip4]
if ip.id_devices :
d = self.cons_dev [ip.id_devices]
d.mid_ip = ip4
nodes [d.id_nodes] = d
else :
print ("ERR: key %s from mid has no device" % ip4)
for a in sorted (aliases) :
ip = self.ip_by_ip [a]
if not ip.id_devices :
print ("ERR: %s from mid %s has no device" % (a, ip4))
continue
d = self.cons_dev [ip.id_devices]
d.mid_ip = ip4
if d.id_nodes not in nodes :
nodes [d.id_nodes] = d
elif d != nodes [d.id_nodes] :
if d.merged :
if d.merged != nodes [d.id_nodes] :
print \
( "ERR: %s already merged to %s "
"not merging to %s"
% (d, d.merged, nodes [d.id_nodes])
)
continue
if nodes [d.id_nodes].merged :
print \
("ERR: %s already merged" % (nodes [d.id_nodes]))
else :
nodes [d.id_nodes].merge (d)
if len (nodes) > 1 :
print \
("WARN: mid %s expands to %s nodes" % (ip4, len (nodes)))
# end def build_device_structure
def debug_output (self) :
for k in sorted (pyk.iterkeys (self.olsr_nodes)) :
print (k)
for node in self.contents ['nodes'] :
nn = pyk.encoded (node.name, 'utf-8')
print ("Node: %s (%s)" % (nn, node.id))
for d in self.dev_by_node.get (node.id, []) :
print (" Device: %s" % d.name)
# end def debug_output
def create (self) :
self.build_device_structure ()
if self.debug :
self.debug_output ()
self.create_persons ()
self.reserve_net (self.ip4nets, self.ffw.IP4_Network)
self.reserve_net (self.ip6nets, self.ffw.IP6_Network)
self.create_nodes ()
self.create_ips_and_devices ()
# end def create
# end def Convert
def _main (cmd) :
scope = Command.scope (cmd)
if cmd.Break :
TFL.Environment.py_shell ()
c = Convert (cmd, scope, debug = False)
#c.dump ()
c.create ()
scope.commit ()
scope.ems.compact ()
scope.destroy ()
Command.command._handle_load_auth_mig \
(cmd, mig_auth_file = Command.command.default_mig_auth_file + ".0xff")
# end def _main
_Command = TFL.CAO.Cmd \
( handler = _main
, args =
( "file:S?PG database dumpfile to convert"
,
)
, opts =
( "verbose:B"
, "create:B"
, "anonymize:B"
, "olsr_file:S=olsr/txtinfo.txt?OLSR dump-file to convert"
, "spider_dump:S=Funkfeuer.dump?Spider pickle dump"
, "network:S,?Networks already reserved"
, "spider_ignore_ip:S,?<IP>:<IP> ignore sub-IP for spidered device"
) + Command.opts
, min_args = 1
, defaults = Command.command.defaults
)
if __name__ == "__main__" :
_Command ()
### __END__ convert_0xff
| StarcoderdataPython |
164744 | <filename>tests/test_socialbot.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_socialbot
----------------------------------
Tests for `socialbot` module.
"""
import unittest
import threading
import requests
from socialbot.main import SlackBotHandler
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from socialbot.plugins.facebook_plugin import FacebookPlugin
class TestSocialbot(unittest.TestCase):
def setUp(self):
print('---- setup start')
self.httpd = HTTPServer(('', 8081), SlackBotHandler)
print('---- setting plugins')
SlackBotHandler.plugin_list = [FacebookPlugin()]
threading.Thread(target=self.serve).start()
print('---- setup complete')
def serve(self):
try:
self.httpd.serve_forever()
finally:
self.httpd.server_close()
def tearDown(self):
print('---- teardown start')
self.httpd.shutdown()
print('---- teardown complete')
def test1(self):
print('---- test1 start')
print(threading.current_thread())
result = requests.post('http://127.0.0.1:8081', {'user_name':'ska', 'text':'wikisave <http://wwww.lanux.org.ar> este es otro texto largo'})
        print(result)
print('---- test1 complete')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
4831570 | from typing import Any, Dict, List, Union
from xpanse.const import V2_PREFIX
from xpanse.endpoint import ExEndpoint
from xpanse.error import UnexpectedValueError
from xpanse.iterator import ExResultIterator
class CertificatesEndpoint(ExEndpoint):
"""
Part of the Assets v2 API for handling asset certificates.
See: https://api.expander.expanse.co/api/v1/docs/
"""
def list(self, **kwargs: Any) -> ExResultIterator:
"""
Returns the list of asset certificates. Arguments should be passed as keyword args using
the names below.
Args:
limit (int, optional):
Returns at most this many results in a single api call.
pageToken (str, optional):
Returns results starting at this page Token.
commonNameSearch (str, optional):
Search for given domain value via substring match.
recentIp (str, optional):
Filter by IP; Returns only assets with a recent IP matching the provided filter.
providerId (str, optional):
Comma-separated string; Returns only results that were found on the given providers.
If not set, results will include anything regardless of provider status.
providerName (str, optional):
Comma-separated string; Returns only results that were found on the given providers.
If not set, results will include anything regardless of provider status.
businessUnitId (str, optional):
Comma-separated string; Returns only results whose Business Unit's ID falls in the provided list.
NOTE: If omitted, API will return results for all Business Units the user has permissions to view.
businessUnitName (str, optional):
Comma-separated string; Returns only results whose Business Unit's Name falls in the provided list.
NOTE: If omitted, API will return results for all Business Units the user has permissions to view.
property (str, optional):
Comma-separated string; Returns only results whose certificate property falls in the provided list.
NOTE: If omitted, API will return results for all properties the user has permissions to view.
minLastObservedDate (str, optional):
Filter by last observed timestamp. Date formatted string (YYYY-MM-DD).
certificateAdvertisementStatus (str, optional):
Comma-separated string; Returns only result whose asset's certificate advertisement statuses fall in the provided list.
Valid values are `HAS_CERTIFICATE_ADVERTISEMENT` and `NO_CERTIFICATE_ADVERTISEMENT`.
serviceStatus (str, optional):
Comma-separated string; Returns only result whose asset's service statuses fall in the provided list.
Valid values are `HAS_ACTIVE_SERVICE`, `NO_ACTIVE_SERVICE`, `HAS_ACTIVE_CLOUD_SERVICE`, `NO_ACTIVE_CLOUD_SERVICE`,
`HAS_ACTIVE_ON_PREM_SERVICE`, and `NO_ACTIVE_ON_PREM_SERVICE`.
hostingEnvironment (str, optional):
Filter by Hosting Environment. Allowed values are `ON_PREM`, `CLOUD`, `NONE`, `RESERVED_IPS`.
hasRelatedCloudResources (boolean, optional):
Filter by whether the asset has a related cloud resource asset.
tagId (str, optional):
Comma-separated string; Returns any assets with a tagId in the provided set.
tagName (str, optional):
Comma-separated string; Returns any assets with a tagName in the provided set.
            include (str, optional):
Comma-separated string; Include the provided fields as part of the serialized result.
Allowed Values:
certDetails: populate all elements in data[*].details.
Note: If param is not specified, data[*].details will be empty.
sort (str, optional):
Comma-separated string; orders results by the given fields. If the field name is
prefixed by a -, then the ordering will be descending for that field.
Allowed values are `commonName`, `-commonName`, `dateAdded`, `-dateAdded`, `lastObserved`, `-lastObserved`.
Returns:
:obj:`ExResultIterator`:
An iterator containing all of the certificate results. Results can be iterated
or called by page using `<iterator>.next()`.
Examples:
>>> # Prints all certificate objects:
>>> for res in client.assets.certificates.v2.list():
... for cert in res:
... print(cert)
"""
return ExResultIterator(self._api, f"{V2_PREFIX}/assets/certificates", kwargs)
def count(self, **kwargs: Any) -> int:
"""
Returns the total count of certificates. This will return -1 if for some reason the count attribute
is not returned in an otherwise valid response payload.
Args:
pageToken (str, optional):
Returns results starting at this page Token.
commonNameSearch (str, optional):
Search for given domain value via substring match.
recentIp (str, optional):
Filter by IP; Returns only assets with a recent IP matching the provided filter.
providerId (str, optional):
Comma-separated string; Returns only results that were found on the given providers.
If not set, results will include anything regardless of provider status.
providerName (str, optional):
Comma-separated string; Returns only results that were found on the given providers.
If not set, results will include anything regardless of provider status.
formattedIssuerOrg (str, optional):
Comma-separated string; Returns only results that were found on the given formatted issuer orgs.
If not set, filter is ignored.
businessUnitId (str, optional):
Comma-separated string; Returns only results whose Business Unit's ID falls in the provided list.
NOTE: If omitted, API will return results for all Business Units the user has permissions to view.
businessUnitName (str, optional):
Comma-separated string; Returns only results whose Business Unit's Name falls in the provided list.
NOTE: If omitted, API will return results for all Business Units the user has permissions to view.
property (str, optional):
Comma-separated string; Returns only results whose certificate property falls in the provided list.
NOTE: If omitted, API will return results for all properties the user has permissions to view.
minLastObservedDate (str, optional):
Filter by last observed timestamp. Date formatted string (YYYY-MM-DD).
certificateAdvertisementStatus (str, optional):
Comma-separated string; Returns only result whose asset's certificate advertisement statuses fall in the provided list.
Valid values are `HAS_CERTIFICATE_ADVERTISEMENT` and `NO_CERTIFICATE_ADVERTISEMENT`.
serviceStatus (str, optional):
Comma-separated string; Returns only result whose asset's service statuses fall in the provided list.
Valid values are `HAS_ACTIVE_SERVICE`, `NO_ACTIVE_SERVICE`, `HAS_ACTIVE_CLOUD_SERVICE`, `NO_ACTIVE_CLOUD_SERVICE`,
`HAS_ACTIVE_ON_PREM_SERVICE`, and `NO_ACTIVE_ON_PREM_SERVICE`.
issueStatus (str, optional):
Comma-separated string; Returns only result whose asset's issue statuses fall in the provided list.
Valid values are `New`, `Investigating`, `In Progress`, `No Risk`, `Acceptable Risk`, `Resolved`
hostingEnvironment (str, optional):
Filter by Hosting Environment. Allowed values are `ON_PREM`, `CLOUD`, `NONE`, `RESERVED_IPS`.
hasRelatedCloudResources (boolean, optional):
Filter by whether the asset has a related cloud resource asset.
tagId (str, optional):
Comma-separated string; Returns any assets with a tagId in the provided set.
tagName (str, optional):
Comma-separated string; Returns any assets with a tagName in the provided set.
Returns:
:int:
The total count of certificates.
Examples:
>>> # Print total count of certificates containing `dev` substring.
>>> print(client.assets.certificates.v2.count(commonNameSearch="dev"))
"""
return (
self._api.get(f"{V2_PREFIX}/assets/certificates/count", params=kwargs)
.json()
.get("count", -1)
)
def get(self, pemMd5Hash: str, **kwargs: Any) -> Dict[str, Any]:
"""
Returns the details for a given Certificate. Arguments should be passed as keyword args using
the names below.
Args:
pemMd5Hash (str):
                The MD5 hash of the certificate's PEM, used as the certificate identifier.
minRecentIpLastObservedDate (str, optional):
Filter by last observed timestamp for recent IPs. Date formatted string (YYYY-MM-DD).
Returns:
:obj:`dict`:
                A dictionary containing all of the details about the certificate.
Examples:
            >>> # Return certificate details.
            >>> certificate = client.assets.certificates.v2.get(<pemMd5Hash>)
"""
return self._api.get(
f"{V2_PREFIX}/assets/certificates/{pemMd5Hash}", params=kwargs
).json()
def csv(self, file: str, **kwargs: Any):
"""
Downloads filtered certificates as a csv file. Arguments should be passed as keyword args using
the names below.
Args:
file (str):
A relative path for saving the downloaded csv file.
limit (int, optional):
Returns at most this many results.
commonNameSearch (str, optional):
Search for given domain value via substring match.
recentIp (str, optional):
Filter by IP; Returns only assets with a recent IP matching the provided filter.
providerId (str, optional):
Comma-separated string; Returns only results that were found on the given providers.
If not set, results will include anything regardless of provider status.
providerName (str, optional):
Comma-separated string; Returns only results that were found on the given providers.
If not set, results will include anything regardless of provider status.
formattedIssuerOrg (str, optional):
Comma-separated string; Returns only results that were found on the given formatted issuer orgs.
If not set, filter is ignored.
businessUnitId (str, optional):
Comma-separated string; Returns only results whose Business Unit's ID falls in the provided list.
NOTE: If omitted, API will return results for all Business Units the user has permissions to view.
businessUnitName (str, optional):
Comma-separated string; Returns only results whose Business Unit's Name falls in the provided list.
NOTE: If omitted, API will return results for all Business Units the user has permissions to view.
property (str, optional):
Comma-separated string; Returns only results whose certificate property falls in the provided list.
NOTE: If omitted, API will return results for all properties the user has permissions to view.
minLastObservedDate (str, optional):
Filter by last observed timestamp. Date formatted string (YYYY-MM-DD).
certificateAdvertisementStatus (str, optional):
Comma-separated string; Returns only result whose asset's certificate advertisement statuses fall in the provided list.
Valid values are `HAS_CERTIFICATE_ADVERTISEMENT` and `NO_CERTIFICATE_ADVERTISEMENT`.
serviceStatus (str, optional):
Comma-separated string; Returns only result whose asset's service statuses fall in the provided list.
Valid values are `HAS_ACTIVE_SERVICE`, `NO_ACTIVE_SERVICE`, `HAS_ACTIVE_CLOUD_SERVICE`, `NO_ACTIVE_CLOUD_SERVICE`,
`HAS_ACTIVE_ON_PREM_SERVICE`, and `NO_ACTIVE_ON_PREM_SERVICE`.
issueStatus (str, optional):
Comma-separated string; Returns only result whose asset's issue statuses fall in the provided list.
Valid values are `New`, `Investigating`, `In Progress`, `No Risk`, `Acceptable Risk`, `Resolved`
hostingEnvironment (str, optional):
Filter by Hosting Environment. Allowed values are `ON_PREM`, `CLOUD`, `NONE`, `RESERVED_IPS`.
hasRelatedCloudResources (boolean, optional):
Filter by whether the asset has a related cloud resource asset.
tagId (str, optional):
Comma-separated string; Returns any assets with a tagId in the provided set.
tagName (str, optional):
Comma-separated string; Returns any assets with a tagName in the provided set.
Returns:
:obj:`boolean`:
`True` if the download was successful, otherwise `False`.
Examples:
>>> # Download a csv named `api-certs.csv` for all certificates that contain `api` in their name:
>>> cli.assets.certificates.v2.csv(file="api-certs.csv", commonNameSearch="api")
"""
return self._api.csv(
path=f"{V2_PREFIX}/assets/certificates/csv", file_=file, **kwargs
)
def bulk_tag(
self,
operation: str,
asset_ids: List[str],
tag_ids: List[str],
return_raw: bool = False,
) -> Union[bool, Dict[str, Any]]:
"""
Assigns or unassigns tags to assets.
Args:
operation (str):
Operation type. Must be ASSIGN or UNASSIGN.
asset_ids (list):
A list of asset uuids to assign or unassign tags from.
tag_ids (list):
A list of tag uuids to assign or unassign to assets.
return_raw (bool, optional):
If False this will return a boolean response that reflects whether
all of the operations were successful. If True the raw json response
will be returned. Defaults to False.
Returns:
:bool:
Returns a bool reflecting the operations success unless `return_raw`
is true, in which case a dict is returned.
Examples:
>>> # Assign a tag to a certificate
>>> cli.assets.certificates.bulk_tag("ASSIGN",
... ["8e589910-c1af-3c32-ae88-9a4b2dbcfe76"],
... ["f6164347-86d1-30cc-baf2-28bbb395403d"])
"""
if operation not in ["ASSIGN", "UNASSIGN"]:
            raise UnexpectedValueError(
                f"The operation type '{operation}' is not valid."
            )
payload: Dict[str, Any] = {"operations": []}
for asset_id in asset_ids:
payload["operations"].append(
{"operationType": operation, "assetId": asset_id, "tagIds": tag_ids}
)
resp = self._api.post(
f"{V2_PREFIX}/assets/certificates/tag-assignments/bulk", json=payload
).json()
if not return_raw:
return resp.get("meta", {}).get("failureCount") == 0
return resp
def bulk_poc(
self,
operation: str,
asset_ids: List[str],
contact_ids: List[str],
return_raw: bool = False,
) -> Union[bool, Dict[str, Any]]:
"""
Assigns or unassigns Point-of-Contacts to assets.
Args:
operation (str):
Operation type. Must be ASSIGN or UNASSIGN.
asset_ids (list):
A list of asset uuids to assign or unassign pocs from.
contact_ids (list):
A list of poc uuids to assign or unassign to assets.
return_raw (bool, optional):
If False this will return a boolean response that reflects whether
all of the operations were successful. If True the raw json response
will be returned. Defaults to False.
Returns:
:bool:
Returns a bool reflecting the operations success unless `return_raw`
is true, in which case a dict is returned.
Examples:
>>> # Assign a poc to a certificate
>>> cli.assets.certificates.bulk_poc("ASSIGN",
... ["8e589910-c1af-3c32-ae88-9a4b2dbcfe76"],
... ["f6164347-86d1-30cc-baf2-28bbb395403d"])
"""
if operation not in ["ASSIGN", "UNASSIGN"]:
            raise UnexpectedValueError(
                f"The operation type '{operation}' is not valid."
            )
payload: Dict[str, Any] = {"operations": []}
for asset_id in asset_ids:
payload["operations"].append(
{
"operationType": operation,
"assetId": asset_id,
"contactIds": contact_ids,
}
)
resp = self._api.post(
f"{V2_PREFIX}/assets/certificates/contact-assignments/bulk", json=payload
).json()
if not return_raw:
return resp.get("meta", {}).get("failureCount") == 0
return resp
def annotation_update(
self,
certificate_id: str,
contacts: List[str] = [],
tags: List[str] = [],
note: str = "",
) -> Dict[str, Any]:
"""
Updates the annotations for a single certificate.
Note: This will overwrite the existing annotations for an asset.
If any arguments are omitted, that annotation type will be
cleared for the asset.
Args:
certificate_id (str):
The uuid of the certificate.
contacts (list, optional):
A list of poc emails to apply to the certificate.
tags (list, optional):
A list of tag names to apply to the certificate.
note (str, optional):
An optional note that can be added to the certificate.
Returns:
:obj:`dict`:
A dictionary containing the current annotations for the certificate.
Examples:
>>> # Update annotations for a certificate
>>> cli.assets.certificates.annotation_update(
... certificate_id="e5bdc732-522a-3864-8ff3-307d35f0f0a0",
... contacts=["<EMAIL>"],
... tags=["sdk_test"],
... note="SDK Note")
"""
payload: Dict[str, Any] = {"tags": [], "note": note or "", "contacts": []}
for contact in contacts:
payload["contacts"].append({"email": contact})
for tag in tags:
payload["tags"].append({"name": tag})
return self._api.put(
f"{V2_PREFIX}/assets/certificates/{certificate_id}/annotations",
json=payload,
).json()
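# Illustrative end-to-end sketch (added for clarity, not part of the original SDK):
# page through certificates matching a substring, collect their ids, and assign a
# tag in one bulk call.  The "id" field name and the uuids are assumptions.
def _example_bulk_tag_matching_certs(endpoint, common_name_substring, tag_id):
    """``endpoint`` is assumed to be a CertificatesEndpoint instance."""
    asset_ids = []
    for page in endpoint.list(commonNameSearch=common_name_substring):
        for cert in page:
            asset_ids.append(cert.get("id"))
    if not asset_ids:
        return False
    return endpoint.bulk_tag("ASSIGN", asset_ids, [tag_id])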
| StarcoderdataPython |
1602396 | <reponame>leddartech/pioneer.common<gh_stars>1-10
from pioneer.common import plane, linalg
from pioneer.common.logging_manager import LoggingManager
from numpy.matlib import repmat
import math
import numpy as np
import os
import transforms3d
def grid(v, h, v_from, v_to, h_from, h_to, dtype = np.float32):
'''
Computes a matrix of all possible pairs of angles in the 2d field of view.
\param v vertical resolution
\param h horizontal resolution
    \param v_from vertical start angle of the grid (radians)
    \param v_to vertical end angle of the grid (radians)
    \param h_from horizontal start angle of the grid (radians)
    \param h_to horizontal end angle of the grid (radians)
\param dtype the numpy data type
'''
# If you are wondering why there is a complex number here. Read the
# numpy mgrid documentation. The lines below are equivalent to:
# a = np.linspace(v_from, v_to, v)
# b = np.linspace(h_from, h_to, h)
# b, a = np.meshgrid(b, a)
a, b = np.mgrid[ v_from:v_to:complex(0,v)
, h_from:h_to:complex(0,h)]
return np.c_[a.ravel(), b.ravel()].astype(dtype)
def from_specs_dict(specs):
return (specs[k] for k in ['v', 'h', 'v_fov', 'h_fov'])
def angles(v, h = None, v_fov = None, h_fov = None, dtype = np.float32):
'''
Computes a matrix of all possible pairs of angles in the 2d field of view.
The generated grid follows the LCA axis system convention. That is the
bottom-left corner (0, 0) corresponds to (-v_fov/2, -h_fov/2) and the top
right corner (v-1, h-1) is (+v_fov/2, +h_fov/2). The grid is generated in
a row-major order.
    \param v vertical resolution (or a dict with keys 'v', 'h', 'v_fov', 'h_fov')
\param h horizontal resolution
\param v_fov vertical field of view (degrees)
\param h_fov horizontal field of view (degrees)
\param dtype the numpy data type
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
v_fov_rad = math.radians(v_fov)
h_fov_rad = math.radians(h_fov)
v_offset = v_fov_rad/v/2
h_offset = h_fov_rad/h/2
return grid(v,h, -v_fov_rad/2 + v_offset, v_fov_rad/2 - v_offset
, -h_fov_rad/2 + h_offset, h_fov_rad/2 - h_offset, dtype)
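# Quick illustration of the convention described above (added example, not part of
# the original module): a 2 x 4 sensor with a 20 x 90 degree field of view yields one
# (vertical, horizontal) angle pair per channel, in row-major order.
def _angles_demo():
    a = angles(2, 4, 20.0, 90.0)
    assert a.shape == (8, 2)  # v * h rows, one (theta_x, theta_y) pair each
    return a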
def raycast_angles(v, h = None, v_fov = None, h_fov = None, density = 10, dtype = np.float32):
'''
Computes a densified matrix of all possible pairs of angles in the 2d field of view.
This matrix can be used to cast density * density rays per fov solid angle ('pixel')
\return the angle grid, and a mapping matrix m, where, m[dense_ray_i] == channel_i
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
v_fov_rad = math.radians(v_fov)
h_fov_rad = math.radians(h_fov)
dense_to_sparse = np.empty(v*h*density*density, 'u4')
sparse_to_dense = np.empty((v*h, density, density), 'u4')
dense_to_sub = np.empty((v*h*density*density, 2), 'u4')
m_i = 0
for v_i in range(v):
for vd_i in range(density):
for h_i in range(h):
for hd_i in range(density):
sparse_i = v_i * h + h_i
dense_to_sparse[m_i] = sparse_i
sparse_to_dense[sparse_i, vd_i, hd_i] = m_i
dense_to_sub[m_i] = [vd_i, hd_i]
m_i += 1
return grid(v * density,h * density, -v_fov_rad/2, v_fov_rad/2
, -h_fov_rad/2, h_fov_rad/2, dtype), dense_to_sparse, sparse_to_dense, dense_to_sub
def custom_v_angles(v, h = None, v_fov = None, h_fov = None, factor = 1, filename = os.path.join(os.path.dirname(__file__), 'eagle_angles_80.txt'), dtype = np.float32):
'''
similar to \a angles() but using a file to define scan direction angles
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
h_fov_rad = math.radians(h_fov)
h_offset = h_fov_rad/h/2
a = np.genfromtxt(filename, delimiter='\n', converters={_:lambda s: int(s, 16) for _ in range(1)})
a = a[:v]
a = a/2**16 * v_fov - v_fov/2
a = np.deg2rad(a) * factor
b = np.linspace(-h_fov_rad/2 + h_offset, h_fov_rad/2 - h_offset, num = h, dtype = dtype)
b, a = np.meshgrid(b, a)
return np.c_[a.ravel(), b.ravel()].astype(dtype)
def custom_v_quad_directions(v, h = None, v_fov = None, h_fov = None, factor = 1, filename = os.path.join(os.path.dirname(__file__), 'eagle_angles_80.txt'), dtype = np.float32):
'''
similar to \a quad_directions() but using a file to define scan direction angles
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
v_fov_rad = math.radians(v_fov)
h_fov_rad = math.radians(h_fov)
v_cell_size = v_fov_rad/v
h_cell_size = h_fov_rad/h
file_angles = np.genfromtxt(filename, delimiter='\n', converters={_:lambda s: int(s, 16) for _ in range(1)})
def custom_grid(v, h, v_offset, h_offset_from, h_offset_to, dtype):
a = file_angles[:v]
a = a/2**16 * v_fov - v_fov/2
a = (np.radians(a) - v_offset) * factor
b = np.linspace(-h_fov_rad/2+h_offset_from, h_fov_rad/2+h_offset_to, num = h, dtype = dtype)
b, a = np.meshgrid(b, a)
return np.c_[a.ravel(), b.ravel()].astype(dtype)
return np.vstack((
directions(custom_grid(v,h,-v_cell_size/2 ,h_cell_size , 0 , dtype))
,directions(custom_grid(v,h,+v_cell_size/2 ,h_cell_size , 0 , dtype))
,directions(custom_grid(v,h,+v_cell_size/2 ,0 , -h_cell_size, dtype))
,directions(custom_grid(v,h,-v_cell_size/2 ,0 , -h_cell_size, dtype)))
)
def direction(theta_x, theta_y):
'''
Convert angles of a spherical axis sytem into a cartesian direction vector.
The cartesian axis system is the camera axis system.
z
+-------> x
|
|
y v
The z axis enters your screen (or paper if you are the kind of person that
still prints code).
Angles go from -fov/2 to fov/2 in both horizontal and vertical direction, always computed
using "right hand" convention. In each direction, maximum z component will be attained at angle 0.
In the x-z plane (viewed from above):
pi/2
x ^
|
|<--.
|th_y\
------------(.)-------------> z
y
|
|
-pi/2
x = sin(theta_y)
z = cos(theta_y) //we want x,z = (0,1) at theta_y = 0
In the y-z plane (view from side):
z ^
|
|<--.
|th_x \ y
pi ------------(.)------------->
x
y = cos(theta_x + pi/2)
z = sin(theta_x + pi/2) //we want (y,z) = (0,1) at theta_x = 0
So the x, y, z coordinates should follow the equations below
x = sin(theta_y)
y = cos(theta_x + pi/2)
z = cos(theta_y) * sin(theta_x + pi/2)
'''
x = np.sin(theta_y)
y = np.cos(theta_x + np.pi/2)
z = np.sin(theta_x + np.pi/2) * np.cos(theta_y)
return x, y, z
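# Small sanity check of the axis convention derived above (added illustration, not
# part of the original module): theta = (0, 0) gives the +z axis and theta_y = pi/2
# tilts the ray onto +x.  Call it manually if desired.
def _direction_sanity_check():
    assert np.allclose(direction(0.0, 0.0), (0.0, 0.0, 1.0), atol=1e-12)
    assert np.allclose(direction(0.0, np.pi / 2), (1.0, 0.0, 0.0), atol=1e-12)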
def direction_spherical(thetas_x, thetas_y):
'''
LeddarConfig implementation
'''
x = np.cos(thetas_x) * np.sin(thetas_y)
y = np.sin(thetas_x)
z = np.cos(thetas_x) * np.cos(thetas_y)
return x, y, z
def direction_orthogonal(thetas_x, thetas_y):
'''
Simulator implementation using orthogonal camera depth projection
'''
x = np.tan(thetas_y)
y = np.tan(-thetas_x)
z = np.ones_like(x)
n = np.sqrt(z**2 + x**2 + y**2)
return x/n, y/n, z/n
def directions(angles, direction_f = direction):
'''Generate a set of cartesian direction vectors from a grid of
spherical coordinates angles. This function uses the same convention as
the `direction` function.
'''
thetas_x, thetas_y = angles.T
return np.stack(direction_f(thetas_x, thetas_y), axis=1)
def directions_orthogonal(v,h=None,v_fov=None,h_fov=None, dtype = np.float32):
'''Generate a set of cartesian direction vectors from a grid of
2D pixels coordinates (eg : camera depth) using Carla Simulator implementation
and camera depth projection
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
if h_fov > 90:
LoggingManager.instance().warning("The projection model is not adapted for horizontal fov greater than 90 degrees. Trying to correct the" \
+" situation by spliting the fov in three parts and re-merging them. Use 'projection: direction_carla_pixell' instead.")
return directions_orthogonal_pixell(v=v, h=h, v_fov=v_fov, h_fov=h_fov, dtype=dtype)
# (Intrinsic) K Matrix
k = np.identity(3)
k[0, 2] = h / 2.0
k[1, 2] = v / 2.0
k[0, 0] = k[1, 1] = h / \
(2.0 * math.tan(h_fov * math.pi / 360.0))
# 2d pixel coordinates
pixel_length = h * v
u_coord = repmat(np.r_[h-1:-1:-1],
v, 1).reshape(pixel_length)
v_coord = repmat(np.c_[v-1:-1:-1],
1, h).reshape(pixel_length)
# pd2 = [u,v,1]
p2d = np.array([u_coord, v_coord, np.ones_like(u_coord)])
direction = np.dot(np.linalg.inv(k), p2d).T
direction[:,0] = -direction[:,0]
v_cell_size, h_cell_size = v_h_cell_size_rad(v, h, v_fov, h_fov)
# First face
face_a = np.zeros((direction.shape))
face_a[:,0] = direction[:,0] - h_cell_size/2
face_a[:,1] = direction[:,1] - v_cell_size/2
face_a[:,2] = direction[:,2]
# Second face
face_b = np.zeros((direction.shape))
face_b[:,0] = direction[:,0] + h_cell_size/2
face_b[:,1] = direction[:,1] - v_cell_size/2
face_b[:,2] = direction[:,2]
# Third face
face_c = np.zeros((direction.shape))
face_c[:,0] = direction[:,0] + h_cell_size/2
face_c[:,1] = direction[:,1] + v_cell_size/2
face_c[:,2] = direction[:,2]
# Fourth face
face_d = np.zeros((direction.shape))
face_d[:,0] = direction[:,0] - h_cell_size/2
face_d[:,1] = direction[:,1] + v_cell_size/2
face_d[:,2] = direction[:,2]
quad_direction = np.vstack((face_a,face_b,face_c,face_d))
return direction,quad_direction
def directions_orthogonal_pixell(v, h=None, v_fov=None, h_fov=None, dtype = np.float32):
"""Returns directions and quad_directions for the carla simulator projection, in the case of a h_fov greater than 90 deg."""
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
directions_central_third, quad_directions_central_third = directions_orthogonal(v=v, h=int(h/3), v_fov=v_fov, h_fov=h_fov/3)
rot_left = transforms3d.euler.euler2mat(0,np.deg2rad(h_fov/3),0)
rot_right = transforms3d.euler.euler2mat(0,np.deg2rad(-h_fov/3),0)
directions_left_third = directions_central_third @ rot_left
directions_right_third = directions_central_third @ rot_right
quad_directions_left_third = quad_directions_central_third @ rot_left
quad_directions_right_third = quad_directions_central_third @ rot_right
ind_tpm = np.arange(v*int(h/3)).reshape((v,int(h/3)))
ind = np.ravel(np.hstack([ind_tpm,ind_tpm+v*int(h/3),ind_tpm+2*v*int(h/3)]))
quad_ind_tpm = np.arange(4*v*int(h/3)).reshape((4*v,int(h/3)))
quad_ind = np.ravel(np.hstack([quad_ind_tpm,quad_ind_tpm+4*v*int(h/3),quad_ind_tpm+2*4*v*int(h/3)]))
directions = np.vstack([directions_left_third, directions_central_third, directions_right_third])[ind]
quad_directions = np.vstack([quad_directions_left_third, quad_directions_central_third, quad_directions_right_third])[quad_ind]
return directions, quad_directions
def v_h_cell_size_rad(v, h = None, v_fov = None, h_fov = None, output_fov = False):
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
v_fov_rad = math.radians(v_fov)
h_fov_rad = math.radians(h_fov)
v_cell_size = v_fov_rad/v
h_cell_size = h_fov_rad/h
if output_fov:
return v_cell_size, h_cell_size, v_fov_rad, h_fov_rad
else:
return v_cell_size, h_cell_size
def quad_angles(v, h=None, v_fov=None, h_fov=None,
dtype=np.float32):
""" Like angles(), but for quad-stuff.
"""
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
v_cell_size, h_cell_size, v_fov_rad, h_fov_rad = v_h_cell_size_rad(v, h, v_fov, h_fov, True)
return np.vstack((
grid(v, h, -v_fov_rad/2, v_fov_rad/2-v_cell_size , -h_fov_rad/2, h_fov_rad/2-h_cell_size, dtype)
,grid(v, h, -v_fov_rad/2+v_cell_size, v_fov_rad/2, -h_fov_rad/2, h_fov_rad/2-h_cell_size, dtype)
,grid(v, h, -v_fov_rad/2+v_cell_size, v_fov_rad/2, -h_fov_rad/2+h_cell_size, h_fov_rad/2, dtype)
,grid(v, h, -v_fov_rad/2, v_fov_rad/2-v_cell_size, -h_fov_rad/2+h_cell_size, h_fov_rad/2, dtype)
))
def custom_quad_angles(specs,angles, dtype = np.float32):
v, h, v_fov, h_fov = from_specs_dict(specs)
v_fov_rad = math.radians(v_fov)
h_fov_rad = math.radians(h_fov)
v_cell_size = v_fov_rad/v
h_cell_size = h_fov_rad/h
def custom_grid(v_offset, h_offset, dtype):
a = angles[:,0] - v_offset
b = angles[:,1] - h_offset
return np.c_[a.ravel(), b.ravel()].astype(dtype)
return np.vstack((
custom_grid(-v_cell_size/2 ,-h_cell_size/2 , dtype)
,custom_grid(+v_cell_size/2 ,-h_cell_size/2 , dtype)
,custom_grid(+v_cell_size/2 ,+h_cell_size/2 , dtype)
,custom_grid(-v_cell_size/2 ,+h_cell_size/2 , dtype))
)
def quad_directions(v, h = None, v_fov = None, h_fov = None, dtype = np.float32, direction_f = direction):
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
v_cell_size, h_cell_size, v_fov_rad, h_fov_rad = v_h_cell_size_rad(v, h, v_fov, h_fov, True)
return np.vstack((
directions(grid(v,h,-v_fov_rad/2 ,v_fov_rad/2-v_cell_size ,-h_fov_rad/2 , h_fov_rad/2-h_cell_size , dtype), direction_f = direction_f)
,directions(grid(v,h,-v_fov_rad/2+v_cell_size ,v_fov_rad/2 ,-h_fov_rad/2 , h_fov_rad/2-h_cell_size , dtype), direction_f = direction_f)
,directions(grid(v,h,-v_fov_rad/2+v_cell_size ,v_fov_rad/2 ,-h_fov_rad/2+h_cell_size , h_fov_rad/2 , dtype), direction_f = direction_f)
,directions(grid(v,h,-v_fov_rad/2 ,v_fov_rad/2-v_cell_size ,-h_fov_rad/2+h_cell_size , h_fov_rad/2 , dtype), direction_f = direction_f))
)
def frustrum_old(v_fov, h_fov, scale, dtype = np.float32, direction_f = direction):
v_fov_rad_2 = math.radians(v_fov)/2
h_fov_rad_2 = math.radians(h_fov)/2
d = [direction_f(-v_fov_rad_2, -h_fov_rad_2)
, direction_f(-v_fov_rad_2, h_fov_rad_2)
, direction_f(v_fov_rad_2, -h_fov_rad_2)
, direction_f(v_fov_rad_2, h_fov_rad_2)]
vertices = np.empty((5, 3), dtype)
vertices[0] = [0,0,0]
vertices[1:] = np.array(d, dtype) * scale
indices = np.array([ 0,1 , 0,2 , 0,3 , 0,4 , 1,2 , 2,4 , 4,3, 3,1], dtype = "uint32")
return indices, vertices
def frustrum_directions(v_fov, h_fov, dtype = np.float32, direction_f = direction):
v_fov_rad_2 = math.radians(v_fov)/2
h_fov_rad_2 = math.radians(h_fov)/2
return np.array([direction_f(-v_fov_rad_2, -h_fov_rad_2)
, direction_f(-v_fov_rad_2, h_fov_rad_2)
, direction_f(v_fov_rad_2, h_fov_rad_2)
, direction_f(v_fov_rad_2, -h_fov_rad_2)], dtype)
def custom_frustrum_directions(custom_v_angles, v_cell_size, h_cell_size, dtype = np.float32):
min_v, max_v = custom_v_angles[:,0].min(), custom_v_angles[:,0].max()
min_h, max_h = custom_v_angles[:,1].min(), custom_v_angles[:,1].max()
return np.array([
direction(min_v - v_cell_size/2, min_h - h_cell_size/2)
, direction(min_v - v_cell_size/2, max_h + h_cell_size/2)
, direction(max_v + v_cell_size/2, max_h + h_cell_size/2)
, direction(max_v + v_cell_size/2, min_h - h_cell_size/2)], dtype)
def frustrum(frustrum_directions, scale = 10):
vertices = np.empty((5, 3), frustrum_directions.dtype)
vertices[0] = [0,0,0]
vertices[1:] = frustrum_directions * scale
indices = np.array([ 0,1 , 0,2 , 0,3 , 0,4 , 1,2 , 2,3 , 3,4, 4,1], dtype = 'u4')
return indices, vertices
def frustrum_planes(frustrum_directions):
d = frustrum_directions
return np.vstack((plane.make_plane([0,0,0], linalg.normalized(np.cross(d[0], d[1])), d.dtype),
plane.make_plane([0,0,0], linalg.normalized(np.cross(d[1], d[2])), d.dtype),
plane.make_plane([0,0,0], linalg.normalized(np.cross(d[2], d[3])), d.dtype),
plane.make_plane([0,0,0], linalg.normalized(np.cross(d[3], d[0])), d.dtype)))
def to_point_cloud(selection, distances, directions, dtype = np.float32):
sn = selection.shape[0]
points = np.empty((sn, 3), dtype)
points.resize((sn, 3))
points[:] = directions[selection] * distances.reshape(sn, 1)
return points
def generate_quads_indices(n, dtype = np.uint32):
iota = np.arange(n, dtype = dtype)
iota_2n = iota+2*n
return np.stack((iota, iota_2n, iota+n, iota, iota + 3*n, iota_2n), axis=1)
def triangle_to_echo_index(triangle):
return triangle[0] # must be in accordance with generate_quads_indices()
def quad_stack(scalars):
return np.concatenate((scalars,scalars,scalars,scalars))
def to_quad_cloud(selection, distances, amplitudes, quad_directions, v, h, dtype = np.float32):
sn = selection.shape[0]
n = v * h
points = np.empty((sn*4, 3), dtype)
quad_amplitudes = np.empty((sn*4, 1), dtype)
# four points per quad, 1 different direction per point, same distance for each
points[0:sn] = quad_directions[selection ] * distances[:, np.newaxis]
points[sn:2*sn] = quad_directions[selection+n ] * distances[:, np.newaxis]
points[2*sn:3*sn] = quad_directions[selection+2*n] * distances[:, np.newaxis]
points[3*sn:] = quad_directions[selection+3*n] * distances[:, np.newaxis]
# same amplitude for each four points
quad_amplitudes[:] = quad_stack(amplitudes)[:, np.newaxis]
# a quad is formed with 2 triangles
quad_indices = generate_quads_indices(sn, np.uint32)
return points, quad_amplitudes, quad_indices.flatten()
def convert_echo_package(old, specs = {"v" : None, "h" : None, "v_fov" : None, "h_fov" : None}):
return to_echo_package(old['indices']
, distances = old['data'][:,1]
, amplitudes = old['data'][:,2]
, timestamps = old['data'][:,0].astype('u2')
, flags = old['flags'].astype('u2')
, timestamp = old['timestamp'] if 'timestamp' in old else old['data'][0,0]
, specs = specs
, distance_scale = 1.0, amplitude_scale = 1.0, led_power = 1.0)
def to_echo_package(indices = np.array([], 'u4'), distances = np.array([], 'f4'), amplitudes = np.array([], 'f4')
, timestamps = None, flags = None, timestamp = 0
, specs = {"v" : None, "h" : None, "v_fov" : None, "h_fov" : None}
, distance_scale = 1.0, amplitude_scale = 1.0, led_power = 1.0, eof_timestamp = None
, additionnal_fields = {}):
'''
This format MUST remain in synch with the one in LeddarPyDevice::PackageEchoes()
additionnal_fields format example : {'widths':[np.array([values]), 'f4']}
'''
package = specs.copy()
if eof_timestamp is None:
eof_timestamp = timestamp
package.update({"timestamp": timestamp
, "eof_timestamp": eof_timestamp
, "distance_scale": distance_scale
, "amplitude_scale": amplitude_scale
, "led_power": led_power})
assert(indices.size == distances.size == amplitudes.size)
if timestamps is not None:
assert(indices.size == timestamps.size)
if flags is not None:
assert(indices.size == flags.size)
default_fields = [('indices', 'u4')
, ('distances', 'f4')
, ('amplitudes', 'f4')
, ('timestamps', 'u8')
, ('flags', 'u2')]
dtype = np.dtype(default_fields + [(key,additionnal_fields[key][1]) for key in additionnal_fields.keys()])
package['data'] = np.empty(indices.size, dtype = dtype)
package['data']["indices"] = indices
package['data']["distances"] = distances
package['data']["amplitudes"] = amplitudes
package['data']["timestamps"] = 0 if timestamps is None else timestamps
package['data']["flags"] = 1 if flags is None else flags
for key in additionnal_fields.keys():
package['data'][key] = additionnal_fields[key][0]
return package
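# Minimal construction example for the packaging helper above (added illustration;
# the sensor specs and echo values below are made-up numbers, not real calibration data).
def _example_echo_package():
    specs = {"v": 8, "h": 32, "v_fov": 20.0, "h_fov": 90.0}
    indices = np.arange(4, dtype='u4')
    distances = np.array([1.0, 2.5, 3.0, 4.2], 'f4')
    amplitudes = np.array([0.1, 0.2, 0.3, 0.4], 'f4')
    return to_echo_package(indices, distances, amplitudes, specs=specs, timestamp=0)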
def to_traces_package(traces, start_index = 0, timestamp = 0):
return dict(data=traces, start_index=start_index, timestamp=timestamp)
def echo_package_to_point_cloud(package):
""" Return all the points and corresponding amplitudes from an echo package in one step.
"""
theta = angles(package['v'], package['h'], package['v_fov'], package['h_fov'], dtype = np.float32)
vec = directions(theta)
X = to_point_cloud(package['data']["indices"], package['data']["distances"], vec, dtype = np.float32)
return X, package['data']["amplitudes"] | StarcoderdataPython |
1780276 | <filename>backend/api/urls.py
from .apiviews import DigitsViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'digits', DigitsViewSet)
urlpatterns = router.urls
| StarcoderdataPython |
180918 | <reponame>NumberAI/python-bandwidth-iris
#!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.data.telephone_number_list import TelephoneNumberList
from iris_sdk.models.maps.ord.existing_search_order import \
ExistingSearchOrderMap
from iris_sdk.models.data.reservation_list import ReservationList
class ExistingSearchOrder(ExistingSearchOrderMap, BaseData):
def __init__(self):
self.reservation_id_list = ReservationList()
self.telephone_number_list = TelephoneNumberList() | StarcoderdataPython |
76706 | <reponame>LucasBoTang/Piecewise_Affine_Fitting<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn.linear_model import LinearRegression
import cv2
import numpy as np
import heuristics
import ilp
import utils
import generator
# random seed
np.random.seed(23)
if __name__ == "__main__":
# choose image and size
_, image = generator.generate_images(5)[0]
# add Guassian noise
noise = 0.001
image = image + noise * np.random.normal(loc=0.0, scale=1.0, size=image.shape)
image = np.clip(image, 0, 1)
# visualize input signal
plt.imshow(image)
plt.show()
# visualize 3d input signal
X = np.arange(image.shape[1])
Y = np.arange(image.shape[0])
X, Y = np.meshgrid(X, Y)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, image, cmap=cm.jet, linewidth=0, antialiased=False)
plt.show()
# initialize with hueristic method
heuristic_graph = heuristics.solve(image, 0.02)
heuristic_seg, heuristics_output = utils.graph_to_image(heuristic_graph)
plt.imshow(heuristic_seg)
plt.show()
# build ilp
model = ilp.build_model(image, 1, 0.5, cycle4=True, cycle8=False, facet=True)
timelimit = 600
model.parameters.timelimit.set(timelimit)
# warm start
ilp.warm_start(model, heuristic_seg)
# solve ilp
print("Solving ilp with b&c...")
model.solve()
# get result
gap = model.solution.MIP.get_mip_relative_gap()
print("MIP relative gap:", gap)
obj = model.solution.get_objective_value()
print("Objective value:", obj)
# visualize boundaries
boundaries = utils.vis_cut(image, model)
plt.imshow(boundaries)
plt.show()
# visualize segmentation
segmentations = utils.vis_seg(image, model)
plt.imshow(segmentations)
plt.show()
# visualize depth
depth = utils.reconstruct(image, model)
plt.imshow(depth)
plt.show()
cv2.imwrite('/home/bo/Desktop/sample/depth.png', (depth*255).astype(np.uint8))
# visualize 3d input signal
X = np.arange(depth.shape[1])
Y = np.arange(depth.shape[0])
X, Y = np.meshgrid(X, Y)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, depth, cmap=cm.jet, linewidth=0, antialiased=False)
plt.show()
if not utils.check_plane(segmentations, depth):
print("The solution includes non-planar surfaces")
| StarcoderdataPython |
1613142 | <reponame>joncotton/armstrong.hatband<filename>armstrong/hatband/widgets/ckeditor.py<gh_stars>0
from django.forms import widgets
from django.conf import settings
class CKEditorWidget(widgets.Textarea):
class Media:
js = (''.join((settings.STATIC_URL, "ckeditor/ckeditor.js")),)
def __init__(self, attrs=None):
final_attrs = {'class': 'ckeditor'}
if attrs is not None:
final_attrs.update(attrs)
if 'class' in attrs:
final_attrs['class'] = ' '.join((attrs['class'], 'ckeditor'))
super(CKEditorWidget, self).__init__(attrs=final_attrs)
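# Usage sketch (added illustration; the form and its field are hypothetical):
from django import forms
class ExampleArticleForm(forms.Form):
    body = forms.CharField(widget=CKEditorWidget())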
| StarcoderdataPython |
3398687 | # -*- coding: utf-8 -*-
import torch
import argparse
import os
import sys
import random
import numpy as np
from os.path import join, dirname, abspath
#parser = argparse.ArgumentParser(description="Run scan-net and save to given location")
#parser.add_argument('--path', dest='path', default='./scan_model_ICO.pth')
mp0 = os.path.abspath(os.path.join('../.././evidence_inference/'))
sys.path.insert(0, abspath(join(dirname(abspath(__file__)), '..', '..')))
from evidence_inference.preprocess import preprocessor
USE_CUDA = True
USE_TEST = True
use_attn = False
from evidence_inference.models.model_ico_scan import ScanNet, train_scan, scan_reform
from evidence_inference.preprocess.preprocessor import SimpleInferenceVectorizer as SimpleInferenceVectorizer
from evidence_inference.models.model_0 import PaddedSequence
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score
def test_model(scan_net, test_Xy, inference_vectorizer):
test_Xy = scan_reform(test_Xy)
with torch.no_grad():
instances = test_Xy
y_test = torch.FloatTensor([inst['y'] for inst in instances])
if (USE_CUDA):
y_test = y_test.cuda()
unk_idx = int(inference_vectorizer.str_to_idx[SimpleInferenceVectorizer.PAD])
y_preds = [] # predictions
# we batch this so the GPU doesn't run out of memory
token_predictions = []
token_label = []
for i in range(0, len(instances)):
batch_instances = instances[i:i+1] # batch size of 1
sentences = [torch.LongTensor(inst["sentence_span"]) for inst in batch_instances]
I = [torch.LongTensor(inst["I"]) for inst in batch_instances]
C = [torch.LongTensor(inst["C"]) for inst in batch_instances]
O = [torch.LongTensor(inst["O"]) for inst in batch_instances]
t_labels = batch_instances[0]['token_ev_labels']
sens, I, C, O = [PaddedSequence.autopad(to_enc, batch_first=True, padding_value=unk_idx) for to_enc in [sentences, I, C, O]]
if USE_CUDA:
sens = sens.cuda()
I = I.cuda()
C = C.cuda()
O = O.cuda()
preds = scan_net(sens, I, C, O)
for p in preds:
y_preds.append(p)
for j in range(len(sentences)):
for k in range(len(sentences[j])):
token_predictions.append(preds[j])
token_label.append(t_labels[k])
y_preds = torch.FloatTensor(y_preds).cuda()
y_bin = [1 if y > .5 else 0 for y in y_preds]
auc = roc_auc_score(token_label, token_predictions) # token auc
acc = accuracy_score(y_test, y_bin)
f1 = f1_score(y_test, y_bin)
prc = precision_score(y_test, y_bin)
rc = recall_score(y_test, y_bin)
return acc, f1, prc, rc, auc
def run_scan_net_ico(loc = "scan_net_ICO_no_attn_test.pth"):
print("Modules loaded.")
parent_path = abspath(os.path.join(dirname(abspath(__file__)), '..', '..'))
vocab_f = os.path.join(parent_path, "annotations", "vocab.txt")
train_Xy, inference_vectorizer = preprocessor.get_train_Xy(list(preprocessor.train_document_ids()), sections_of_interest=None,
vocabulary_file=vocab_f,
include_sentence_span_splits=True)
print("Train Data Achieved")
if not(USE_TEST):
# create an internal validation set from the training data; use 90% for training and 10% for validation.
split_index = int(len(train_Xy) * .9)
val_Xy = train_Xy[split_index:]
train_Xy = train_Xy[:split_index]
test_Xy = preprocessor.get_Xy(list(preprocessor.validation_document_ids()), inference_vectorizer, sections_of_interest=None, include_sentence_span_splits = True)
else:
val_Xy = preprocessor.get_Xy(preprocessor.validation_document_ids(), inference_vectorizer, sections_of_interest=None, include_sentence_span_splits = True)
test_Xy = preprocessor.get_Xy(preprocessor.test_document_ids(), inference_vectorizer, sections_of_interest=None, include_sentence_span_splits = True)
print("Test Data Achieved")
if USE_CUDA:
se_scn = ScanNet(inference_vectorizer, use_attention=use_attn).cuda()
else:
se_scn = ScanNet(inference_vectorizer, use_attention=use_attn)
print("Model loaded")
# train with 50 epochs, batch_size of 1, and patience of 3 (early stopping)
train_scan(se_scn, inference_vectorizer, train_Xy, val_Xy, 50, 32, 10)
acc, f1, prc, rc, auc = test_model(se_scn, test_Xy, inference_vectorizer)
# save to specified path
#args = parser.parse_args()
torch.save(se_scn.state_dict(), loc)
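# Entry point added for convenience (an addition, not part of the original script);
# it simply runs the training/evaluation pipeline above with its default save path.
if __name__ == '__main__':
    run_scan_net_ico()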
| StarcoderdataPython |
195010 | n = int(input())
a = n // 365
n = n - a*365
m = n // 30
n = n - m*30
d = n
print('{} ano(s)'.format(a))
print('{} mes(es)'.format(m))
print('{} dia(s)'.format(d))
| StarcoderdataPython |
1645030 | import discord
from libs.utils import get_now_timestamp_jst
# Returns a custom embed with shared defaults (JST timestamp) for reuse across the bot
def get_custum_embed() -> discord.Embed:
embed = discord.Embed()
embed.timestamp = get_now_timestamp_jst()
return embed
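# Usage sketch (added illustration; the title and description strings are placeholders):
def build_status_embed() -> discord.Embed:
    embed = get_custum_embed()
    embed.title = "Status"
    embed.description = "All systems operational"
    return embed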
| StarcoderdataPython |
3335484 | <filename>check/tests/__init__.py
#
# Tests for the CellML validation methods
#
| StarcoderdataPython |
10935 | <filename>mne/time_frequency/psd.py
# Authors : <NAME>, <EMAIL> (2011)
# <NAME> <<EMAIL>>
# License : BSD 3-clause
import numpy as np
from ..parallel import parallel_func
from ..io.pick import _pick_data_channels
from ..utils import logger, verbose, _time_mask
from ..fixes import get_spectrogram
from .multitaper import psd_array_multitaper
def _psd_func(epoch, noverlap, n_per_seg, nfft, fs, freq_mask, func):
"""Aux function."""
return func(epoch, fs=fs, nperseg=n_per_seg, noverlap=noverlap,
nfft=nfft, window='hamming')[2][..., freq_mask, :]
def _check_nfft(n, n_fft, n_per_seg, n_overlap):
"""Ensure n_fft, n_per_seg and n_overlap make sense."""
if n_per_seg is None and n_fft > n:
raise ValueError(('If n_per_seg is None n_fft is not allowed to be > '
'n_times. If you want zero-padding, you have to set '
'n_per_seg to relevant length. Got n_fft of %d while'
' signal length is %d.') % (n_fft, n))
n_per_seg = n_fft if n_per_seg is None or n_per_seg > n_fft else n_per_seg
n_per_seg = n if n_per_seg > n else n_per_seg
if n_overlap >= n_per_seg:
raise ValueError(('n_overlap cannot be greater than n_per_seg (or '
'n_fft). Got n_overlap of %d while n_per_seg is '
'%d.') % (n_overlap, n_per_seg))
return n_fft, n_per_seg, n_overlap
def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False):
"""Check PSD data / pull arrays from inst."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (BaseEpochs, BaseRaw, Evoked)):
raise ValueError('epochs must be an instance of Epochs, Raw, or'
'Evoked. Got type {0}'.format(type(inst)))
time_mask = _time_mask(inst.times, tmin, tmax, sfreq=inst.info['sfreq'])
if picks is None:
picks = _pick_data_channels(inst.info, with_ref_meg=False)
if proj:
# Copy first so it's not modified
inst = inst.copy().apply_proj()
sfreq = inst.info['sfreq']
if isinstance(inst, BaseRaw):
start, stop = np.where(time_mask)[0][[0, -1]]
rba = 'NaN' if reject_by_annotation else None
data = inst.get_data(picks, start, stop + 1, reject_by_annotation=rba)
elif isinstance(inst, BaseEpochs):
data = inst.get_data()[:, picks][:, :, time_mask]
else: # Evoked
data = inst.data[picks][:, time_mask]
return data, sfreq
@verbose
def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0,
n_per_seg=None, n_jobs=1, verbose=None):
"""Compute power spectral density (PSD) using Welch's method.
Parameters
----------
x : array, shape=(..., n_times)
The data to compute PSD from.
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
    psds : ndarray, shape (..., n_freqs)
The power spectral densities. All dimensions up to the last will
be the same as input.
freqs : ndarray, shape (n_freqs,)
The frequencies.
Notes
-----
.. versionadded:: 0.14.0
"""
spectrogram = get_spectrogram()
dshape = x.shape[:-1]
n_times = x.shape[-1]
x = x.reshape(-1, n_times)
# Prep the PSD
n_fft, n_per_seg, n_overlap = _check_nfft(n_times, n_fft, n_per_seg,
n_overlap)
win_size = n_fft / float(sfreq)
logger.info("Effective window size : %0.3f (s)" % win_size)
freqs = np.arange(n_fft // 2 + 1, dtype=float) * (sfreq / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
freqs = freqs[freq_mask]
# Parallelize across first N-1 dimensions
parallel, my_psd_func, n_jobs = parallel_func(_psd_func, n_jobs=n_jobs)
x_splits = np.array_split(x, n_jobs)
f_spectrogram = parallel(my_psd_func(d, noverlap=n_overlap, nfft=n_fft,
fs=sfreq, freq_mask=freq_mask,
func=spectrogram, n_per_seg=n_per_seg)
for d in x_splits)
# Combining, reducing windows and reshaping to original data shape
psds = np.concatenate([np.nanmean(f_s, axis=-1)
for f_s in f_spectrogram], axis=0)
psds.shape = dshape + (-1,)
return psds, freqs
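def _psd_array_welch_demo():
    """Minimal usage sketch (added for illustration, not part of the original module).

    Estimate the PSD of a synthetic 10 Hz sinusoid and return the frequency at which
    the spectrum peaks; the sampling rate and FFT length are arbitrary choices.
    """
    sfreq = 256.
    t = np.arange(0, 4, 1. / sfreq)
    x = np.sin(2 * np.pi * 10. * t)[np.newaxis, :]  # shape (n_channels=1, n_times)
    psds, freqs = psd_array_welch(x, sfreq, fmin=1., fmax=40., n_fft=512)
    return freqs[np.argmax(psds[0])]  # expected to be close to 10 Hz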
@verbose
def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256,
n_overlap=0, n_per_seg=None, picks=None, proj=False, n_jobs=1,
reject_by_annotation=True, verbose=None):
"""Compute the power spectral density (PSD) using Welch's method.
Calculates periodograms for a sliding window over the time dimension, then
averages them together for each channel/epoch.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
If n_per_seg is None, n_fft must be >= number of time points
in the data.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
reject_by_annotation : bool
Whether to omit bad segments from the data while computing the
PSD. If True, annotated segments with a description that starts
with 'bad' are omitted. Has no effect if ``inst`` is an Epochs or
Evoked object. Defaults to True.
.. versionadded:: 0.15.0
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_multitaper
psd_array_welch
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj,
reject_by_annotation=reject_by_annotation)
return psd_array_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_overlap=n_overlap, n_per_seg=n_per_seg,
n_jobs=n_jobs, verbose=verbose)
@verbose
def psd_multitaper(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, proj=False,
n_jobs=1, verbose=None):
"""Compute the power spectral density (PSD) using multitapers.
Calculates spectral density for orthogonal tapers, then averages them
together for each channel/epoch. See [1] for a description of the tapers
and [2] for the general method.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
References
----------
.. [1] <NAME>. "Prolate spheroidal wave functions, Fourier analysis,
and uncertainty V: The discrete case." Bell System Technical
Journal, vol. 57, 1978.
.. [2] <NAME>. and <NAME>. "Spectral Analysis for Physical
Applications: Multitaper and Conventional Univariate Techniques."
Cambridge University Press, 1993.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_array_multitaper
psd_welch
csd_multitaper
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj)
return psd_array_multitaper(data, sfreq, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
n_jobs=n_jobs, verbose=verbose)
| StarcoderdataPython |
1730138 | <filename>littlebrother/test/helpers.py
"""Test helpers."""
import os.path
from stenographer import CassetteAgent
from twisted.internet import reactor
from twisted.web.client import (ContentDecoderAgent, RedirectAgent,
Agent, GzipDecoder)
def cassette_path(name):
"""Return the full path of a cassette file in our fixtures."""
return os.path.join(os.path.dirname(__file__),
'fixtures', 'cassettes', name + '.json')
class CassetteTestMixin(object):
extractor = None
def assert_title(self, cassette_name, expected):
cassette_agent = CassetteAgent(Agent(reactor),
cassette_path(cassette_name),
preserve_exact_body_bytes=True)
agent = ContentDecoderAgent(RedirectAgent(cassette_agent),
[('gzip', GzipDecoder)])
finished = agent.request(
'GET', 'http://127.0.0.1:5000/{}'.format(cassette_name))
finished.addCallback(self.extractor.extract)
finished.addCallback(self.assertEqual, expected)
finished.addBoth(cassette_agent.save)
return finished
| StarcoderdataPython |
1738568 | """
You're given two integers, n and m.
Find position of the rightmost pair of equal bits in their binary representations (it is guaranteed that such a pair exists),
counting from right to left.
Return the value of 2^position_of_the_found_pair (0-based).
Example
For n = 10 and m = 11, the output should be
equalPairOfBits(n, m) = 2.
10 (decimal) = 1010 (binary) and 11 (decimal) = 1011 (binary); the position of the rightmost pair of equal bits is the bit at position 1 (0-based) from the right in the binary representations.
So the answer is 2^1 = 2.
"""
def equalPairOfBits(n, m):
    # (n + m + 1) & (~m - n) equals (n + m + 1) & -(n + m + 1), i.e. the lowest set
    # bit of n + m + 1.  Below the rightmost equal pair every bit column of n and m
    # sums to 1, so those low bits of n + m are all ones; adding 1 carries into the
    # first position where the bits agree, which is exactly the answer 2^position.
    return n + m + 1 & ~m - n
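# Cross-check added for illustration: the arithmetic trick above is equivalent to
# isolating the lowest zero bit of n ^ m, i.e. the first position (from the right)
# where the two numbers agree.
def equalPairOfBitsReference(n, m):
    return ~(n ^ m) & ((n ^ m) + 1)
if __name__ == "__main__":
    for a in range(64):
        for b in range(64):
            assert equalPairOfBits(a, b) == equalPairOfBitsReference(a, b)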
| StarcoderdataPython |
112633 | import tensorflow as tf
import os
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
def show_help():
help(tf.contrib.lite.TocoConverter)
# Convert a local .pb (frozen graph) file to TensorFlow Lite (float)
def pb_to_tflite(pb_file, save_name, input_arrays, output_arrays):
# graph_def_file = "./models/faceboxes.pb"
# input_arrays = ["inputs"]
# output_arrays = ['out_locs', 'out_confs']
# converter = tf.contrib.lite.TFLiteConverter.from_frozen_graph(pb_file, input_arrays, output_arrays)
converter = tf.contrib.lite.TocoConverter.from_frozen_graph(pb_file, input_arrays, output_arrays)
# converter = tf.contrib.lite.toco_convert.from_frozen_graph(pb_file, input_arrays, output_arrays)
tflite_model = converter.convert()
open(save_name, "wb").write(tflite_model)
# Convert a GraphDef to TensorFlow Lite (float) using a tf.Session
def sess_to_tflite(sess, save_name, input_arrays=['inputs'], output_arrays=['out_locs', 'out_confs']):
# converter = tf.contrib.lite.TFLiteConverter.from_session(sess, input_arrays, output_arrays)
converter = tf.contrib.lite.TocoConverter.from_session(sess, input_arrays, output_arrays)
# converter = tf.contrib.lite.toco_convert.from_session(sess, input_arrays, output_arrays)
tflite_model = converter.convert()
open(save_name, "wb").write(tflite_model)
# Convert a local SavedModel directory to TensorFlow Lite (float)
def save_model_to_tflite_float(saved_model_dir, save_name, input_arrays=None, output_arrays=None):
# converter = tf.contrib.lite.TFLiteConverter.from_saved_model(saved_model_dir=saved_model_dir,
# input_arrays=input_arrays,
# output_arrays=output_arrays)
converter = tf.contrib.lite.TocoConverter.from_saved_model(saved_model_dir=saved_model_dir,
input_arrays=input_arrays,
output_arrays=output_arrays)
# converter = tf.contrib.lite.toco_convert.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
open(save_name, "wb").write(tflite_model)
# Convert a local Keras model file to TensorFlow Lite (float) (the tf.keras file must contain both the model and its weights.)
def keras_to_tflite():
converter = tf.contrib.lite.TFLiteConverter.from_keras_model_file("keras_model.h5")
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# =========================================================================================================
# Convert a local SavedModel directory to TensorFlow Lite (quantized)
def save_model_to_tflite_quant(saved_model_dir, save_name, input_arrays=None, output_arrays=None):
converter = tf.contrib.lite.TFLiteConverter.from_saved_model(saved_model_dir=saved_model_dir,
input_arrays=input_arrays,
output_arrays=output_arrays)
# converter = tf.contrib.lite.TocoConverter.from_saved_model(saved_model_dir=saved_model_dir,
# input_arrays=input_arrays,
# output_arrays=output_arrays)
# converter = tf.contrib.lite.toco_convert.from_saved_model(saved_model_dir)
converter.inference_type = tf.contrib.lite.constants.QUANTIZED_UINT8
input_arrays = converter.get_input_arrays()
converter.quantized_input_stats = {input_arrays[0]: (128., 127.)}
tflite_model = converter.convert()
open(save_name, "wb").write(tflite_model)
# =========================================================================================================
def save_pbtxt(save_path, save_name='graph.pbtxt', output_node_names=['inputs', 'out_locs', 'out_confs']):
with tf.Session() as sess:
print('save model graph to .pbtxt: %s' % os.path.join(save_path, save_name))
save_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names)
tf.train.write_graph(save_graph, '', os.path.join(save_path, save_name))
# Save the graph in .pb format
def save_pb(save_path, save_name='faceboxes.pb', output_node_names=['inputs', 'out_locs', 'out_confs']):
with tf.Session() as sess:
print('save model to .pb: %s' % os.path.join(save_path, save_name))
        # convert_variables_to_constants requires output_node_names as a list(); multiple names are allowed
        # these names must match the graph's input/output node names defined earlier; nothing else needs to change
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names)
with tf.gfile.FastGFile(os.path.join(save_path, save_name), mode='wb') as f:
f.write(constant_graph.SerializeToString())
# Load a .pb file
def load_pb(load_path, save_name='faceboxes.pb'):
# sess = tf.Session()
with tf.Session() as sess:
        with gfile.FastGFile(os.path.join(load_path, save_name), mode='rb') as f:  # load the model file
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')  # import the computation graph
        # # An initialization step is required
        # sess.run(tf.global_variables_initializer())
        # # Restore the variables first
        # print(sess.run('b:0'))
        # # The next three lines are the key to reproducing the model
        # # Inputs
        # input_x = sess.graph.get_tensor_by_name('x:0')  # 'x' must match the input name used when the model was saved!
        # input_y = sess.graph.get_tensor_by_name('y:0')  # 'y' must match the input name used when the model was saved!
        # op = sess.graph.get_tensor_by_name('op_to_store:0')  # 'op_to_store' must match the output name used when the model was saved!
        # ret = sess.run(op, feed_dict={input_x: 5, input_y: 5})
        # print(ret)
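        # Added sketch (hedged): a minimal example of running the frozen faceboxes graph loaded
        # above. The tensor names 'inputs:0', 'out_locs:0' and 'out_confs:0' follow the node names
        # used elsewhere in this file, and the dummy input shape is purely illustrative.
        # import numpy as np
        # inputs = sess.graph.get_tensor_by_name('inputs:0')
        # out_locs = sess.graph.get_tensor_by_name('out_locs:0')
        # out_confs = sess.graph.get_tensor_by_name('out_confs:0')
        # dummy = np.zeros((1, 1024, 1024, 3), dtype=np.float32)
        # locs, confs = sess.run([out_locs, out_confs], feed_dict={inputs: dummy})
        # print(locs.shape, confs.shape)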
if __name__ == '__main__':
# show_help()
# pb_to_tflite(pb_file="./models/faceboxes.pb",
# save_name="./models/faceboxes.tflite",
# input_arrays=["inputs"],
# output_arrays=['out_locs', 'out_confs'])
save_model_to_tflite_float(saved_model_dir='./export/run00/1557046559',
save_name='./models/faceboxes_float.tflite',
input_arrays=["image_tensor"],
output_arrays=['reshaping/loc_predict', 'reshaping/conf_predict'])
# save_model_to_tflite_float(saved_model_dir='./export/run00/1557046559',
# save_name='./models/faceboxes_float.tflite',
# input_arrays=["image_tensor"],
# output_arrays=['nms/map/TensorArrayStack/TensorArrayGatherV3',
# 'nms/map/TensorArrayStack_1/TensorArrayGatherV3',
# 'nms/map/TensorArrayStack_2/TensorArrayGatherV3'])
# save_model_to_tflite_quant(saved_model_dir='./export/run00/1555989957',
# save_name='./models/faceboxes_quant.tflite',
# input_arrays=["image_tensor"],
# output_arrays=['reshaping/loc_predict', 'reshaping/conf_predict'])
| StarcoderdataPython |
4826455 | <filename>gistmagic/__init__.py<gh_stars>0
__version__ = '0.0.1'
from .gistmagic import GistMagic
def load_ipython_extension(ipython):
token = input("\nGitHub token: ")
gistmagic = GistMagic(ipython, token)
ipython.register_magics(gistmagic)
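# Added usage note (hedged): in an IPython/Jupyter session the extension would typically be
# activated with the line magic below, which then prompts once for a GitHub token. This assumes
# the package is installed under the name "gistmagic".
#
#   %load_ext gistmagic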
| StarcoderdataPython |
3347256 | <filename>app/models/mongo_base.py<gh_stars>0
"""
@File : mongo_base.py
@Author: GaoZizhong
@Date : 2020/6/11 14:22
@Desc : MongoDB model classes
"""
import datetime
from flask_mongoengine import MongoEngine
mongo_db = MongoEngine()
class BaseModel(object):
"""
    Base class for all models
"""
    createDate = mongo_db.DateTimeField(default=datetime.datetime.utcnow)
    modifyDate = mongo_db.DateTimeField(default=datetime.datetime.utcnow)
# Jinnan illegal-construction records table
class jhwj(BaseModel, mongo_db.Document):
_id = mongo_db.ObjectIdField()
content = mongo_db.StringField()
image_name = mongo_db.StringField()
file_number = mongo_db.StringField()
image = mongo_db.FileField()
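# Added usage sketch (hedged): a minimal example of inserting and querying a jhwj document.
# It assumes a Flask app has already been configured with flask_mongoengine (MONGODB_SETTINGS)
# and mongo_db.init_app(app) has been called; the field values are illustrative.
def example_save_and_query_jhwj():
    doc = jhwj(content="demolition notice", image_name="site_001.jpg", file_number="JN-2020-001")
    doc.save()  # insert into the jhwj collection
    return jhwj.objects(file_number="JN-2020-001").first()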
| StarcoderdataPython |
1631059 | <gh_stars>1-10
from flask import Blueprint,request,redirect,flash
from . import db # Importing Database Variable
from .models import User # Importing User from models.py to access Name of User
from .models import Question
import uuid
# from flask.typing import StatusCode
askQuestion = Blueprint('askQuestion',__name__,template_folder='templates') # Creating Blueprint to adress /askQuestion endpoint
@askQuestion.route('/<string:profileUserName>',methods=['POST'])
def askQuestionFunction(profileUserName):
userData = User.query.filter_by(userName=profileUserName).first() # Getting Data of User
askedQuestionId = uuid.uuid4().hex # Creating Question Id of Question
askedQuestionTitle = request.form['questionTitle'] # Getting Question Title
askedQuestionDescription = request.form['questionDescription'] # Getting Question Description
askedQuestionCategory = request.form['selectACategory'] # Getting Category Id of the question
    # Check that the question has both a title and a description
    if len(askedQuestionTitle) == 0 or len(askedQuestionDescription) == 0:
        flash('The Question must have a title and a description', category='error')  # Flash an error message if either is missing
        return redirect(f'/profile/{profileUserName}')  # Return to the same page if the question lacks a title or description
else:
newQuestion = Question(questionId=askedQuestionId,questionTitle=askedQuestionTitle,questionDescription=askedQuestionDescription,userNameOfAsker=profileUserName,realNameOfAsker=userData.realNameOfUser,categoryId=askedQuestionCategory) # Creating Instance of Question Object of Database
try:
db.session.add(newQuestion)
db.session.commit()
flash('Question Added Succesfully',category='success')
return redirect(f'/question/{askedQuestionId}') # Redirecting to questions page
except:
flash('Some Error Occured while adding the question',category='error')
return redirect(f'/profile/{profileUserName}') # Redirecting to questions page | StarcoderdataPython |
96219 | <reponame>adisakshya/pycrypto
"""
MODULE NAME: helper_cryptoid
Author: <NAME>
"""
from lists import list_symmetric_ciphers, list_asymmetric_ciphers
import codecs
from Crypto.Random import get_random_bytes
op_formats = ["", "base64", "hex"]
def save_result(text):
file_name = 'output.txt'
if input("\nWant to save result in a file? (Y/N) ") in ['y', 'Y']:
file_name = input('Enter file name:(with extension) ')
    with open(file_name, 'w+') as f:
        f.write(text)
def get_cipher():
print("Select Cipher: \n\t\t1. AES\n\t\t2. DES\n\t\t3. Triple DES\n\t\t4. RC2\n\t\t5. CAST\n\t\t6. Blowfish\n\t\t7. RC4\n\t\t8. Salsa20\n\t\t9. ChaCha20")
print("\n\t\t10. RSA\n\t\t11. Viginere Cipher\n\t\t12. Caesar Cipher\n\t\t13. --Playfair Cipher--\n\t\t14. Square Cipher\n\t\t15. --Serpant--")
return int(input('\nEnter cipher number -> '))
def valid_key_size(cipher_no):
return list_symmetric_ciphers[cipher_no].get_valid_key_size()
def transform(cipher_text, op_format_no):
return codecs.encode(cipher_text, op_formats[op_format_no]).decode('utf-8')
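# Added example (hedged): with op_formats = ["", "base64", "hex"], op_format_no selects the text
# encoding of the raw cipher bytes, e.g. transform(b"\x01\xff", 2) returns "01ff"; the base64
# codec (op_format_no=1) also appends a trailing newline to its output.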
def get_mode():
print("Select operating mode\n\t\t1. Encryption\n\t\t2. Decryption")
return int(input('-> '))
def get_key(cipher_no, valid_key_size_list):
print("\nValid key size(s): ", valid_key_size_list)
choice = input("Want a auto-generated key? (Y/N): ")
if choice in ['y', 'Y']:
key = get_random_bytes(valid_key_size_list[0])
print("Generated Key: ", key)
return key
else:
return input("Enter key: ")
def get_sym_cipher_text(message, cipher_no, mode_no=1,
iv="This is an IV456", key="This is a key123",
op_format_no=1):
print("\nEncrypting message: ",message)
print("With ", list_symmetric_ciphers[cipher_no])
cipher_text = " "
# symmetric cipher
cipher = list_symmetric_ciphers[cipher_no]
# call to appropriate cipher
try:
try:
cipher_text = cipher.encrypt(message, key, mode_no, iv)
except:
cipher_text = cipher.encrypt(message, key, mode_no)
except TypeError:
cipher_text = cipher.encrypt(message, key)
try:
return transform(cipher_text, op_format_no)
except:
        print('Could not encode the output; returning the raw cipher text')
return cipher_text
def get_asym_cipher_text(cipher_no, message, key=None,
shift_count=0, op_format_no = 1):
cipher_no -= 9
print("\nEncrypting message: ",message)
print("With ",list_asymmetric_ciphers[cipher_no])
if key:
print("Key: ", key)
else:
print("Shift Count: ", shift_count)
if cipher_no == 1:
public_key = key.publickey()
cipher_text = list_asymmetric_ciphers[cipher_no].encrypt(public_key, message)
if cipher_no == 2:
cipher_text = list_asymmetric_ciphers[cipher_no].encrypt(message, key)
if cipher_no == 3:
cipher_text = list_asymmetric_ciphers[cipher_no].encrypt(message, shift_count)
if cipher_no in [2, 5, 6]:
cipher_text = list_asymmetric_ciphers[cipher_no].encrypt(message)
try:
return transform(cipher_text, op_format_no)
except:
return cipher_text
def sym_encryption(message, cipher_no):
print("\nEnter Mode: \n\t\t1. ECB\n\t\t2. CBC\n\t\t3. CFB\n\t\t4. OFB\n\t\t5. CTR\n\t\t6. EAX")
mode_no = int(input('\nEnter mode number -> '))
iv = "This is an IV456"
if mode_no in range(2,6):
iv = input("\nEnter initialization vector: ")
key = get_key(cipher_no, valid_key_size(cipher_no))
print("\nEnter Encrypted output fromat: \n\t\t1.BASE64\n\t\t2. HEX")
op_format_no = int(input('\nEnter choice: '))
return get_sym_cipher_text(message, cipher_no, mode_no, iv, key, op_format_no)
def asym_encryption(message, cipher_no):
key = None
shift_count = 0
if cipher_no == 10:
print("Generating RSA key...")
key = list_asymmetric_ciphers[cipher_no-9].generate_rsa_keys(1024)
print("Generated RSA key: ", key, key.publickey())
elif cipher_no == 12:
print("Enter Shift Count: ")
shift_count = int(input())
if cipher_no not in [10, 12]:
print("Enter secret key: ")
key = input()
print("Enter Encrypted output fromat: \n\t\t1. BASE64\n\t\t2. HEX")
op_format_no = int(input())
cipher_text = get_asym_cipher_text(cipher_no, message, key, shift_count, op_format_no)
return cipher_text
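# Added usage sketch (hedged): encrypting a short message non-interactively via
# get_sym_cipher_text. Cipher number 1 is AES and mode number 2 is CBC per the menus above;
# the 16-byte key/IV strings are illustrative and the call assumes the AES wrapper in lists.py
# accepts (message, key, mode, iv) in the order get_sym_cipher_text passes them.
def example_aes_cbc_base64():
    return get_sym_cipher_text("hello world", cipher_no=1, mode_no=2,
                               iv="This is an IV456", key="This is a key123",
                               op_format_no=1)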
| StarcoderdataPython |
4813576 | import uuid
from sklearn.metrics import roc_curve, roc_auc_score
from exception_layer.generic_exception.generic_exception import GenericException as PlotlyDashException
from project_library_layer.initializer.initializer import Initializer
from data_access_layer.mongo_db.mongo_db_atlas import MongoDBOperation
from project_library_layer.datetime_libray.date_time import get_time, get_date
import sys
import plotly.figure_factory as ff
import json
import pandas as pd
import plotly
import plotly.graph_objs as go
import random
class AccurayGraph:
def __init__(self, project_id=None, model_accuracy_dict: dict = None):
try:
self.project_id = project_id
self.model_accuray = model_accuracy_dict
self.initializer = Initializer()
self.mongo_db = MongoDBOperation()
self.accuracy_score_database_name = self.initializer.get_accuracy_metric_database_name()
self.accuracy_score_collection_name = self.initializer.get_accuracy_metric_collection_name()
self.colors = ['slategray', 'aquamarine', 'darkturquoise', 'deepskyblue', 'orange', 'green', 'purple']
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
"__init__"))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def get_random_color_name(self):
"""
:return: Name of a color
"""
try:
n_colors=len(self.colors)
index=random.randint(0,n_colors-1)
return self.colors[index]
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.get_random_color_name.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def save_accuracy(self):
try:
self.model_accuray.update(
{'project_id': self.project_id, 'stored_date': get_date(), 'stored_time': get_time()})
is_inserted = self.mongo_db.insert_record_in_collection(self.accuracy_score_database_name,
self.accuracy_score_collection_name,
self.model_accuray)
if is_inserted > 0:
return {'status': True, 'message': 'Model accuracy stored '}
else:
return {'status': False, 'message': 'Model accuracy failed to store'}
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.save_accuracy.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def get_accuray_score_of_trained_model(self, project_id):
try:
records = self.mongo_db.get_records(self.accuracy_score_database_name, self.accuracy_score_collection_name,
{'project_id': project_id})
if records is not None:
return {'status': True, 'message': 'accuracy record found', 'accuracy_data': records}
else:
return {'status': False, 'message': 'accuracy record not found'}
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.get_accuray_score_of_trained_model.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def get_training_execution_id_with_project_id(self):
try:
response = {'status': False, 'message': "We don't have project with execution id"}
df = self.mongo_db.get_dataframe_of_collection(self.accuracy_score_database_name,
self.accuracy_score_collection_name)
if df is None:
return response
training_execution_with_project_id = df[['project_id', 'training_execution_id']].copy()
training_execution_with_project_id.drop_duplicates(inplace=True)
training_execution_with_project_id_list = list(training_execution_with_project_id.T.to_dict().values())
if len(training_execution_with_project_id_list) > 0:
return {'status': True, 'message': 'We have project with execution id',
'training_execution_with_project_id_list': training_execution_with_project_id_list}
else:
return response
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.get_training_execution_id_with_project_id.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def save_accuracy_bar_graph(self, model_name_list, accuracy_score_list, project_id, execution_id, file_object,
title=None,x_label=None,y_label=None):
"""
:param model_name_list: model_name_list
:param accuracy_score_list: accuracy_score_list
        :return: None; the graph JSON is written via file_object
"""
try:
            x_label = 'Model Name' if x_label is None else x_label
            y_label = 'Score' if y_label is None else y_label
if len(model_name_list) != len(accuracy_score_list):
return False
fig = go.Figure()
fig.add_trace(go.Bar(
x=model_name_list, # assign x as the dataframe column 'x'
y=accuracy_score_list,
marker_color=self.get_random_color_name()
)
)
fig.update_layout(
xaxis_title=x_label,
yaxis_title=y_label,
title={'text': title}
)
graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
file_object.write_file_content(
directory_full_path=self.initializer.get_project_report_graph_file_path(project_id, execution_id),
file_name=str(uuid.uuid4()) + '.graph',
content=graph_json)
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.save_accuracy_bar_graph.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def save_roc_curve_plot_binary_classification(self, fpr, tpr, project_id=None, execution_id=None, file_object=None,
title=None):
"""
:param fpr: False +ve rate
:param tpr: True +ve rate
:param project_id: project id
:param execution_id: execution id
:param file_object: file object
:param title: title
:return: nothing
"""
try:
fig = go.Figure()
fig.add_trace(go.Scatter(x=fpr, y=tpr, fill='tozeroy',fillcolor=self.get_random_color_name())) # fill down to xaxis
fig.update_layout(
xaxis_title='False Positive Rate',
yaxis_title='True Positive Rate',
title={'text': title}
)
json_graph = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
file_object.write_file_content(
directory_full_path=self.initializer.get_project_report_graph_file_path(project_id, execution_id),
file_name=str(uuid.uuid4()) + '.graph',
content=json_graph)
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.save_roc_curve_plot_binary_classification.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def save_plot_multiclass_roc_curve(self,y, y_scores, model, project_id=None, execution_id=None, file_object=None,
title=None):
"""
:param y: truth value of y
:param y_scores: predict proba score
:param model: trained model
:param project_id: project id
:param execution_id: execution id
:param file_object: file object
:param title: title of graph
:return: nothing
"""
try:
y_onehot = pd.get_dummies(y, columns=model.classes_)
# Create an empty figure, and iteratively add new lines
# every time we compute a new class
fig = go.Figure()
fig.add_shape(
type='line', line=dict(dash='dash'),
x0=0, x1=1, y0=0, y1=1
)
for i in range(y_scores.shape[1]):
y_true = y_onehot.iloc[:, i]
y_score = y_scores[:, i]
fpr, tpr, _ = roc_curve(y_true, y_score)
auc_score = roc_auc_score(y_true, y_score)
name = f"{y_onehot.columns[i]} (AUC={auc_score:.2f})"
fig.add_trace(go.Scatter(x=fpr, y=tpr, name=name, mode='lines'))
fig.update_layout(
xaxis_title='False Positive Rate',
yaxis_title='True Positive Rate',
yaxis=dict(scaleanchor="x", scaleratio=1),
xaxis=dict(constrain='domain'),
title={'text': title}
)
graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
file_object.write_file_content(
directory_full_path=self.initializer.get_project_report_graph_file_path(project_id, execution_id),
file_name=str(uuid.uuid4()) + '.graph',
content=graph_json)
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.save_plot_multiclass_roc_curve.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def save_scatter_plot(self, x_axis_data, y_axis_data, project_id, execution_id, file_object, x_label=None,
y_label=None, title=None):
"""
:param x_axis_data: X axis data
:param y_axis_data: Y axis data
:param project_id: project id
:param execution_id: execution_id
:param file_object: file object
:param x_label: x label name
:param y_label: ylabel name
:return: nothing
"""
try:
x_axis_label = x_label if x_label is not None else 'X axis'
y_axis_label = y_label if y_label is not None else 'Y axis'
if len(x_axis_data) != len(y_axis_data):
return False
fig = go.Figure()
fig.add_trace(go.Scatter(
x=x_axis_data, # assign x as the dataframe column 'x'
y=y_axis_data,
fillcolor=self.get_random_color_name(),
mode='markers'
)
)
fig.update_layout(
xaxis_title=x_axis_label,
yaxis_title=y_axis_label,
title={'text': title}
)
graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
file_object.write_file_content(
directory_full_path=self.initializer.get_project_report_graph_file_path(project_id, execution_id),
file_name=str(uuid.uuid4()) + '.graph',
content=graph_json)
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.save_scatter_plot.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def save_line_plot(self, x_axis_data, y_axis_data, project_id, execution_id, file_object, x_label=None,
y_label=None, title=None):
"""
:param x_axis_data: X axis data
:param y_axis_data: Y axis data
:param project_id: project id
:param execution_id: execution_id
:param file_object: file object
:param x_label: x label name
:param y_label: ylabel name
:return: nothing
"""
try:
x_axis_label = x_label if x_label is not None else 'X axis'
y_axis_label = y_label if y_label is not None else 'Y axis'
if len(x_axis_data) != len(y_axis_data):
return False
fig = go.Figure()
fig.add_trace(go.Scatter(
x=x_axis_data, # assign x as the dataframe column 'x'
y=y_axis_data,
mode='lines+markers',
fillcolor=self.get_random_color_name()
)
)
fig.update_layout(
xaxis_title=x_axis_label,
yaxis_title=y_axis_label,
title={'text': title}
)
graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
file_object.write_file_content(
directory_full_path=self.initializer.get_project_report_graph_file_path(project_id, execution_id),
file_name=str(uuid.uuid4()) + '.graph',
content=graph_json)
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed to instantiate mongo_db_object in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.save_line_plot.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def save_distribution_plot(self, data,label, project_id, execution_id, file_object, x_label=None,
y_label=None, title=None):
"""
:param data: data kind of array
:param label: list of label
:param project_id: project id
:param execution_id: execution id
:param file_object: file object
:param x_label: x label
:param y_label: y label
:param title: title
:return: nothing
"""
try:
fig = ff.create_distplot([data], group_labels=[label], bin_size=.5,
curve_type='normal', # override default 'kde'
)
fig.update_layout(title_text=title)
graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
file_object.write_file_content(
directory_full_path=self.initializer.get_project_report_graph_file_path(project_id, execution_id),
file_name=str(uuid.uuid4()) + '.graph',
content=graph_json)
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.save_distribution_plot.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def save_pie_plot(self, data, label, project_id, execution_id, file_object, title=None):
"""
:param data: data
:param label: label
:param project_id: project id
:param execution_id: execution id
:param file_object: file object
:param title: title
:return: nothing
"""
try:
fig = go.Figure(data=[go.Pie(labels=label, values=data)])
fig.update_layout(title_text=title)
graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
file_object.write_file_content(
directory_full_path=self.initializer.get_project_report_graph_file_path(project_id, execution_id),
file_name=str(uuid.uuid4()) + '.graph',
content=graph_json)
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.save_pie_plot.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
def get_training_execution_id_of_project(self, project_id):
"""
        :param project_id: accepts a project id
:return: return {'status':True/False,'training_execution_id_list':training_execution_id_list }
"""
try:
df = self.mongo_db.get_dataframe_of_collection(self.accuracy_score_database_name,
self.accuracy_score_collection_name)
if df is not None:
df = df[df['project_id'] == project_id]
if df.shape[0] == 0:
return {'status': False}
training_execution_id_list = list(df['training_execution_id'].unique())
if len(training_execution_id_list) > 0:
return {'status': True, 'training_execution_id_list': training_execution_id_list}
else:
return {'status': False}
else:
return {'status': False}
except Exception as e:
plotly_dash = PlotlyDashException(
"Failed in module [{0}] class [{1}] method [{2}]"
.format(AccurayGraph.__module__.__str__(), AccurayGraph.__name__,
self.get_training_execution_id_of_project.__name__))
raise Exception(plotly_dash.error_message_detail(str(e), sys)) from e
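# Added usage sketch (hedged): saving a bar graph of model scores for one training run. The
# project/execution identifiers are illustrative, file_object is assumed to expose the
# write_file_content() interface used by the methods above, and constructing AccurayGraph
# assumes the project's MongoDB configuration is available.
def example_save_bar_graph(file_object):
    graph = AccurayGraph(project_id=1, model_accuracy_dict={'model_name': 'XGBoost', 'score': 0.91})
    graph.save_accuracy_bar_graph(model_name_list=['XGBoost', 'RandomForest'],
                                  accuracy_score_list=[0.91, 0.88],
                                  project_id=1, execution_id='run-001',
                                  file_object=file_object,
                                  title='Accuracy comparison',
                                  x_label='Model Name', y_label='Score')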
| StarcoderdataPython |
1793422 | <reponame>AndrewWood94/WalkingSpeedPaper<filename>src/gps_reader_pkg/break_finder.py
"""
Finds breaks in gps tracks
"""
from PyQt5.QtCore import QVariant
import math
from qgis.core import QgsVectorLayer, QgsExpression, QgsExpressionContext, QgsExpressionContextUtils, QgsField
def get_segment_info(datalayer):
"""
Read attributes from gps segment
:param datalayer: gps segment to be read
:return: start feature id,
end feature id,
median speed,
median distance,
total distance,
total duration
"""
exp = QgsExpression('array('
'minimum("fid"),'
'maximum("fid"),'
'median("speed"),'
'median("distance"),'
'sum("distance"),'
'sum("duration"))')
context = QgsExpressionContext()
context.appendScopes(QgsExpressionContextUtils.globalProjectLayerScopes(datalayer))
first_feature, last_feature, median_speed, median_distance, total_dist, total_time = exp.evaluate(context)
return int(first_feature), int(last_feature), median_speed, median_distance, total_dist, total_time
def break_likelihood(datalayer, feature, median_speed):
"""
Calculate break_likelihood for a point based on point speed & angle between previous & next points
:param datalayer: gps segment
:param feature: gps point id to check
:param median_speed: median speed for gps segment
:return: category_break: High/Medium/Low break likelihood for point
category_speed: High/Medium/Low point speed
category_angle: Wide/Narrow point angle
line_direction: Quadrant the direction of travel is heading
"""
prevfeature = datalayer.getFeature(feature - 1)
feature = datalayer.getFeature(feature)
a1 = prevfeature.geometry().angleAtVertex(0) * 180 / math.pi
a2 = feature.geometry().angleAtVertex(0) * 180 / math.pi
speed = feature.attribute('speed')
#Set angle = 180 for first point in segment
try:
if feature["Segment No"] == prevfeature["Segment No"]:
angle = abs(180 - abs(a1 - a2))
else:
angle = 180
except:
angle = 180
if speed > 10:
category_speed = 'High'
elif speed <= median_speed / 2:
category_speed = 'Zero'
else:
category_speed = 'Low'
if angle > 90:
category_angle = 'Wide'
if category_speed == 'Zero' or category_speed == 'High':
category_break = 'Medium'
else:
category_break = 'Low'
else:
category_angle = 'Narrow'
if category_speed == 'Low' or category_speed == 'Zero':
category_break = 'High'
else:
category_break = 'Medium'
if 0 <= a2 < 90:
line_direction = 1
elif 90 <= a2 < 180:
line_direction = 2
elif 180 <= a2 < 270:
line_direction = 3
else:
line_direction = 4
return category_break, category_speed, category_angle, line_direction
def rangequery(feature, datalayer, median_speed, median_distance):
"""
Finds all features within 10 minutes of given point which are under median distance away,
plus the previous point if speed > 2 * median speed
:param feature: point id for epicentre of search
:param datalayer: gps segment to check
:param median_speed: median segment point speed
:param median_distance: median distance between points in segment
:return: n.keys(): point ids of features in range
extreme_speed: if point list contains exceptionally fast or slow points
s_line: if no gaps exist in ids of points found
"""
featureID = feature['fid']
extreme_speed = False
sline = False
a = feature["a_time"]
#com = coordinates of centre of mass
com = (feature.geometry().asPolyline()[0])
n = dict()
#QGIS expression to search layer for points
expression = 'abs(second(to_datetime(\"a_time\") - to_datetime(\'' + a + '\'))) < 600 and ' \
'((distance(transform(start_point($geometry) , \'EPSG:4326\',\'EPSG:27700\'), ' + \
'transform(make_point(\'' + str(com.x()) + '\',\'' + str(com.y()) + '\'), ' \
'\'EPSG:4326\', \'EPSG:27700\'))<=' + str(median_distance) + ') or ' \
'(\"fid\" = ' + str(featureID - 1) + 'and \"speed\" > ' + str(float(median_speed * 2)) + '))'
datalayer.selectByExpression(expression)
for feat in datalayer.selectedFeatures():
p = feat['fid']
n[p] = True
if feat["speed"] > float(median_speed * 2) or feat["speed"] < 0.01:
n[p + 1] = True
extreme_speed = True
if len(n) == (max(n) - min(n)) + 1:
sline = True
return list(n.keys()), extreme_speed, sline
class BreakFinder:
"""
Class to find valid breakpoints in gps track
"""
def __init__(self):
pass
def find_breaks(self, data_path, point_list=None):
"""
Method to loop over points in gps track and check for valid breakpoints
:param data_path: gpkg file containing gps track
:param point_list: optional parameter to specify range of points to check
:return: updated gpkg file with OnBreak = 1 if point is breakpoint
"""
datalayer = QgsVectorLayer(data_path, 'All data', 'ogr')
selectedFeats = list()
plist = point_list
first, last, median_speed, median_distance, total_dist, total_time = get_segment_info(datalayer)
#Ignore tracks with under 250m or 2.5 min of travel, or high median speed (non walking)
if total_dist < 250 or total_time < 150 or median_speed > 10:
datalayer.dataProvider().truncate()
return False
if plist is None:
point_list = list(range(first, last + 1))
else:
first = plist[0]
last = plist[-1]
point_dict = dict.fromkeys(point_list, 'unknown')
for point in point_dict:
#Ignore points which have already been checked
if point_dict[point] != 'unknown':
continue
feature = datalayer.getFeature(point)
neighbourhood, extreme_speed, line = rangequery(feature, datalayer, median_speed, median_distance)
#Ignore very small point clusters/ possible clustered straight lines containing no extreme point speeds
if (len(neighbourhood) <= 4 or line) and not extreme_speed and len(neighbourhood) <= 10:
point_dict[point] = 'walk'
continue
point_dict[point] = 'cluster'
neighbour_dict = dict()
neighbour_direction = dict.fromkeys(list(range(1, 5)), False)
for neighbour in neighbourhood:
if not (first <= neighbour <= last):
continue
#Check break likelihood & walking direction of all points in cluster
break_chance, speed, angle, line_direction = break_likelihood(datalayer, neighbour, median_speed)
neighbour_dict[neighbour] = [break_chance, speed, angle, line_direction]
if point_dict[neighbour] == 'walk':
point_dict[neighbour] = 'cluster'
#Ignore points which have already been checked
if point_dict[neighbour] != 'unknown':
continue
point_dict[neighbour] = 'cluster'
#Find extent of cluster by checking each point
new_neighbours, extreme_speed, line = rangequery(datalayer.getFeature(neighbour), datalayer,
median_speed, median_distance)
if (len(new_neighbours) > 4 and not line) or extreme_speed or len(new_neighbours) > 10:
for new_neighbour in new_neighbours:
if new_neighbour not in neighbourhood:
neighbourhood.append(new_neighbour)
min_breakpoint = math.inf
max_breakpoint = 0
breakcount = 0
for k, v in sorted(neighbour_dict.items()):
if v[0] != 'Low':
if k < min_breakpoint:
min_breakpoint = k
if k > max_breakpoint:
max_breakpoint = k
breakcount += 1
#If no points have medium/high break likelihood, ignore cluster
if breakcount == 0 or len(neighbourhood) <= 4:
for neighbour in neighbour_dict:
point_dict[neighbour] = 'walk'
continue
breakpoints = list(range(min_breakpoint, max_breakpoint + 1))
#Check break likelihood of 'gaps' in cluster id list (ie cluster = points [1,2,5,6], check ids 3 & 4)
for item in breakpoints:
if item not in neighbourhood:
break_chance, speed, angle, line_direction = break_likelihood(datalayer, item, median_speed)
neighbour_dict[item] = [break_chance, speed, angle, line_direction]
if break_chance != 'Low':
breakcount += 1
neighbour_direction[neighbour_dict[item][3]] = True
#Check to ensure track doubles back on itself
if (neighbour_direction[1] & neighbour_direction[3]) or \
(neighbour_direction[2] & neighbour_direction[4]):
#Check less than half the points have low break likelihood
if breakcount / len(breakpoints) >= 0.5:
selectedFeats.extend(breakpoints)
point_dict.update(dict.fromkeys(breakpoints, 'cluster'))
        #Update/Add OnBreak field to the layer
field_no = datalayer.dataProvider().fieldNameIndex("OnBreak")
if field_no == -1:
newAttribute = [QgsField('OnBreak', QVariant.Int, 'Integer')]
datalayer.dataProvider().addAttributes(newAttribute)
field_no = datalayer.dataProvider().fieldNameIndex("OnBreak")
change_dict = {field_no: 1}
add_breaks = dict.fromkeys(selectedFeats, change_dict)
datalayer.dataProvider().changeAttributeValues(add_breaks)
return True
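# Added usage sketch (hedged): running the break finder over a GPS point layer. The layer URI is
# illustrative, and QgsVectorLayer requires an initialised QGIS application context, which this
# module assumes is set up by the caller.
def example_find_breaks():
    finder = BreakFinder()
    ok = finder.find_breaks("walk_2019_06_01.gpkg|layername=track_points")
    print("breakpoints written" if ok else "track rejected (too short, too slow, or not walking)")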
| StarcoderdataPython |
129835 | <reponame>D-Wolter/PycharmProjects
"""
CPF = 168.995.350-09
------------------------------------------------
1 * 10 = 10 # 1 * 11 = 11 <-
6 * 9 = 54 # 6 * 10 = 60
8 * 8 = 64 # 8 * 9 = 72
9 * 7 = 63 # 9 * 8 = 72
9 * 6 = 54 # 9 * 7 = 63
5 * 5 = 25 # 5 * 6 = 30
3 * 4 = 12 # 3 * 5 = 15
5 * 3 = 15 # 5 * 4 = 20
0 * 2 = 0 # 0 * 3 = 0
# -> 0 * 2 = 0
297 # 343
11 - (297 % 11) = 11 # 11 - (343 % 11) = 9
11 > 9 = 0 #
Digit 1 = 0 # Digit 2 = 9
"""
# Infinite loop
while True:
cpf_and_digito = '04045683941'
    #cpf_and_digito = input('Enter a CPF: ')
    cpf_sem_digito = cpf_and_digito[:-2]  # Drop the two check digits from the CPF
    reverso = 10  # Reverse counter
reverso_soma = 0
    # CPF digit loop
for index in range(19):
        if index > 8:  # The first pass runs over indices 0 to 9,
            index -= 9  # i.e. the first 9 digits of the CPF
        reverso_soma += int(cpf_sem_digito[index]) * reverso  # Running total of the multiplications
        reverso -= 1  # Decrement the reverse counter
if reverso < 2:
reverso = 11
dig_seg = 11 - (reverso_soma % 11)
            if dig_seg > 9:  # If the digit is greater than 9, its value becomes 0
dig_seg = 0
            reverso_soma = 0  # Reset the running total
            cpf_sem_digito += str(dig_seg)  # Append the generated digit to the new CPF
    # Reject repeated-digit sequences, e.g. 11111111111, 00000000000...
sequencia = cpf_sem_digito == str(cpf_sem_digito[0]) * len(cpf_and_digito)
    # Repeated-digit sequences were evaluating as valid, so this
    # extra check was added here
if cpf_and_digito == cpf_sem_digito and not sequencia:
        print('Valid')
else:
        print('Invalid')
| StarcoderdataPython |
3243661 | <filename>apps/tool/apis/water_mark.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import math
from PIL import Image, ImageFont, ImageDraw, ImageEnhance, ImageChops
def add_mark(imagePath, mark, out, quality):
'''
添加水印,然后保存图片
'''
im = Image.open(imagePath)
image = mark(im)
name = os.path.basename(imagePath)
if image:
new_name = out
if os.path.splitext(new_name)[1] != '.png':
image = image.convert('RGB')
image.save(new_name, quality=quality)
print(name + " Success.")
else:
print(name + " Failed.")
def set_opacity(im, opacity):
'''
    Set the watermark opacity
'''
assert opacity >= 0 and opacity <= 1
alpha = im.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
im.putalpha(alpha)
return im
def crop_image(im):
    '''Crop blank margins from the image'''
bg = Image.new(mode='RGBA', size=im.size)
diff = ImageChops.difference(im, bg)
del bg
bbox = diff.getbbox()
if bbox:
return im.crop(bbox)
return im
def gen_mark(font_height_crop, mark, size, color, font_family, opacity, space, angle):
'''
    Generate the watermark image and return a function that applies it
'''
    # Font width and height
is_height_crop_float = '.' in font_height_crop # not good but work
width = len(mark) * size
if is_height_crop_float:
height = round(size * float(font_height_crop))
else:
height = int(font_height_crop)
    # Create the watermark image (width, height)
mark_ = Image.new(mode='RGBA', size=(width, height))
    # Render the text
draw_table = ImageDraw.Draw(im=mark_)
draw_table.text(xy=(0, 0),
text=mark,
fill=color,
font=ImageFont.truetype(font_family, size=size))
del draw_table
    # Crop blank space
mark_ = crop_image(mark_)
    # Apply transparency
set_opacity(mark_, opacity)
def mark_im(im):
        '''Add the watermark onto im; im is the opened original image'''
        # Compute the diagonal length
c = int(math.sqrt(im.size[0] * im.size[0] + im.size[1] * im.size[1]))
        # Create a large canvas with the diagonal as width/height (so it still covers the original after rotation)
mark2 = Image.new(mode='RGBA', size=(c, c))
        # Tile the watermark text over the large canvas; mark_ is the watermark image generated above
y, idx = 0, 0
while y < c:
            # Stagger the x offset on alternate rows
x = -int((mark_.size[0] + space) * 0.5 * idx)
idx = (idx + 1) % 2
while x < c:
                # Paste the watermark image at this position
mark2.paste(mark_, (x, y))
x = x + mark_.size[0] + space
y = y + mark_.size[1] + space
        # Rotate the large canvas by the given angle
mark2 = mark2.rotate(angle)
        # Composite the rotated watermark canvas onto the original image
if im.mode != 'RGBA':
im = im.convert('RGBA')
        im.paste(mark2,  # watermark canvas
                 (int((im.size[0] - c) / 2), int((im.size[1] - c) / 2)),  # paste coordinates
mask=mark2.split()[3])
del mark2
return im
return mark_im
def marker(file,mark,out,
color="#dddddd",
space=200,
angle=30,
font_family="arial.ttf",
font_height_crop="1.2",
size=50,
opacity=0.05,
quality=80):
marker = gen_mark(font_height_crop, mark, size, color, font_family, opacity, space, angle)
add_mark(file, marker, out, quality)
if __name__ == '__main__':
marker('simple.jpg','QTechCode','test.jpg') | StarcoderdataPython |
4805265 | <reponame>Rounak40/Proxy-Scrapper-and-Scanner
# import modules
import requests
import json
from bs4 import BeautifulSoup
import re
from threading import Thread
global good_list
good_list = []
def get_links(proxy_type=None):
if proxy_type == "http":
data = open("site urls.txt").readlines()[0]
elif proxy_type == "https":
data = open("site urls.txt").readlines()[1]
elif proxy_type == "socks4":
data = open("site urls.txt").readlines()[2]
elif proxy_type == "socks5":
data = open("site urls.txt").readlines()[3]
else:
data = ""
return [i.strip() for i in data.split(",") if len(i) > 10]
def parse_proxies_from_html_response(response_data):
data = re.findall(r'[\w\.-]+:[\w\.-]+', response_data, re.MULTILINE)
proxies = []
for i in data:
if "." in i:
if i.split(":")[1].isdigit():
proxies.append(i+"\n")
return proxies
def scrap_proxies(proxy_type=None):
requests_links = get_links(proxy_type)
print("Scraping proxies....")
response_data = """
"""
for i in requests_links:
try:
res = requests.get(i,timeout=10).text
except:
res = ""
response_data += res
proxy_list = parse_proxies_from_html_response(response_data)
with open(proxy_type.upper()+"-proxies.txt","w+") as file:
file.writelines(proxy_list)
print("Saved Successfully!")
ask_input()
def check_proxy_by_url(proxy,url,timeout):
global good_list
p = dict(http=proxy, https=proxy)
try:
r = requests.get(url, proxies=p, timeout=timeout)
if r.status_code == 200:
good_list.append(proxy+"\n")
except Exception as e:
pass
def main(proxy_type,proxy_list):
global good_list
proxy_list2 = [proxy_type.lower()+"://"+i.strip() for i in proxy_list]
thread_list = []
lists = proxy_list2
print("Scanning",len(lists),proxy_type.upper(),"proxies...")
for l in lists:
t = Thread(target=check_proxy_by_url, args=[l,"http://ipinfo.io/json",10])
t.start()
thread_list.append(t)
for x in thread_list:
x.join()
    print('Proxies Scanned.')
with open(proxy_type.upper()+"-working-proxies.txt","w+") as file:
file.writelines(good_list)
ask_input()
def ask_proxy_type():
proxy_type = input("Which type of proxy you want to Scrape/Scan?\n1.HTTP\n2.HTTPS\n3.SOCKS4\n4.SOCKS5\n5.Back\n==>")
if str(proxy_type) == "1":
return "http"
elif str(proxy_type) == "2":
return "https"
elif str(proxy_type) == "3":
return "socks4"
elif str(proxy_type) == "4":
return "socks5"
elif str(proxy_type) == "5":
ask_input()
else:
print("Wrong input try again!")
ask_proxy_type()
def ask_input():
user_input = input("What you want to do? (Enter 1 or 2)\n1.Scrape Proxies.\n2.Scan Proxies \n==>")
if str(user_input) == "1":
proxy_type = ask_proxy_type()
scrap_proxies(proxy_type)
elif str(user_input) == "2":
proxy_type = ask_proxy_type()
file = input("File Name: ")
main(proxy_type,open(file).readlines())
else:
print("Wrong input try again!")
ask_input()
if __name__ == "__main__":
print("Welcome in Proxy Scrapper + Scanner.")
ask_input()
| StarcoderdataPython |
3295957 | """
Bottleneck Transformers for Visual Recognition.
adapted from https://github.com/CandiceD17/Bottleneck-Transformers-for-Visual-Recognition
"""
import torch
from einops import rearrange
from torch import einsum, nn
try:
from distribuuuu.models import resnet50
except ImportError:
from torchvision.models import resnet50
def expand_dim(t, dim, k):
"""
Expand dims for t at dim to k
"""
t = t.unsqueeze(dim=dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
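# Added example (hedged): expand_dim inserts a new axis and repeats it k times without copying
# data; the sizes below are illustrative.
def _expand_dim_example():
    t = torch.zeros(2, 3)
    out = expand_dim(t, dim=1, k=5)
    assert out.shape == (2, 5, 3)
    return out.shape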
def rel_to_abs(x):
"""
x: [B, Nh * H, L, 2L - 1]
Convert relative position between the key and query to their absolute position respectively.
Tensowflow source code in the appendix of: https://arxiv.org/pdf/1904.09925.pdf
"""
B, Nh, L, _ = x.shape
# pad to shift from relative to absolute indexing
col_pad = torch.zeros((B, Nh, L, 1)).cuda()
x = torch.cat((x, col_pad), dim=3)
flat_x = torch.reshape(x, (B, Nh, L * 2 * L))
flat_pad = torch.zeros((B, Nh, L - 1)).cuda()
flat_x = torch.cat((flat_x, flat_pad), dim=2)
# Reshape and slice out the padded elements
final_x = torch.reshape(flat_x, (B, Nh, L + 1, 2 * L - 1))
return final_x[:, :, :L, L - 1 :]
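# Added shape check (hedged): rel_to_abs re-indexes relative-position logits of shape
# (B, Nh, L, 2L - 1) into absolute-position logits of shape (B, Nh, L, L). Sizes are illustrative,
# and a CUDA device is assumed because the function allocates its padding tensors with .cuda().
def _rel_to_abs_shape_check(B=2, Nh=4, L=8):
    x = torch.randn(B, Nh, L, 2 * L - 1).cuda()
    out = rel_to_abs(x)
    assert out.shape == (B, Nh, L, L)
    return out.shape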
def relative_logits_1d(q, rel_k):
"""
q: [B, Nh, H, W, d]
rel_k: [2W - 1, d]
Computes relative logits along one dimension.
The details of relative position is explained in: https://arxiv.org/pdf/1803.02155.pdf
"""
B, Nh, H, W, _ = q.shape
rel_logits = torch.einsum("b n h w d, m d -> b n h w m", q, rel_k)
# Collapse height and heads
rel_logits = torch.reshape(rel_logits, (-1, Nh * H, W, 2 * W - 1))
rel_logits = rel_to_abs(rel_logits)
rel_logits = torch.reshape(rel_logits, (-1, Nh, H, W, W))
rel_logits = expand_dim(rel_logits, dim=3, k=H)
return rel_logits
class AbsPosEmb(nn.Module):
def __init__(self, height, width, dim_head):
super().__init__()
# assert height == width
scale = dim_head ** -0.5
self.height = nn.Parameter(torch.randn(height, dim_head) * scale)
self.width = nn.Parameter(torch.randn(width, dim_head) * scale)
def forward(self, q):
emb = rearrange(self.height, "h d -> h () d") + rearrange(
self.width, "w d -> () w d"
)
emb = rearrange(emb, " h w d -> (h w) d")
logits = einsum("b h i d, j d -> b h i j", q, emb)
return logits
class RelPosEmb(nn.Module):
def __init__(self, height, width, dim_head):
super().__init__()
# assert height == width
scale = dim_head ** -0.5
self.height = height
self.width = width
self.rel_height = nn.Parameter(torch.randn(height * 2 - 1, dim_head) * scale)
self.rel_width = nn.Parameter(torch.randn(width * 2 - 1, dim_head) * scale)
def forward(self, q):
h = self.height
w = self.width
q = rearrange(q, "b h (x y) d -> b h x y d", x=h, y=w)
rel_logits_w = relative_logits_1d(q, self.rel_width)
rel_logits_w = rearrange(rel_logits_w, "b h x i y j-> b h (x y) (i j)")
q = rearrange(q, "b h x y d -> b h y x d")
rel_logits_h = relative_logits_1d(q, self.rel_height)
rel_logits_h = rearrange(rel_logits_h, "b h x i y j -> b h (y x) (j i)")
return rel_logits_w + rel_logits_h
class BoTBlock(nn.Module):
def __init__(
self,
dim,
fmap_size,
dim_out,
stride=1,
heads=4,
proj_factor=4,
dim_qk=128,
dim_v=128,
rel_pos_emb=False,
activation=nn.ReLU(),
):
"""
dim: channels in feature map
dim_out: output channels for feature map
"""
super().__init__()
if dim != dim_out or stride != 1:
self.shortcut = nn.Sequential(
nn.Conv2d(dim, dim_out, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(dim_out),
activation,
)
else:
self.shortcut = nn.Identity()
bottleneck_dimension = dim_out // proj_factor # from 2048 to 512
attn_dim_out = heads * dim_v
self.net = nn.Sequential(
nn.Conv2d(dim, bottleneck_dimension, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(bottleneck_dimension),
activation,
MHSA(
dim=bottleneck_dimension,
fmap_size=fmap_size,
heads=heads,
dim_qk=dim_qk,
dim_v=dim_v,
rel_pos_emb=rel_pos_emb,
),
nn.AvgPool2d((2, 2)) if stride == 2 else nn.Identity(), # same padding
nn.BatchNorm2d(attn_dim_out),
activation,
nn.Conv2d(attn_dim_out, dim_out, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(dim_out),
)
nn.init.zeros_(
self.net[-1].weight
) # last batch norm uses zero gamma initializer
self.activation = activation
def forward(self, featuremap):
shortcut = self.shortcut(featuremap)
featuremap = self.net(featuremap)
featuremap += shortcut
return self.activation(featuremap)
class MHSA(nn.Module):
def __init__(
self, dim, fmap_size, heads=4, dim_qk=128, dim_v=128, rel_pos_emb=False
):
"""
dim: number of channels of feature map
fmap_size: [H, W]
dim_qk: vector dimension for q, k
dim_v: vector dimension for v (not necessarily the same with q, k)
"""
super().__init__()
self.scale = dim_qk ** -0.5
self.heads = heads
out_channels_qk = heads * dim_qk
out_channels_v = heads * dim_v
self.to_qk = nn.Conv2d(
dim, out_channels_qk * 2, 1, bias=False
) # 1*1 conv to compute q, k
self.to_v = nn.Conv2d(
dim, out_channels_v, 1, bias=False
) # 1*1 conv to compute v
self.softmax = nn.Softmax(dim=-1)
height, width = fmap_size
if rel_pos_emb:
self.pos_emb = RelPosEmb(height, width, dim_qk)
else:
self.pos_emb = AbsPosEmb(height, width, dim_qk)
def forward(self, featuremap):
"""
featuremap: [B, d_in, H, W]
Output: [B, H, W, head * d_v]
"""
heads = self.heads
B, C, H, W = featuremap.shape
q, k = self.to_qk(featuremap).chunk(2, dim=1)
v = self.to_v(featuremap)
q, k, v = map(
lambda x: rearrange(x, "B (h d) H W -> B h (H W) d", h=heads), (q, k, v)
)
q *= self.scale
logits = einsum("b h x d, b h y d -> b h x y", q, k)
logits += self.pos_emb(q)
weights = self.softmax(logits)
attn_out = einsum("b h x y, b h y d -> b h x d", weights, v)
attn_out = rearrange(attn_out, "B h (H W) d -> B (h d) H W", H=H)
return attn_out
class BoTStack(nn.Module):
def __init__(
self,
dim,
fmap_size,
dim_out=2048,
heads=4,
proj_factor=4,
num_layers=3,
stride=2,
dim_qk=128,
dim_v=128,
rel_pos_emb=False,
activation=nn.ReLU(),
):
"""
dim: channels in feature map
fmap_size: [H, W]
"""
super().__init__()
self.dim = dim
self.fmap_size = fmap_size
layers = []
for i in range(num_layers):
is_first = i == 0
dim = dim if is_first else dim_out
fmap_divisor = 2 if stride == 2 and not is_first else 1
layer_fmap_size = tuple(map(lambda t: t // fmap_divisor, fmap_size))
layers.append(
BoTBlock(
dim=dim,
fmap_size=layer_fmap_size,
dim_out=dim_out,
stride=stride if is_first else 1,
heads=heads,
proj_factor=proj_factor,
dim_qk=dim_qk,
dim_v=dim_v,
rel_pos_emb=rel_pos_emb,
activation=activation,
)
)
self.net = nn.Sequential(*layers)
def forward(self, x):
_, c, h, w = x.shape
assert c == self.dim, f"assert {c} == self.dim {self.dim}"
assert h == self.fmap_size[0] and w == self.fmap_size[1]
return self.net(x)
def botnet50(pretrained=False, **kwargs):
"""
Bottleneck Transformers for Visual Recognition.
https://arxiv.org/abs/2101.11605
"""
resnet = resnet50(pretrained, **kwargs)
layer = BoTStack(dim=1024, fmap_size=(14, 14), stride=1, rel_pos_emb=True)
backbone = list(resnet.children())
model = nn.Sequential(
*backbone[:-3],
layer,
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(1),
nn.Linear(2048, 1000),
)
return model
def test_botnet50():
x = torch.ones(16, 3, 224, 224).cuda()
model = botnet50().cuda()
y = model(x)
print(y.shape)
def test_backbone():
x = torch.ones(16, 3, 256, 128).cuda()
resnet = resnet50()
layer = BoTStack(dim=1024, fmap_size=(16, 8), stride=1, rel_pos_emb=True)
backbone = list(resnet.children())
model = nn.Sequential(
*backbone[:-3],
layer,
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(1),
nn.Linear(2048, 1000),
).cuda()
y = model(x)
print(y.shape)
if __name__ == "__main__":
test_backbone()
| StarcoderdataPython |
86510 | import os, json
from blockfrost import BlockFrostApi, ApiError
from blockfrost.utils import convert_json_to_object
hash = "8f55e18a94e4c0951e5b8bd8910b2cb20aa4d742b1608fda3a06793d39fb07b1"
xpub = "d507c8f866691bd96e131334c355188b1a1d0b2fa0ab11545075aab332d77d9eb19657ad13ee581b56b0f8d744d66ca356b93d42fe176b3de007d53e9c4c4e7a"
role = 0
index = 0
def test_utils_addresses_xpub(requests_mock):
api = BlockFrostApi()
mock_data = [
{
"xpub": "d507c8f866691bd96e131334c355188b1a1d0b2fa0ab11545075aab332d77d9eb19657ad13ee581b56b0f8d744d66ca356b93d42fe176b3de007d53e9c4c4e7a",
"role": 0,
"index": 0,
"address": "addr1q90sqnljxky88s0jsnps48jd872p7znzwym0jpzqnax6qs5nfrlkaatu28n0qzmqh7f2cpksxhpc9jefx3wrl0a2wu8q5amen7"
}
]
requests_mock.get(f"{api.url}/utils/addresses/xpub/{xpub}/{role}/{index}", json=mock_data)
assert api.utils_addresses_xpub(xpub, role, index) == convert_json_to_object(mock_data)
def test_integration_utils_addresses_xpub():
if os.getenv('BLOCKFROST_PROJECT_ID_MAINNET'):
api = BlockFrostApi(project_id=os.getenv('BLOCKFROST_PROJECT_ID_MAINNET'))
assert api.utils_addresses_xpub(xpub, role, index)
def test_utils_transaction_evaluate(requests_mock):
api = BlockFrostApi()
mock_data = hash
requests_mock.post(f"{api.url}/utils/txs/evaluate", json=mock_data)
assert api.transaction_evaluate(file_path="./README.md") == convert_json_to_object(mock_data)
| StarcoderdataPython |
1710112 | # =============================================================================
# SIMULATION-BASED ENGINEERING LAB (SBEL) - http://sbel.wisc.edu
#
# Copyright (c) 2019 SBEL
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# at https://opensource.org/licenses/BSD-3-Clause
#
# =============================================================================
# Contributors: <NAME>, <NAME>
# =============================================================================
#!/usr/bin/env python3
import numpy as np
import sys
import os
from integrate import integrate
from writefile import writeosprayfile
from writeforcefile import writeforcefile
from params import params
def main(friction, out_dir, top_mass, unique):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
grav = np.array([0,0,-980])
setup = {"nb": 6,
"gravity" : grav,
"envelope" : 1e-7,
"static_friction" : friction, # TODO allow combination of body friction at contact
"mu_tilde" : 0.1,
"eps_0" : 0.1,
"mu_star" : 1e-9,
"eps_star" : 1e-6,
"tau_mu" : 0.2,
"tau_eps" : 0.1,
"tolerance" : 1e-3,
"dt" : 1e-3,
"time_end": 3,
"max_iterations" : 300,
"unique" : unique,
"prefix" : out_dir + "/step",
"suffix" : ".csv"}
gran_params = params(setup)
id = 1
sphere_mass = 1.0
sphere_z = 1.0
sphere_radius = 1.0
for x in [-1.0,1.0]:
for y in [-1.0,1.0]:
pos = np.array([x,y,sphere_z])
rot = np.array([1,0,0,0])
gran_params.add_sphere(pos, rot, sphere_mass, sphere_radius, id)
id += 1
top_id = 0
top_radius = 1.0
top_z = 1 + np.sqrt(top_radius**2 + 2 * top_radius * sphere_radius + sphere_radius**2 - 2)
pos = np.array([0,0,top_z])
rot = np.array([1,0,0,0])
gran_params.add_sphere(pos, rot, top_mass, top_radius, top_id)
box_id = 5
box_mass = 4.0
box_hdims = np.array([4,4,0.5])
box_z = -0.5
pos = np.array([0,0,box_z])
rot = np.array([1,0,0,0])
gran_params.add_box(pos, rot, box_hdims, box_mass, box_id, fixed=True)
c_pos = np.array([])
f_contact = np.array([])
# print(gran_params)
step = 0
t = 0.0
t_settling = 0.1
pushing = False
out_fps = 100.0
out_steps = 1.0 / (out_fps * gran_params.dt)
frame = 0
while t < gran_params.time_end:
if step % out_steps == 0:
frame_s = '%06d' % frame
print('Rendering frame ' + frame_s)
filename = gran_params.prefix + frame_s + gran_params.suffix
writeosprayfile(gran_params.q, gran_params.v, frame_s, gran_params)
filename = gran_params.prefix + frame_s + '_forces' + gran_params.suffix
frame += 1
new_q, new_v, new_a, c_pos, f_contact = integrate(gran_params.q, gran_params.v, gran_params)
gran_params.q = new_q
gran_params.v = new_v
if gran_params.q[7*top_id + 2] <= top_z / 2.0:
return True
t += gran_params.dt
step += 1
return False
if __name__ == '__main__':
argv = sys.argv
if len(sys.argv) != 5:
print("usage " + argv[0] + " <friction> <out_dir> <top_mass> <unique?>")
exit(1)
fric = float(argv[1])
out_dir = argv[2]
mass = float(argv[3])
unique = bool(int(argv[4]))
print("fric: ", fric, " mass: ", mass, " unique: ", unique)
print(main(fric, out_dir, mass, unique))
| StarcoderdataPython |
50820 | # login.txt should contain address on first line and app specific password on the second
#
# <EMAIL>
# <PASSWORD>
def sendEmail(subject, message_):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
with open("login.txt") as f:
login = f.read().splitlines()
gmailUser = login[0]
gmailPassword = login[1]
recipient = login[0]
message = message_
msg = MIMEMultipart()
msg['From'] = gmailUser
msg['To'] = recipient
msg['Subject'] = subject
msg.attach(MIMEText(message))
mailServer = smtplib.SMTP('smtp.gmail.com', 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmailUser, gmailPassword)
mailServer.sendmail(gmailUser, recipient, msg.as_string())
mailServer.close() | StarcoderdataPython |
3228606 | import connexion
import six
from swagger_server.models.schedule_option import ScheduleOption # noqa: E501
from swagger_server import util
from sqlalchemy import exc
def add_schedule_option(body): # noqa: E501
"""Add a schedule_option to the classdeck
# noqa: E501
:param body: ScheduleOption object that needs to be added to the system
:type body: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
json = connexion.request.get_json()
body = ScheduleOption.from_dict(json) # noqa: E501
insert_string = """
INSERT INTO schedule_option (nuid, title, semester_id)
VALUES ({0}, '{1}', {2});
""".format(json["nuid"], json["title"], json["semester"])
try:
session_cookie = connexion.request.cookies.get("session")
session_NUID = connexion.JWT_verify(session_cookie)
if session_NUID == str(body.nuid).zfill(9):
db_conn = connexion.DB(connexion.DB_ENG)
trans = db_conn.begin()
result = db_conn.execute(insert_string)
trans.commit()
db_conn.close()
return ["Accepted", result.lastrowid], 201
else:
return "Forbidden", 403
except exc.IntegrityError as err:
return "Could not add pursued degree", 406
except KeyError:
return "Forbidden", 403
return "Bad Request", 400
def delete_schedule_option(schedule_option_id): # noqa: E501
"""Deletes a schedule_option
# noqa: E501
:param schedule_option_id: ScheduleOption id to delete
:type schedule_option_id: int
:rtype: None
"""
delete_string = "DELETE FROM schedule_option WHERE schedule_option_id = {0};".format(schedule_option_id)
try:
session_cookie = connexion.request.cookies.get("session")
session_NUID = connexion.JWT_verify(session_cookie)
db_conn = connexion.DB(connexion.DB_ENG)
result = db_conn.execute(delete_string)
db_conn.close()
return "Accepted", 201
except exc.IntegrityError:
return "Could not delete schedule option", 406
except KeyError:
return "Forbidden", 403
def duplicate_schedule_option(sch_opt_id):
try:
session_cookie = connexion.request.cookies.get("session")
session_NUID = connexion.JWT_verify(session_cookie)
db_conn = connexion.DB_ENG.raw_connection()
cursor = db_conn.cursor()
cursor.callproc("duplicate_schedule_option", [sch_opt_id])
db_conn.commit()
cursor.close()
db_conn.close()
return "Accepted", 200
except exc.IntegrityError:
return "Server error", 500
except KeyError:
return "Forbidden", 403
def update_schedule_option():
if connexion.request.is_json:
body: ScheduleOption = ScheduleOption.from_dict(connexion.request.get_json()) # noqa: E501
update_string = """
UPDATE schedule_option
SET title = "{1}"
WHERE schedule_option_id = {0};
""".format(body.schedule_id, body.title)
try:
session_cookie = connexion.request.cookies.get("session")
session_NUID = connexion.JWT_verify(session_cookie)
db_conn = connexion.DB(connexion.DB_ENG)
db_conn.execute(update_string)
db_conn.close()
return "Accepted", 201
except exc.IntegrityError:
return "Already Exists", 202
except KeyError:
return "Forbidden", 403
return "Bad Request", 400
def get_schedule_option_by_nuid(nuid): # noqa: E501
"""List schedule_option by NUID
Returns the schedule_options related to the given NUID # noqa: E501
:param nuid: nuid of the user related to the schedule_option to return
:type nuid: int
:rtype: None
"""
select_string = """
SELECT
DISTINCT
opt.schedule_option_id,
opt.title,
opt.semester_id,
sec.crn,
sec.class_dept,
sec.class_number,
sec.professor,
mt.start_time,
mt.end_time,
mt.meeting_days,
cls.name,
cls.description,
satisfies_degree_requirement({0}, cls.class_dept, cls.class_number) AS part_of_degree
FROM schedule_option AS opt
LEFT OUTER JOIN schedule_option_section AS opt_s ON opt.schedule_option_id = opt_s.schedule_option_id
LEFT OUTER JOIN section AS sec ON opt_s.section_crn = sec.crn
LEFT OUTER JOIN meeting_times AS mt ON sec.crn = mt.crn
LEFT OUTER JOIN class AS cls ON (sec.class_dept = cls.class_dept AND sec.class_number = cls.class_number)
WHERE nuid = {0};
""".format(nuid)
try:
session_cookie = connexion.request.cookies.get("session")
session_NUID = connexion.JWT_verify(session_cookie)
if session_NUID == str(nuid).zfill(9):
db_conn = connexion.DB(connexion.DB_ENG)
result = db_conn.execute(select_string)
db_conn.close()
res = []
opt = {
"schedule_option_id": -1,
"nuid": -1,
"title": -1,
"semester_id": -1,
"sections": []
}
for schedule_option_id, title, semester_id, crn, class_dept, \
class_number, professor, start_time, end_time, meeting_days, \
cname, cdesc, part_of_degree in result.fetchall():
if opt["schedule_option_id"] == schedule_option_id:
mapped_crns = list(map(lambda s: s["crn"], opt["sections"]))
if crn not in mapped_crns:
opt["sections"].append({
"class_dept": class_dept,
"class_number": class_number,
"professor": professor,
"crn": crn,
"cname": cname,
"cdesc": cdesc,
"part_of_degree": part_of_degree
})
else:
if opt["schedule_option_id"] != -1:
res.append(opt)
opt = {
"schedule_option_id": schedule_option_id,
"nuid": nuid,
"title": title,
"semester_id": semester_id,
"sections": []
}
if crn is not None:
mapped_crns = list(map(lambda s: s["crn"], opt["sections"]))
if crn not in mapped_crns:
opt["sections"].append({
"class_dept": class_dept,
"class_number": class_number,
"professor": professor,
"crn": crn,
"cname": cname,
"cdesc": cdesc,
"part_of_degree": part_of_degree
})
if opt["schedule_option_id"] != -1:
res.append(opt)
return res, 201
else:
return "Forbidden", 403
except exc.IntegrityError as err:
return "Could not add pursued degree", 406
except KeyError:
return "Forbidden", 403
| StarcoderdataPython |
194527 | <filename>seldom/db_operation/mongo_db.py
try:
from pymongo import MongoClient
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install the library. https://github.com/mongodb/mongo-python-driver")
class MongoDB:
def __new__(cls, host, port, db):
"""
Connect the mongodb database
"""
client = MongoClient(host, port)
db_obj = client[db]
return db_obj
if __name__ == '__main__':
mongo_db = MongoDB("localhost", 27017, "yapi")
col = mongo_db.list_collection_names()
print("collection list: ", col)
data = mongo_db.project.find_one()
print("table one data:", data)
| StarcoderdataPython |
1769320 | <reponame>sergei-dyshel/tmux-clost
from lib.tmux import run
for i in xrange(100):
run(['display-message', '-p', '#{pane_id}'], cm=False)
# run(['send-keys', 'Escape'], cm=False)
# run(['list-keys'], cm=False)
| StarcoderdataPython |
1742367 | <gh_stars>1-10
# Copyright (C) 2021 <NAME>
# All Rights Reserved.
#
from aiohttp import ClientSession
from userbot import CMD_HELP
from userbot.events import register
async def get_nekos_img(args):
nekos_baseurl = "https://nekos.life/api/v2/img/"
if args == "random_hentai_gif":
args = "Random_hentai_gif"
async with ClientSession() as ses, ses.get(nekos_baseurl + args) as r:
result = await r.json()
return result
@register(outgoing=True, pattern=r"^\.nekos(?: |$)(.*)")
async def nekos_media(event):
args = event.pattern_match.group(1)
args_error = "Do `.help nekos` to see available arguments."
if not args:
return await event.edit(args_error)
result = await get_nekos_img(args)
if result.get("msg") == "404":
return await event.edit(args_error)
media_url = result.get("url")
await event.edit("`Fetching from nekos...`")
await event.client.send_file(
entity=event.chat_id,
file=media_url,
caption=f"[Source]({media_url})",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await event.delete()
CMD_HELP.update(
{
"nekos": ">`.nekos <arguments>`"
"\nUsage: For fetching images from nekos"
"\n\nArguments : `8ball`, `anal`, `avatar`, `baka`, `bj`, "
"`blowjob`, `boobs`, `classic`, `cuddle`, `cum`, "
"`cum_jpg`, `ero`, `erofeet`, `erok`, `erokemo`, "
"`eron`, `eroyuri`, `feed`, `feet`, `feetg`, "
"`femdom`, `fox_girl`, `futanari`, `gasm`, `gecg`, "
"`goose`, `hentai`, `holo`, `holoero`, `hololewd`, "
"`hug`, `kemonomimi`, `keta`, `kiss`, `kuni`, "
"`les`, `lewd`, `lewdk`, `lewdkemo`, `lizard`, "
"`neko`, `ngif`, `nsfw_avatar`, `nsfw_neko_gif`, `pat`, "
"`poke`, `pussy`, `pussy_jpg`, `pwankg`, `random_hentai_gif`, "
"`slap`, `smallboobs`, `smug`, `solo`, `solog`, "
"`spank`, `tickle`, `tits`, `trap`, `waifu`, "
"`wallpaper`, `woof`, `yuri`"
}
)
| StarcoderdataPython |
1733034 | import degooged_tube.ytApiHacking as ytapih
import degooged_tube.config as cfg
from typing import Union, Tuple
from degooged_tube.subboxChannel import SubBoxChannel, ChannelLoadIssue, loadChannel, callReload
from degooged_tube import getPool
from degooged_tube.helpers import paginationCalculator
class EndOfSubBox(Exception):
pass
class NoVideo(Exception):
pass
class AlreadySubscribed(Exception):
pass
def listsOverlap(l1, l2):
return not set(l1).isdisjoint(l2)
def setsOverlap(s1:set, s2:set):
return not s1.isdisjoint(s2)
class SubBox:
channels: list[SubBoxChannel]
channelDict: dict[str, SubBoxChannel]
atMaxLen: bool
orderedUploads: list[ytapih.Upload]
prevOrdering: list[str]
def __init__(self, subBoxChannels: list[SubBoxChannel], prevOrdering: list):
self.channels = subBoxChannels
self.channelDict = {}
for channel in self.channels:
self.channelDict[channel.channelId] = channel
self.orderedUploads = []
self.atMaxLen = False
if prevOrdering != []:
raise NotImplementedError
self.prevOrdering = prevOrdering
@classmethod
def fromUrls(cls, urls: list[str], channelTags:list[set[str]] = None, prevOrdering:list = list()) -> 'SubBox':
cfg.logger.info("Loading SubBox... ")
cfg.logger.debug(f"Creating SubBox From Urls:\n{urls}")
channels = []
if channelTags is None:
channelTags = [set() for _ in range(len(urls))]
else:
assert len(urls) == len(channelTags)
pool = getPool()
if cfg.testing or pool is None:
channels = [loadChannel(data) for data in zip(urls, channelTags)]
else:
channels = pool.map(loadChannel, zip(urls, channelTags))
        # Walk backwards so pop() does not shift the indices of entries not yet
        # visited; failed loads come back as strings rather than SubBoxChannel objects.
        for i in reversed(range(len(channels))):
            if isinstance(channels[i], str):
                cfg.logger.info(f"Unable to Subscribe to {channels[i]} \nAre You Sure the URL is Correct?")
                channels.pop(i)
# Remove Duplicate Channels
duplicateIndices = []
for i in range(len(channels)):
for j in range(i+1, len(channels)):
ch1 = channels[i]
ch2 = channels[j]
assert not isinstance(ch1,str)
assert not isinstance(ch2,str)
if ch1 == ch2:
cfg.logger.info(f'{ch1.channelUrl} \nand \n{ch2.channelUrl} \nAre the Same {ch1.channelName}, Removing Duplicate channel')
duplicateIndices.append(i)
        # Pop the higher indices first so earlier pops do not shift the rest
        for i in sorted(set(duplicateIndices), reverse=True):
            channels.pop(i)
return cls(channels, prevOrdering)
def reload(self):
self.orderedUploads.clear()
self.atMaxLen = False
        # Reload every channel, in parallel when a worker pool is available
        pool = getPool()
        if cfg.testing or pool is None:
            for channel in self.channels:
                channel.reload()
        else:
            pool.map(callReload, self.channels)
self.channelDict = {}
for channel in self.channels:
self.channelDict[channel.channelId] = channel
def _getNextChannelWithMoreUploads(self, startIndex: int) -> Tuple[int, SubBoxChannel, ytapih.Upload]:
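        """Return (channel index, channel, next queued upload) for the first
        channel at or after startIndex whose upload queue is non-empty (the
        upload is peeked, not popped); raises NoVideo if every channel from
        startIndex onwards is exhausted."""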
channelIndex = startIndex
while channelIndex < len(self.channels):
channel = self.channels[channelIndex]
try:
return channelIndex, channel, channel.peekNextUploadInQueue()
except IndexError:
channelIndex+=1
raise NoVideo
def _appendNextUpload(self):
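        """Pop the most recent upload (largest unixTime) across all channel
        queues and append it to orderedUploads; raises EndOfSubBox once every
        queue is empty."""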
if self.atMaxLen:
cfg.logger.debug("End of SubBox Reached!")
raise EndOfSubBox
# mostRecentIndex / contenderIndex are indices of channels, we are checking for the channel who uploaded most recently
try:
mostRecentIndex, mostRecentChannel, mostRecentVideo = self._getNextChannelWithMoreUploads(0)
except NoVideo:
self.atMaxLen = True
cfg.logger.debug("End of SubBox Reached!")
raise EndOfSubBox
contenderIndex = mostRecentIndex
while True:
try:
contenderIndex, contenderChannel, contenderVideo = self._getNextChannelWithMoreUploads(contenderIndex+1)
except NoVideo:
break
if contenderVideo.unixTime > mostRecentVideo.unixTime:
mostRecentIndex = contenderIndex
mostRecentChannel = contenderChannel
mostRecentVideo = contenderVideo
self.orderedUploads.append(mostRecentChannel.popNextUploadInQueue())
def _getChannelIdsUnderTags(self, tags: set[str]):
# a channel must have every tag in tags in order to appear (tags must be a subset of the channels tags)
return [channel.channelId for channel in self.channels if tags.issubset(channel.tags)]
def _numUploads(self, channelIdWhitelist: list[str]):
if len(channelIdWhitelist) == 0:
return len(self.orderedUploads)
num = 0
for upload in self.orderedUploads:
if upload.channelId in channelIdWhitelist:
num+=1
return num
def _extendOrderedUploads(self, desiredLen: int, channelIdWhitelist: Union[list[str], None]):
initalLength = len(self.orderedUploads)
debugMessage = \
f"SubBox Extenion Requested:\n" \
f"Length Before Extension: {initalLength}\n" \
f"Desired Length: {desiredLen}\n" \
f"Length After Extenion: {len(self.orderedUploads)}"
if channelIdWhitelist is None or len(channelIdWhitelist) == 0:
numExtend = desiredLen - initalLength
for _ in range(numExtend):
self._appendNextUpload()
else:
currentLen = self._numUploads(channelIdWhitelist)
initalLen = currentLen
numExtend = desiredLen - currentLen
while numExtend > 0:
for _ in range(numExtend):
self._appendNextUpload()
currentLen = self._numUploads(channelIdWhitelist)
numExtend = desiredLen - currentLen
debugMessage += f"\nSpecified Channels: {channelIdWhitelist}"\
f"\nLength of Tagged Uploads Before Extension: {initalLen}"\
f"\nLength of Tagged Uploads After Extension: {currentLen}"\
cfg.logger.debug(debugMessage)
def getLimitOffset(self, limit: int, offset: int, tags: Union[set[str], None] = None) -> list[ytapih.Upload]:
if tags is None or len(tags) == 0:
channelIdWhitelist = None
else:
channelIdWhitelist = self._getChannelIdsUnderTags(tags)
if len(channelIdWhitelist) == 0:
cfg.logger.debug(f"Provided Tags: {tags} Exclude All Channels")
return []
# special case for tag filtering that involves one channel
if len(channelIdWhitelist) == 1:
return self.channelDict[channelIdWhitelist[0]].uploadList.getLimitOffset(limit, offset)
desiredLen = limit + offset
try:
self._extendOrderedUploads(desiredLen, channelIdWhitelist)
except EndOfSubBox:
pass
if channelIdWhitelist is None:
uploads = self.orderedUploads
else:
uploads = list(
filter(
lambda upload: upload.channelId in channelIdWhitelist,
self.orderedUploads
)
)
cfg.logger.info(
f"Filtering Subbox by Tags:{tags}\n"
f"SubBox Len Before Filtering: {len(self.orderedUploads)}\n"
f"SubBox Len After Filtering: {len(uploads)}\n"
f"Desired Length: {limit + offset - 1}"
)
start = min(offset, len(uploads))
end = min(offset+limit, len(uploads))
if start >= end:
cfg.logger.debug(f"SubBox.getLimitOffset(limit= {limit}, offset= {offset}), Returning Empty List")
return []
return uploads[offset: offset+limit]
def getPaginated(self, pageNum: int, pageSize: int, tags: Union[set[str], None] = None) -> list[ytapih.Upload]:
limit, offset = paginationCalculator(pageNum, pageSize)
return self.getLimitOffset(limit, offset, tags)
def addChannelFromInitalPage(self, initalPage: ytapih.YtInitalPage, tags: set[str] = set()):
channel = SubBoxChannel.fromInitalPage(initalPage, tags)
cfg.logger.debug(f"Adding new Channel to SubBox\nName:{channel.channelName}\nURL: {channel.channelUrl}")
for c in self.channels:
if channel == c:
cfg.logger.debug(f"Channel ({channel.channelName}, {channel.channelId}) Already exists in SubBox:\n{[(channel.channelName, channel.channelId) for channel in self.channels]}")
cfg.logger.error(f"You're Already Subscribed to {channel.channelName}")
raise AlreadySubscribed()
channelUploadIndex = 0
orderedUploadIndex = 0
while orderedUploadIndex < len(self.orderedUploads):
c1Upload = channel.uploadList[channelUploadIndex]
orderedUpload = self.orderedUploads[orderedUploadIndex]
if c1Upload.unixTime >= orderedUpload.unixTime:
cfg.logger.debug(
f"Inserting New Channel Video Into SubBox\n"
f"New Insert Id : {c1Upload.videoId} Unix Time: {c1Upload.unixTime} Title: {c1Upload.title}\n"
f"Pushed Back Id: {orderedUpload.videoId} Unix Time: {orderedUpload.unixTime} Title: {orderedUpload.title}"
)
self.orderedUploads.insert(orderedUploadIndex, c1Upload)
channelUploadIndex+=1
orderedUploadIndex+=1
self.channels.append(
channel
)
self.channelDict[channel.channelId] = channel
self.atMaxLen = False
return channel
def addChannelFromUrl(self, url: str, tags: set[str] = set()):
url = ytapih.sanitizeChannelUrl(url, ytapih.ctrlp.channelVideoPath)
for channel in self.channelDict.values():
if url == channel.channelUrl:
cfg.logger.debug(f"url: {url} Already exists in SubBox Urls:\n{[channel.channelUrl for channel in self.channels]}")
cfg.logger.info(f"You're Already Subscribed to {url}")
raise AlreadySubscribed()
channel = self.addChannelFromInitalPage(ytapih.YtInitalPage.fromUrl(url), tags)
return channel
def popChannel(self, channelIndex: int):
channelId = self.channels[channelIndex].channelId
cfg.logger.debug(f"Remvoing Channel from SubBox, Id: {channelId}")
i = 0
while i < len(self.orderedUploads):
upload = self.orderedUploads[i]
if upload.channelId != channelId:
i+=1
continue
self.orderedUploads.pop(i)
channel = self.channels[channelIndex]
self.channelDict.pop(self.channels[channelIndex].channelId)
self.channels.pop(channelIndex)
return channel
def getChannelIndex(self, channelId: str):
for channelIndex in range(len(self.channels)):
channel = self.channels[channelIndex]
if channel.channelId == channelId:
return channelIndex
cfg.logger.debug(f"Channel Ids: {[key for key in self.channelDict.keys()]}")
raise KeyError(f"No Channel with Channel Id: {channelId}")
def getAllTags(self):
tags = set()
for channel in self.channels:
tags.update(channel.tags)
return tags
| StarcoderdataPython |
3330289 | <reponame>MaxStrange/nlp
"""
This module provides a command line interface for making graphs and charts of the data.
"""
import argparse
import betrayal
import matplotlib.pyplot as plt
import os
import sys
def plot_triplet(relationship):
"""
Plots the given triplet/relationship.
"""
fvs = [s.to_feature_vector() for s in relationship]
b_nwords = [fv[0] for fv in fvs]
b_nsents = [fv[1] for fv in fvs]
b_nrequs = [fv[2] for fv in fvs]
b_polite = [fv[3] for fv in fvs]
b_sentim = [fv[4] for fv in fvs]
v_nwords = [fv[5] for fv in fvs]
v_nsents = [fv[6] for fv in fvs]
v_nrequs = [fv[7] for fv in fvs]
v_polite = [fv[8] for fv in fvs]
v_sentim = [fv[9] for fv in fvs]
b = relationship.people[0]
v = relationship.people[1]
print(b, v) # E.g., France, Russia
def plot_feature(subplotnum, title, xlabel, line, color, plot_label):
plt.subplot(subplotnum)
plt.title(title)
plt.xlabel(xlabel)
plt.xticks([0, 1, 2], [str(s.year) + " " + s.season.title() for s in relationship])
plt.plot(line, color, label=plot_label)
plt.legend()
plt.tight_layout()
plot_feature(231, "Number of words", "Turn", b_nwords, "ro-", b)
plot_feature(231, "Number of words", "Turn", v_nwords, "b^--", v)
plot_feature(232, "Number of Sentences", "Turn", b_nsents, "ro-", b)
plot_feature(232, "Number of Sentences", "Turn", v_nsents, "b^--", v)
plot_feature(233, "Number of Requests", "Turn", b_nrequs, "ro-", b)
plot_feature(233, "Number of Requests", "Turn", v_nrequs, "b^--", v)
plot_feature(234, "Avg Message Politeness", "Turn", b_polite, "ro-", b)
plot_feature(234, "Avg Message Politeness", "Turn", v_polite, "b^--", v)
plot_feature(235, "Avg Message Sentiment", "Turn", b_sentim, "ro-", b)
plot_feature(235, "Avg Message Sentiment", "Turn", v_sentim, "b^--", v)
plt.show()
def execute(args):
"""
Execute the program based on the given arguments.
Returns whether the program did something or not.
"""
if args.triplet:
betrayals, relationship = betrayal.betrayal(args.triplet)
print("Betrayals:", betrayals)
print("Relationship:", relationship)
plot_triplet(relationship)
return True
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="This is the visualization CLI for the diplomacy dataset.")
parser.add_argument("-t", "--triplet", help="Give the CLI three YAML files to make a relationship out of and have it graph the resulting realtionship with the charting suite.",
nargs=3, metavar=("path/to/file1.yml", "path/to/file2.yml", "path/to/file3.yml"))
args = parser.parse_args()
did_something = execute(args)
if not did_something:
print("----------------------------------------------")
print("ERROR: You need to supply at least one argument.")
print("----------------------------------------------")
parser.print_help()
exit()
| StarcoderdataPython |
3286834 | <gh_stars>0
# https://www.hackerrank.com/challenges/insert-a-node-into-a-sorted-doubly-linked-list/problem
import math
import os
import random
import re
import sys
class DoublyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
self.prev = None
class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = DoublyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
node.prev = self.tail
self.tail = node
def print_doubly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
def sortedInsert(head, data):
    """
    Insert a new node into a sorted doubly linked list given the head and the data to be inserted.
    :param head: the head of the doubly linked list
    :param data: the content to be stored into a new node of the doubly-linked list
    :returns: the head of the new list with the inserted data node
    """
    new_node = DoublyLinkedListNode(data)
    # Empty list: the new node becomes the head
    if head is None:
        return new_node
    # Insert at the beginning of the list if the head's data is not smaller than data
    if head.data >= data:
        new_node.next = head
        head.prev = new_node
        return new_node
    # Regular insertion: walk the list to find the first node whose data is >= data
    position = head
    while data > position.data and position.next is not None:
        position = position.next
    if data > position.data:
        # Reached the tail and data is still larger: append at the end
        position.next = new_node
        new_node.prev = position
    else:
        # Insert the new node just before `position`
        prev_node = position.prev
        prev_node.next = new_node
        new_node.prev = prev_node
        new_node.next = position
        position.prev = new_node
    return head
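if __name__ == '__main__':
    # Illustrative sketch (not part of the original HackerRank stub): build the
    # list 1 <-> 3 <-> 4, insert 2 with sortedInsert, and print "1 2 3 4".
    llist = DoublyLinkedList()
    for value in (1, 3, 4):
        llist.insert_node(value)
    llist.head = sortedInsert(llist.head, 2)
    print_doubly_linked_list(llist.head, ' ', sys.stdout)
    sys.stdout.write('\n')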
| StarcoderdataPython |
1750626 | <reponame>aidotse/Team-rahma.ai
# coding=utf-8
"""regexp_editor - give a user feedback on their regular expression
"""
import re
import wx
import wx.stc
STYLE_NO_MATCH = 0
STYLE_MATCH = 1
STYLE_FIRST_LABEL = 2
STYLE_ERROR = 31
UUID_REGEXP = (
"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}"
)
RE_FILENAME_GUESSES = [
# This is the generic naming convention for fluorescent microscopy images
"^(?P<Plate>.*?)_(?P<Well>[A-Za-z]+[0-9]+)f(?P<Site>[0-9]{2})d(?P<Dye>[0-9])\\.tif$",
# Molecular devices single site
"^(?P<ExperimentName>.*?)_(?P<Well>[A-Za-z]+[0-9]+)_w(?P<Wavelength>[0-9])_?"
+ UUID_REGEXP
+ "\\.tif$",
# Plate / well / site / channel without UUID
"^(?P<Plate>.*?)_(?P<Well>[A-Za-z]+[0-9]+)_s(?P<Site>[0-9])_w(?P<Wavelength>[0-9])\\.tif$",
# Molecular devices multi-site
"^(?P<ExperimentName>.*?)_(?P<Well>[A-Za-z]+[0-9]+)_s(?P<Site>[0-9])_w(?P<Wavelength>[0-9])"
+ UUID_REGEXP
+ "\\.tif$",
# Molecular devices multi-site, single wavelength
"^(?P<ExperimentName>.*)_(?P<Well>[A-Za-z][0-9]{2})_s(?P<Site>[0-9])" + UUID_REGEXP,
# Plate / well / [UUID]
"^(?P<Plate>.*?)_(?P<Well>[A-Za-z]+[0-9]+)_\\[" + UUID_REGEXP + "\\]\\.tif$",
# Cellomics
"^(?P<ExperimentName>.*)_(?P<Well>[A-Za-z][0-9]{1,2})f(?P<Site>[0-9]{1,2})d(?P<Wavelength>[0-9])",
# BD Pathway
"^(?P<Wavelength>.*) - n(?P<StackSlice>[0-9]{6})",
# GE InCell Analyzer
r"^(?P<Row>[A-H]*) - (?P<Column>[0-9]*)\(fld (?P<Site>[0-9]*) wv (?P<Wavelength>.*) - (?P<Filter>.*)\)",
# Phenix
r"^r(?P<WellRow>\d{2})c(?P<WellColumn>\d{2})f(?P<Site>\d{2})p\d{2}-ch(?P<ChannelNumber>\d)",
# GE InCell Analyzer 7.2
r"^(?P<Row>[A-P])_(?P<Column>[0-9]*)_f(?P<Site>[0-9]*)_c(?P<Channel>[0-9]*)_x(?P<Wavelength>.*)_m("
r"?P<Filter>.*)_z(?P<Slice>[0-9]*)_t(?P<Timepoint>[0-9]*)\.tif",
# Please add more guesses below
]
RE_FOLDER_GUESSES = [
# BD Pathway
r".*[\\/](?P<Plate>[^\\/]+)[\\/](?P<Well>[A-Za-z][0-9]{2})",
# Molecular devices
r".*[\\/](?P<Date>\d{4}-\d{1,2}-\d{1,2})[\\/](?P<PlateID>.*)$"
# Please add more guesses below
]
def edit_regexp(parent, regexp, test_text, guesses=None):
if guesses is None:
guesses = RE_FILENAME_GUESSES
frame = RegexpDialog(parent, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
frame.SetMinSize((300, 200))
frame.SetSize((600, 200))
frame.value = regexp
frame.test_text = test_text
frame.guesses = guesses
if frame.ShowModal():
return frame.value
return None
class RegexpDialog(wx.Dialog):
def __init__(self, *args, **varargs):
varargs["title"] = "Regular expression editor"
super(RegexpDialog, self).__init__(*args, **varargs)
self.__value = "Not initialized"
self.__test_text = "Not initialized"
self.__guesses = RE_FILENAME_GUESSES
font = wx.Font(
10, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL
)
self.font = font
self.error_font = font
sizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(hsizer, 0, wx.GROW | wx.ALL, 5)
hsizer.Add(wx.StaticText(self, label="Regex:"), 0, wx.ALIGN_CENTER | wx.ALL, 5)
self.regexp_display = wx.stc.StyledTextCtrl(self, -1, style=wx.BORDER_SIMPLE)
self.regexp_display.SetBufferedDraw(True)
o = self.regexp_display.GetFullTextExtent("".join(["M"] * 50))
w, h = self.regexp_display.ClientToWindowSize(wx.Size(o[1], o[2]))
self.regexp_display.SetMinSize(wx.Size(w, h))
self.regexp_display.Text = self.value
self.regexp_display.SetLexer(wx.stc.STC_LEX_CONTAINER)
for key in range(31):
self.regexp_display.StyleSetFont(key, self.font)
self.regexp_display.StyleSetForeground(TOK_ORDINARY, wx.Colour(0, 0, 0, 255))
self.regexp_display.StyleSetForeground(TOK_ESCAPE, wx.Colour(0, 64, 64, 255))
self.regexp_display.StyleSetForeground(TOK_GROUP, wx.Colour(0, 0, 255, 255))
self.regexp_display.StyleSetForeground(TOK_REPEAT, wx.Colour(0, 128, 0, 255))
self.regexp_display.StyleSetForeground(
TOK_BRACKET_EXP, wx.Colour(64, 64, 64, 255)
)
self.regexp_display.StyleSetForeground(TOK_SPECIAL, wx.Colour(128, 64, 0, 255))
color_db = self.get_color_db()
for i in range(1, 16):
self.regexp_display.StyleSetForeground(
TOK_DEFINITION - 1 + i, color_db[i % len(color_db)]
)
self.regexp_display.StyleSetForeground(
STYLE_ERROR, wx.Colour(255, 64, 128, 255)
)
self.regexp_display.StyleSetFont(34, self.font)
self.regexp_display.StyleSetForeground(34, wx.Colour(0, 0, 255, 255))
self.regexp_display.StyleSetUnderline(34, True)
self.regexp_display.StyleSetFont(35, self.font)
self.regexp_display.StyleSetForeground(35, wx.Colour(255, 0, 0, 255))
self.regexp_display.SetUseVerticalScrollBar(0)
self.regexp_display.SetUseHorizontalScrollBar(0)
self.regexp_display.SetMarginWidth(wx.stc.STC_MARGIN_NUMBER, 0)
self.regexp_display.SetMarginWidth(wx.stc.STC_MARGIN_SYMBOL, 0)
hsizer.Add(self.regexp_display, 1, wx.EXPAND | wx.ALL, 5)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(
wx.StaticText(self, label="Test text:"), 0, wx.ALIGN_CENTER | wx.ALL, 5
)
self.test_text_ctl = wx.TextCtrl(self, value=self.__test_text)
self.test_text_ctl.Font = self.font
hsizer.Add(self.test_text_ctl, 1, wx.ALIGN_CENTER | wx.ALL, 5)
sizer.Add(hsizer, 0, wx.GROW | wx.ALL, 5)
style = wx.NO_BORDER
self.test_display = wx.stc.StyledTextCtrl(self, -1, style=style)
self.test_display.SetLexer(wx.stc.STC_LEX_CONTAINER)
self.test_display.StyleClearAll()
self.test_display.StyleSetFont(STYLE_NO_MATCH, self.font)
self.test_display.StyleSetForeground(
STYLE_NO_MATCH, wx.Colour(128, 128, 128, 255)
)
color_db = self.get_color_db()
for i in range(16):
self.test_display.StyleSetFont(STYLE_FIRST_LABEL - 1 + i, self.font)
self.test_display.StyleSetForeground(
STYLE_FIRST_LABEL - 1 + i, color_db[i % len(color_db)]
)
self.test_display.StyleSetFont(STYLE_ERROR, self.error_font)
self.test_display.StyleSetForeground(STYLE_ERROR, wx.Colour(255, 0, 0, 255))
self.test_display.Text = self.__test_text
self.test_display.SetReadOnly(True)
self.test_display.SetUseVerticalScrollBar(0)
self.test_display.SetUseHorizontalScrollBar(0)
self.test_display.SetMarginWidth(wx.stc.STC_MARGIN_NUMBER, 0)
self.test_display.SetMarginWidth(wx.stc.STC_MARGIN_SYMBOL, 0)
text_extent = self.test_display.GetFullTextExtent(self.__test_text)
self.test_display.SetSizeHints(100, text_extent[1] + 3, maxH=text_extent[1] + 3)
self.test_display.Enable(False)
sizer.Add(self.test_display, 0, wx.EXPAND | wx.ALL, 5)
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW | wx.RIGHT | wx.LEFT, 5)
hsizer = wx.StdDialogButtonSizer()
guess_button = wx.Button(self, label="Guess")
hsizer.Add(guess_button, 0)
ok_button = wx.Button(self, label="Submit")
ok_button.SetDefault()
hsizer.Add(ok_button, 0, wx.LEFT, 5)
cancel_button = wx.Button(self, label="Cancel")
hsizer.Add(cancel_button, 0, wx.LEFT, 5)
hsizer.Realize()
sizer.Add(hsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.on_guess, guess_button)
self.Bind(wx.EVT_BUTTON, self.on_ok_button, ok_button)
self.Bind(wx.EVT_BUTTON, self.on_cancel_button, cancel_button)
self.Bind(wx.EVT_TEXT, self.on_test_text_text_change, self.test_text_ctl)
self.Bind(
wx.stc.EVT_STC_CHANGE, self.on_editor_text_change, self.regexp_display
)
self.Bind(wx.stc.EVT_STC_STYLENEEDED, self.on_style_needed, self.regexp_display)
self.regexp_display.Bind(wx.EVT_KEY_DOWN, self.on_regexp_key)
self.SetSizer(sizer)
self.Fit()
@staticmethod
def on_regexp_key(event):
#
# On Mac, very bad things (infinite recursion through OnPaint
# followed by segfault) happen if you type carriage return
#
if event.GetKeyCode() != wx.stc.STC_KEY_RETURN:
event.Skip()
@staticmethod
def get_color_db():
color_db = [
"BLACK",
"RED",
"GREEN",
"BLUE",
"CYAN",
"MAGENTA",
"SIENNA",
"PURPLE",
]
color_db = [wx.TheColourDatabase.FindColour(x) for x in color_db]
return color_db
def on_guess(self, event):
sample = self.test_text_ctl.GetValue()
for guess in self.guesses:
m = re.match(guess, sample)
if m is not None:
self.regexp_display.Text = guess
break
else:
wx.MessageBox(
"None of the standard regular expressions matches the test text.",
caption="No matching guesses",
style=wx.OK | wx.CENTRE | wx.ICON_INFORMATION,
parent=self,
)
def on_ok_button(self, event):
self.EndModal(1)
def on_cancel_button(self, event):
self.__value = None
self.EndModal(0)
def on_editor_text_change(self, event):
self.__value = self.regexp_display.Text
self.refresh_text()
def on_style_needed(self, event):
self.refresh_regexp()
def on_test_text_text_change(self, event):
self.__test_text = self.test_text_ctl.GetValue()
self.refresh_text()
def refresh_regexp(self):
state = RegexpState()
regexp_text = self.__value
self.regexp_display.StartStyling(0)
self.regexp_display.SetStyling(len(regexp_text), STYLE_ERROR)
try:
parse(regexp_text, state)
except:
pass
for i in range(state.position):
self.regexp_display.StartStyling(i)
self.regexp_display.SetStyling(1, state.token_labels[i])
pos = self.regexp_display.CurrentPos
if state.open_expression_start is not None:
self.regexp_display.BraceBadLight(state.open_expression_start)
elif (
0 < pos < len(state.matching_braces)
and state.matching_braces[pos - 1] is not None
):
self.regexp_display.BraceHighlight(state.matching_braces[pos - 1], pos - 1)
else:
self.regexp_display.BraceHighlight(
wx.stc.STC_INVALID_POSITION, wx.stc.STC_INVALID_POSITION
)
def refresh_text(self):
self.test_display.SetReadOnly(False)
self.test_display.Text = self.__test_text
try:
parse(self.__value, RegexpState())
except ValueError as e:
self.test_display.Text = e.args[0]
self.test_display.StartStyling(0)
self.test_display.SetStyling(len(self.test_display.Text), STYLE_ERROR)
return
try:
match = re.search(self.__value, self.__test_text)
if match:
for i in range(len(match.groups()) + 1):
start = match.start(i)
end = match.end(i)
self.test_display.StartStyling(start)
self.test_display.SetStyling(end - start, i + 1)
else:
self.test_display.Text = "Regular expression does not match"
self.test_display.StartStyling(0)
self.test_display.SetStyling(len(self.test_display.Text), STYLE_ERROR)
except:
self.test_display.Text = "Regular expression is not valid"
self.test_display.StartStyling(0)
self.test_display.SetStyling(len(self.test_display.GetText()), STYLE_ERROR)
self.test_display.SetReadOnly(True)
def get_value(self):
return self.__value
def set_value(self, value):
self.__value = value
self.regexp_display.SetText(value)
self.refresh_regexp()
self.refresh_text()
value = property(get_value, set_value)
def get_test_text(self):
return self.__test_text
def set_test_text(self, test_text):
self.__test_text = test_text
self.test_text_ctl.SetValue(test_text)
self.test_display.SetText(test_text)
self.refresh_text()
test_text = property(get_test_text, set_test_text)
def get_guesses(self):
"""The guess regexps used when the user presses the "guess" button"""
return self.__guesses
def set_guesses(self, value):
self.__guesses = value
guesses = property(get_guesses, set_guesses)
####################
#
# The code below parses regexps, assigning categories to tokens
#
####################
TOK_ORDINARY = 0
TOK_ESCAPE = 1
TOK_GROUP = 2
TOK_BRACKET_EXP = 3
TOK_REPEAT = 4
TOK_SPECIAL = 5
TOK_DEFINITION = 6
HARDCODE_ESCAPES = {
r"\\",
r"\a",
r"\b",
r"\d",
r"\f",
r"\n",
r"\r",
r"\s",
r"\t",
r"\v",
r"\w",
r"\A",
r"\B",
r"\D",
r"\S",
r"\W",
r"\Z",
}
OCTAL_DIGITS = set("01234567")
DECIMAL_DIGITS = set("0123456789")
HEXIDECIMAL_DIGITS = set("0123456789ABCDEFabcdef")
REPEAT_STARTS = set("{*+?")
OTHER_SPECIAL_CHARACTERS = set(".|")
IGNORABLE_GROUPS = (r"\(\?[iLmsux]+\)", r"\(\?#.*\)")
class RegexpState(object):
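    """Mutable tokenizer state used while scanning a regular expression:
    tracks group nesting, bracket expressions, brace-matching positions and a
    per-character style label (token_labels) used for syntax highlighting."""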
def __init__(self):
self.__group_count = 0
self.__group_names = []
self.__group_depth = 0
self.__group_starts = []
self.__in_brackets = False
self.__bracket_start = None
self.__any_tokens = False
        self.__is_non_grouping = False
self.position = 0
self.token_labels = []
self.matching_braces = []
def mark_tokens(self, length, label):
self.token_labels += [label] * length
self.matching_braces += [None] * length
def open_group(self, length, group_name=None, is_non_grouping=False):
"""Open a grouping expression"""
self.__group_depth += 1
self.__group_starts.append(self.position)
self.__any_tokens = True
self.__group_name = group_name
self.__is_non_grouping = is_non_grouping
self.__any_tokens = False
self.mark_tokens(length, TOK_GROUP)
self.position += length
def close_group(self):
"""Close a grouping expression returning the matching position"""
if self.__group_depth == 0:
raise ValueError("Unmatched closing parentheses")
if self.__group_name is not None:
self.__group_names.append(self.__group_name)
self.__group_name = None
self.__group_depth -= 1
if self.__is_non_grouping:
self.__group_count += 1
matching_brace = self.__group_starts.pop()
self.mark_tokens(1, TOK_GROUP)
self.matching_braces[self.position] = matching_brace
self.position += 1
self.__any_tokens = True
return matching_brace
@property
def group_count(self):
return self.__group_count
def get_in_brackets(self):
"""True if the state is within [] brackets"""
return self.__in_brackets
in_brackets = property(get_in_brackets)
def open_brackets(self):
self.__in_brackets = True
self.__bracket_start = self.position
self.mark_tokens(1, TOK_BRACKET_EXP)
self.position += 1
self.__any_tokens = True
def close_brackets(self):
if not self.in_brackets:
raise ValueError("Unmatched closing brackets")
self.__in_brackets = False
self.__any_tokens = True
self.mark_tokens(1, TOK_BRACKET_EXP)
self.matching_braces[self.position] = self.__bracket_start
self.position += 1
return self.__bracket_start
def parsed_token(self, length=1, label=TOK_ORDINARY):
self.__any_tokens = True
self.mark_tokens(length, label)
self.position += length
def parsed_special(self, length=1, label=TOK_SPECIAL):
"""Parse a token that's not repeatable"""
self.__any_tokens = False
self.mark_tokens(length, label)
self.position += length
def parsed_repeat(self, length):
self.__any_tokens = False
self.mark_tokens(length, TOK_REPEAT)
self.position += length
def is_group_name(self, x):
return x in self.__group_names
def group_name_index(self, x):
if x == self.__group_name:
return len(self.__group_names)
return self.__group_names.index(x)
@property
def open_expression_start(self):
"""Return the start of the innermost open expression or None"""
if self.__in_brackets:
return self.__bracket_start
elif self.__group_depth:
return self.__group_starts[-1]
@property
def any_tokens(self):
return self.__any_tokens
def looking_at_escape(s, state):
"""Return # of characters in an escape
s - string to look at
state - the current search state
returns either None or the # of characters in the escape
"""
if s[0] != "\\":
return
if len(s) < 2:
raise ValueError("Unterminated escape sequence")
if s[:2] in HARDCODE_ESCAPES:
return 2
if state.in_brackets:
# within brackets, only octal supported
        if s[1] in OCTAL_DIGITS:
            # a backslash followed by up to three octal digits, e.g. [\21]
            i = 2
            while i < min(4, len(s)) and s[i] in OCTAL_DIGITS:
                i += 1
            return i
        if s[1] in DECIMAL_DIGITS:
            raise ValueError(
                "Numeric escapes within brackets must be octal values: e.g., [\\21] for ^Q"
            )
    elif s[1] == "0":
        # \0 followed by up to two more octal digits
        i = 2
        while i < min(4, len(s)) and s[i] in OCTAL_DIGITS:
            i += 1
        return i
elif s[1] in DECIMAL_DIGITS:
# A group number
if len(s) > 2 and s[2] in DECIMAL_DIGITS:
group_number = int(s[1:3])
length = 2
else:
group_number = int(s[1])
length = 1
if group_number > state.group_count:
raise ValueError("Only %d groups at this point" % state.group_count)
return length
if s[1] == "x":
if s[2] in HEXIDECIMAL_DIGITS and s[3] in HEXIDECIMAL_DIGITS:
return 4
raise ValueError("Hexidecimal escapes are two digits long: eg. \\x1F")
# The escape is needless, but harmless
return 2
def looking_at_repeat(s, state):
if s[0] not in REPEAT_STARTS:
return None
if state.in_brackets:
return None
if not state.any_tokens:
raise ValueError("Invalid repeat placement: there is nothing to repeat")
if s[0] == "{":
match = re.match("{([0-9]+)(,([0-9]+))?\\}", s)
if not match:
raise ValueError("Incomplete or badly formatted repeat expression")
if match.group(3) is not None:
if int(match.group(1)) > int(match.group(3)):
raise ValueError(
"Minimum # of matches in %s is greater than maximum number"
% match.group()
)
return len(match.group())
if len(s) > 1 and s[1] == "?":
return 2
return 1
def handle_open_group(s, state):
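    """Consume an opening-parenthesis construct at the start of s: plain
    groups, named groups, look-arounds, option switches, comments, conditional
    matches and named back-references.

    Returns the number of characters consumed, or None when s does not start
    with '('; raises ValueError for malformed constructs."""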
if s[0] == "(":
if len(s) > 2 and s[1] == "?":
if s[2] in ("=", "!", ":"):
# a look-ahead expression or parentheses without grouping
state.open_group(3, is_non_grouping=True)
return 3
            elif len(s) > 3 and s[2:4] == "<=":
                # A look-behind expression, e.g. (?<=foo)
                state.open_group(4, is_non_grouping=True)
return 4
elif s[2] in set("iLmsux"):
# Setting switches
match = re.match(r"\(\?[iLmsux]+\)", s)
if not match:
raise ValueError("Incomplete or badly formatted switch expression")
state.parsed_special(len(match.group()))
return len(match.group())
elif s[2] == "#":
# comment
match = re.match(r"\(\?#.*\)", s)
if not match:
raise ValueError("Incomplete or badly formatted comment")
state.parsed_special(len(match.group()))
return len(match.group())
elif s[2] == "(":
# (?(name/id)) construct
match = re.match(r"\(\?\(([^)]+)\)", s)
if not match:
raise ValueError("Incomplete or badly formatted conditional match")
name_or_id = match.group(1)
if name_or_id.isdigit():
if int(name_or_id) > state.group_count:
raise ValueError(
"Not enough groups before conditional match: asked for %d, but only %d available"
% (int(name_or_id), state.group_count)
)
else:
if not state.is_group_name(name_or_id):
raise ValueError(
'Unavailable group name, "%s", in conditional match'
% name_or_id
)
                state.open_group(len(match.group()), is_non_grouping=True)
                # consume the whole "(?(name)" prefix so parse() does not
                # re-process the opening parenthesis as an ordinary character
                return len(match.group())
elif s[2] == "P" and len(s) > 3:
if s[3] == "=":
# (?P=FOO) matches the previous group expression, FOO
match = re.match(r"\(\?P=([^)]+)\)", s)
if not match:
raise ValueError(
"Incomplete or badly formatted named group reference"
)
else:
state.parsed_token(len(match.group()), TOK_GROUP)
return len(match.group())
elif s[3] == "<":
# Named group definition: (?P<FOO>...)
match = re.match(r"\(\?P<([^>]+)>", s)
if not match:
raise ValueError(
"Incomplete or badly formattted named group definition"
)
elif state.is_group_name(match.group(1)):
raise ValueError("Doubly-defined group: %s" % match.group(1))
else:
group_name = match.group(1)
state.open_group(
len(match.group()),
group_name=group_name,
is_non_grouping=True,
)
state.token_labels[-len(group_name) - 1 : -1] = [
TOK_DEFINITION + state.group_name_index(group_name)
] * len(group_name)
return len(match.group())
else:
raise ValueError("Incomplete or badly formatted (?P expression")
else:
raise ValueError("Incomplete or badly formatted (? expression")
else:
state.open_group(1)
return 1
def parse(s, state):
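    """Scan the regular expression s from state.position onwards, assigning a
    style label to every character in state.token_labels; raises ValueError
    when the expression is malformed or left open."""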
while state.position < len(s):
length = looking_at_escape(s[state.position :], state)
if length:
state.parsed_token(length, TOK_ESCAPE)
continue
if state.in_brackets:
if s[state.position] != "]":
state.parsed_token(1, TOK_BRACKET_EXP)
else:
state.close_brackets()
else:
length = looking_at_repeat(s[state.position :], state)
if length:
state.parsed_repeat(length)
continue
if s[state.position] == "[":
state.open_brackets()
continue
if s[state.position] == "^":
if state.position:
raise ValueError(
"^ can only appear at the start of a regular expression"
)
else:
state.parsed_special()
continue
if s[state.position] == "$":
if state.position < len(s) - 1:
raise ValueError(
"$ can only appear at the end of a regular expression"
)
else:
state.parsed_special()
continue
if s[state.position] == "|":
state.parsed_special()
continue
if s[state.position] == ".":
state.parsed_token(1, TOK_SPECIAL)
continue
if s[state.position] == ")":
state.close_group()
continue
if handle_open_group(s[state.position :], state):
continue
# Otherwise, assume normal character
state.parsed_token()
if state.open_expression_start is not None:
state.token_labels[state.open_expression_start] = STYLE_ERROR
raise ValueError("Incomplete expression")
return state
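if __name__ == "__main__":
    # Minimal sketch (not part of the original module): exercise the tokenizer
    # without the GUI by labelling each character of a sample file-name regexp.
    # Running the file still requires wx, since it is imported at module level.
    sample = r"^(?P<Well>[A-H][0-9]+)\.tif$"
    parsed_state = parse(sample, RegexpState())
    for character, label in zip(sample, parsed_state.token_labels):
        print("%s -> %d" % (character, label))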
| StarcoderdataPython |
3309221 | <reponame>sassoftware/conary
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from testrunner import testhelp
from conary_test import rephelp
from conary.local import database
from conary.deps import deps
from conary.deps.deps import Flavor
from conary.deps.deps import parseDep
from conary import trove
from conary import versions
from conary.repository import changeset
from conary.repository import trovesource
class DepTableTest(testhelp.TestCase):
new = versions.NewVersion()
flv = Flavor()
def _fixupVersion(self, version):
if version is None:
return versions.NewVersion()
else:
assert(isinstance(version, str))
return versions.ThawVersion("/localhost@foo:bar/" + "1.0:" +
version)
def reqTrove(self, troveName, depSet, version=None):
version = self._fixupVersion(version)
trv = trove.Trove(troveName, version, Flavor(), None)
trv.setRequires(depSet)
return trv
def prvTrove(self, troveName, depSet, version=None):
version = self._fixupVersion(version)
trv = trove.Trove(troveName, version, Flavor(), None)
trv.setProvides(depSet)
return trv
def prvReqTrove(self, troveName, prvDepSet, reqDepSet, version=None):
trv = self.prvTrove(troveName, prvDepSet, version=version)
trv.setRequires(reqDepSet)
return trv
def createJobInfo(self, db, *troves):
cs = changeset.ChangeSet()
jobs = []
for trvInfo in troves:
if isinstance(trvInfo, tuple):
oldTrv, newTrv = trvInfo
else:
newTrv = trvInfo
oldTrv = None
if newTrv is not None:
trvCs = newTrv.diff(oldTrv, absolute=False)[0]
cs.newTrove(trvCs)
jobs.append((trvCs.getName(),
(trvCs.getOldVersion(), trvCs.getOldFlavor()),
(trvCs.getNewVersion(), trvCs.getNewFlavor()),
False))
else:
cs.oldTrove(oldTrv.getName(), oldTrv.getVersion(),
oldTrv.getFlavor())
jobs.append((oldTrv.getName(),
(oldTrv.getVersion(), oldTrv.getFlavor()),
(None, None),
False))
src = trovesource.ChangesetFilesTroveSource(db)
src.addChangeSet(cs)
return (db.db, jobs, src)
def init(self):
db = database.Database(None, ':memory:')
dt = db.db.depTables
# commit the schema changes
db.commit()
cu = db.db.db.cursor()
return dt, db, cu
def check(self, db, jobSet, troveSource, findOrdering=False):
checker = db.dependencyChecker(troveSource, findOrdering=findOrdering)
checker.addJobs(jobSet)
result = checker.check()
checker.done()
return (result.unsatisfiedList, result.unresolveableList,
result.getChangeSetList())
def testSimpleResolution(self):
dt, db, cu = self.init()
dep = parseDep("soname: ELF32/libc.so.6(GLIBC_2.0)")
jobInfo = self.createJobInfo(db, self.reqTrove("test", dep))
assert(self.check(*jobInfo)[0:2] == ([
( ("test", self.new, self.flv), dep )], [] ))
dt.add(cu, self.prvTrove("test-prov", dep), 1)
db.commit()
assert(self.check(*jobInfo)[0:2] == ([], []))
dt, db, cu = self.init()
jobInfo = self.createJobInfo(db, self.reqTrove("test", dep),
self.prvTrove("test-prov", dep))
assert(self.check(*jobInfo)[0:2] == ([], []))
def testFlagSets(self):
dt, db, cu = self.init()
dep = parseDep(
"soname: ELF32/libc.so.6(GLIBC_2.0 GLIBC_2.1 GLIBC_2.2)")
jobInfo = self.createJobInfo(db, self.reqTrove("test", dep))
assert(self.check(*jobInfo)[0:2] == ([
( ("test", self.new, self.flv), dep )], [] ))
# make sure that having separate troves provide each flag doesn't
# yield resolved dependencies
prv1 = parseDep("soname: ELF32/libc.so.6(GLIBC_2.0)")
prv2 = parseDep("soname: ELF32/libc.so.6(GLIBC_2.1)")
prv3 = parseDep("soname: ELF32/libc.so.6(GLIBC_2.2)")
dt.add(cu, self.prvTrove("test-prov", prv1), 1)
dt.add(cu, self.prvTrove("test-prov", prv2), 2)
dt.add(cu, self.prvTrove("test-prov", prv3), 3)
db.commit()
assert(self.check(*jobInfo)[0:2] == ([
( ("test", self.new, self.flv), dep )], [] ))
# now set up a trove that provides all of the flags; this trove
# should resolve the dependency
prv = parseDep(
"soname: ELF32/libc.so.6(GLIBC_2.0 GLIBC_2.1 GLIBC_2.2)")
dt.add(cu, self.prvTrove("test-prov", prv), 4)
db.commit()
assert(self.check(*jobInfo)[0:2] == ([], []))
def testInfoOrdering(self):
dt, db, cu = self.init()
# info-user:foo needs info-group:foo
# test needs info-user:foo
jobInfo = self.createJobInfo(db,
self.prvReqTrove("info-user:user",
parseDep("userinfo: user"),
parseDep("groupinfo: group")),
self.prvTrove("info-group:group",
parseDep("groupinfo: group")),
self.reqTrove("test",
parseDep("userinfo: user"))
)
resolvedOrder = self.check(findOrdering=True, *jobInfo)[2]
order = [ [ y[0] for y in x ] for x in resolvedOrder ]
assert(order == [['info-group:group'], ['info-user:user'], ['test']])
def testIterativeOrdering(self):
"""Jobs order correctly when the graph is built over several check()
calls.
@tests: CNY-3654
"""
dt, db, cu = self.init()
# Create a cycle between two troves, as that's easy to verify in the
# final ordering. One of them also requires a third trove, which will
# be added in a separate dep check cycle.
dep1 = parseDep('python: dep1')
dep2 = parseDep('python: dep2')
dep3 = parseDep('python: dep3')
# trv1 requires dep2 + dep3
trv1reqs = deps.DependencySet()
trv1reqs.union(dep2)
trv1reqs.union(dep3)
trv1 = self.prvReqTrove('trv1:runtime', dep1, trv1reqs)
trv2 = self.prvReqTrove('trv2:runtime', dep2, dep1)
trv3 = self.prvTrove('trv3:runtime', dep3)
# The first job has just the cycle in it, and is missing trv3 to
# complete the graph.
_, job12, src = self.createJobInfo(db, trv1, trv2)
# The second job includes the needed trove
_, job3, src3 = self.createJobInfo(db, trv3)
# Merge the second job's changeset into the first so the trove source
# is complete.
src.addChangeSets(src3.csList)
checker = db.db.dependencyChecker(src, findOrdering=True)
# First pass: missing one dep
checker.addJobs(job12)
result = checker.check()
self.assertEqual(result.unsatisfiedList,
[(('trv1:runtime', self.new, self.flv), dep3)])
# Second pass: add provider
checker.addJobs(job3)
result = checker.check()
self.assertEqual(result.unsatisfiedList, [])
# trv1 and trv2 require each other and so constitute a single job, trv3
# is not part of the cycle so it is a separate job. The original bug
# would have all three troves as separate jobs since it forgot about
# the deps from the first check.
checker.done()
self.assertEqual(result.getChangeSetList(), [job3, job12])
def testSelf(self):
dt, db, cu = self.init()
dep = parseDep("trove: test")
jobInfo = self.createJobInfo(db, self.reqTrove("test", dep))
assert(self.check(*jobInfo)[1:4] == ([], []))
def testOldNeedsNew(self):
dt, db, cu = self.init()
prv1 = parseDep("soname: ELF32/libtest.so.1(foo)")
prv2 = parseDep("soname: ELF32/libtest.so.2(foo)")
prvTrv1 = self.prvTrove("test-prov", prv1, version="1.0-1-1")
reqTrv1 = self.reqTrove("test-req", prv1, version="1.0-1-1")
troveInfo = db.addTrove(prvTrv1)
db.addTroveDone(troveInfo)
troveInfo = db.addTrove(reqTrv1)
db.addTroveDone(troveInfo)
db.commit()
prvTrv2 = self.prvTrove("test-prov", prv2, version="2.0-1-1")
reqTrv2 = self.reqTrove("test-req", prv2, version="2.0-1-1")
jobInfo = self.createJobInfo(db,
(prvTrv1, prvTrv2), (reqTrv1, reqTrv2))
order = self.check(findOrdering=True, *jobInfo)[2]
assert(len(order) == 1)
def testOutsiderNeedsOldAndNew(self):
dt, db, cu = self.init()
dep = parseDep("soname: ELF32/libtest.so.1(flag)")
reqTrv1 = self.reqTrove("test-req", dep, version="1.0-1-1")
prvTrv1 = self.prvTrove("test-prov", dep, version="1.0-1-1")
prvTrv2 = self.prvTrove("test-prov2", dep, version="1.0-1-1")
troveInfo = db.addTrove(prvTrv1)
db.addTroveDone(troveInfo)
troveInfo = db.addTrove(reqTrv1)
db.addTroveDone(troveInfo)
db.commit()
jobInfo = self.createJobInfo(db, (prvTrv1, None), (None, prvTrv2))
(broken, byErase, order) = self.check(findOrdering=True, *jobInfo)
assert(not broken and not byErase)
assert(len(order) == 1)
class DepTableTestWithHelper(rephelp.RepositoryHelper):
def testGetLocalProvides(self):
db = self.openDatabase()
baz = self.addDbComponent(db, 'baz:run', '1', '',
provides=parseDep('trove:foo:run'))
foo2 = self.addDbComponent(db, 'foo:run', '2', '',
provides=parseDep('trove:foo:run'),
requires=parseDep('trove:baz:run'))
foo1 = self.addDbComponent(db, 'foo:run', '1', '',
provides=parseDep('trove:foo:run'),
requires=parseDep('trove:baz:run'))
bar = self.addDbComponent(db, 'bar:run', '1', '',
provides=parseDep('trove:bar:run'),
requires=parseDep('trove:foo:run'))
bam = self.addDbComponent(db, 'bam:run', '1', '',
provides=parseDep('trove:bam:run'),
requires=parseDep('trove:foo:run'))
depSet = parseDep('trove:bam:run trove:bar:run')
sols = db.getTrovesWithProvides([depSet], True)
assert(sols[depSet] == [[bam.getNameVersionFlavor()],
[bar.getNameVersionFlavor()]])
def testUnknownDepTag(self):
db = self.openDatabase()
intTag = 65535
stringTag = "yet-to-be-defined"
class YetToBeDefinedDependency(deps.DependencyClass):
tag = intTag
tagName = stringTag
justOne = False
depClass = deps.Dependency
ds = deps.DependencySet()
depName = "some"
depFlag = "flag1"
ds.addDep(YetToBeDefinedDependency, deps.Dependency(depName,
[ (depFlag, deps.FLAG_SENSE_REQUIRED) ]))
bam = self.addDbComponent(db, 'bam:run', '1', '',
provides=ds,
requires=ds)
bam2 = db.getTrove(bam.name(), bam.version(), bam.flavor())
self.assertEqual(bam.requires.freeze(), bam2.requires.freeze())
self.assertEqual(bam.provides.freeze(), bam2.provides.freeze())
| StarcoderdataPython |
111746 | <reponame>dsnk24/tts
import pyttsx3
engine = pyttsx3.init()
engine.say("Welcome to my text-to-speech program. Type the text you would like to convert below")
engine.runAndWait()
while True:
text = input('===>')
engine.say(text)
engine.runAndWait() | StarcoderdataPython |
129304 | '''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.sos_processes.base_process_builder import BaseProcessBuilder
class ProcessBuilder(BaseProcessBuilder):
# ontology information
_ontology_data = {
'label': 'Climate Process Multiscenario Process',
'description': '',
'category': '',
'version': '',
}
def get_builders(self):
# scenario build map
scenario_map = {'input_name': 'scenario_list',
'input_type': 'string_list',
'input_ns': 'ns_scatter_scenario',
'output_name': 'scenario_name',
'scatter_ns': 'ns_scenario',
'gather_ns': 'ns_scatter_scenario',
'ns_to_update': ['ns_witness',
'ns_functions']}
self.ee.smaps_manager.add_build_map(
'scenario_list', scenario_map)
builder_cdf_list = self.ee.factory.get_builder_from_process(
'climateeconomics.sos_processes.iam.witness', 'climate_process')
scatter_scenario_name = 'Scenarios'
# modify namespaces defined in the child process
for ns in self.ee.ns_manager.ns_list:
self.ee.ns_manager.update_namespace_with_extra_ns(
ns, scatter_scenario_name, after_name=self.ee.study_name)
# Add new namespaces needed for the scatter multiscenario
ns_dict = {'ns_scatter_scenario': f'{self.ee.study_name}',
'ns_post_processing': f'{self.ee.study_name}.Post-processing'}
self.ee.ns_manager.add_ns_def(ns_dict)
multi_scenario = self.ee.factory.create_very_simple_multi_scenario_builder(
scatter_scenario_name, 'scenario_list', builder_cdf_list, autogather=False)
self.ee.post_processing_manager.add_post_processing_module_to_namespace('ns_post_processing',
'climateeconomics.sos_wrapping.sos_wrapping_witness.post_proc_climate_ms.post_processing_climate')
return multi_scenario
| StarcoderdataPython |
3242987 | <reponame>marbogusz/pycarwings2
#!/usr/bin/env python
import pycarwings2
import time
from configparser import ConfigParser
import logging
import sys
import pprint
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
parser = ConfigParser()
candidates = ['config.ini', 'my_config.ini']
found = parser.read(candidates)
username = parser.get('get-leaf-info', 'username')
password = parser.get('get-leaf-info', 'password')
region = parser.get('get-leaf-info', 'region')
sleepsecs = 30 # Time to wait before polling Nissan servers for update
def print_info(info):
print(" date %s" % info.answer["BatteryStatusRecords"]["OperationDateAndTime"])
print(" date %s" % info.answer["BatteryStatusRecords"]["NotificationDateAndTime"])
print(" battery_capacity2 %s" % info.answer["BatteryStatusRecords"]["BatteryStatus"]["BatteryCapacity"])
print(" battery_capacity %s" % info.battery_capacity)
print(" charging_status %s" % info.charging_status)
print(" battery_capacity %s" % info.battery_capacity)
print(" battery_remaining_amount %s" % info.battery_remaining_amount)
print(" charging_status %s" % info.charging_status)
print(" is_charging %s" % info.is_charging)
print(" is_quick_charging %s" % info.is_quick_charging)
print(" plugin_state %s" % info.plugin_state)
print(" is_connected %s" % info.is_connected)
print(" is_connected_to_quick_charger %s" % info.is_connected_to_quick_charger)
print(" time_to_full_trickle %s" % info.time_to_full_trickle)
print(" time_to_full_l2 %s" % info.time_to_full_l2)
print(" time_to_full_l2_6kw %s" % info.time_to_full_l2_6kw)
print(" battery_percent %s" % info.battery_percent)
print(" state_of_charge %s" % info.state_of_charge)
# Main program
logging.debug("login = %s, password = %s, region = %s" % (username, password, region))
print("Prepare Session")
s = pycarwings2.Session(username, password, region)
print("Login...")
leaf = s.get_leaf()
print("get_latest_battery_status from servers")
leaf_info = leaf.get_latest_battery_status()
start_date = leaf_info.answer["BatteryStatusRecords"]["OperationDateAndTime"]
print("start_date=", start_date)
print_info(leaf_info)
print("request an update from the car itself")
result_key = leaf.request_update()
update_source = ""
while True:
print("Waiting {0} seconds".format(sleepsecs))
time.sleep(sleepsecs) # sleep to give request time to process
battery_status = leaf.get_status_from_update(result_key)
# The Nissan Servers seem to have changed. Previously a battery_status would eventually be returned
# from get_status_from_update(), now this always seems to return 0.
# Checking for updates via get_latest_battery_status() seems to be the way to check if an update
# has been provided to the Nissan servers.
if battery_status is None:
print("No update, see latest_battery_status has changed")
latest_leaf_info = leaf.get_latest_battery_status()
latest_date = latest_leaf_info.answer["BatteryStatusRecords"]["OperationDateAndTime"]
print("latest_date=", latest_date)
if (latest_date != start_date):
print("Latest info has updated we'll use that instead of waiting for get_status_from_update to respond")
update_source = "get_latest_battery_status"
break
else:
update_source = "get_status_from_update"
break
if update_source == "get_status_from_update":
pprint.pprint(battery_status.answer)
elif update_source == "get_latest_battery_status":
print_info(latest_leaf_info)
exit()
# result_key = leaf.start_climate_control()
# time.sleep(60)
# start_cc_result = leaf.get_start_climate_control_result(result_key)
# result_key = leaf.stop_climate_control()
# time.sleep(60)
# stop_cc_result = leaf.get_stop_climate_control_result(result_key)
| StarcoderdataPython |
1605413 | import sys
import multiprocessing
from multiprocessing import Process
from multiprocessing.queues import Queue
import traceback
from entropy_search_terminal import main as entropy_search_main
def run_function_with_output_to_queue(func, args, queue):
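    """Run func(*args) with sys.stdout redirected to queue so the parent
    process can collect the child's output; stdout is restored afterwards."""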
stdout = sys.stdout
sys.stdout = queue
try:
func(*args)
except Exception as e:
print(traceback.format_exc())
print(e)
sys.stdout = stdout
class StdoutQueue(Queue):
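    """A multiprocessing Queue that also exposes write()/flush(), so it can
    stand in for sys.stdout inside a child process."""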
def __init__(self, *args, **kwargs):
ctx = multiprocessing.get_context()
super(StdoutQueue, self).__init__(*args, **kwargs, ctx=ctx)
def write(self, msg):
self.put(msg)
def flush(self):
sys.__stdout__.flush()
class EntropySearchServer:
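    """Run the entropy search in a background process and buffer everything it
    prints, so callers can poll for progress and completion."""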
def __init__(self):
self.output_queue = StdoutQueue()
self.output_str = []
self.thread = None
def start_search(self, para):
self.output_str = []
para.update({
"method": "untarget-identity",
"clean_spectra": True,
})
if self.thread is not None:
self.thread.join()
self.thread = Process(target=run_function_with_output_to_queue,
args=(entropy_search_main, (para,), self.output_queue))
self.thread.start()
return
def get_output(self):
try:
while not self.output_queue.empty():
output_str = self.output_queue.get_nowait()
print(output_str)
if output_str is not None:
self.output_str.append(output_str)
except AttributeError as e:
pass
return "".join(self.output_str)
def stop(self):
self.thread.terminate()
# self.thread.join()
return self.get_output()
def is_finished(self):
self.get_output()
if self.thread is None:
return True
else:
if self.thread.pid is None:
return False
self.thread.join(0.1)
if self.thread.exitcode is None:
return False
else:
return True
| StarcoderdataPython |
1784589 | """Provides the constants needed for component."""
SUPPORT_ALARM_ARM_HOME = 1
SUPPORT_ALARM_ARM_AWAY = 2
SUPPORT_ALARM_ARM_NIGHT = 4
SUPPORT_ALARM_TRIGGER = 8
SUPPORT_ALARM_ARM_CUSTOM_BYPASS = 16
SUPPORT_ALARM_ARM_VACATION = 32
CONDITION_TRIGGERED = "is_triggered"
CONDITION_DISARMED = "is_disarmed"
CONDITION_ARMED_HOME = "is_armed_home"
CONDITION_ARMED_AWAY = "is_armed_away"
CONDITION_ARMED_NIGHT = "is_armed_night"
CONDITION_ARMED_VACATION = "is_armed_vacation"
CONDITION_ARMED_CUSTOM_BYPASS = "is_armed_custom_bypass"
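if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): the SUPPORT_* values
    # are bit flags, so a panel's supported features are combined with bitwise OR
    # and tested with bitwise AND.
    supported = SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_TRIGGER
    print(bool(supported & SUPPORT_ALARM_ARM_AWAY))   # True
    print(bool(supported & SUPPORT_ALARM_ARM_NIGHT))  # False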
| StarcoderdataPython |