--- StarcoderdataPython sample 5121940 ---
<filename>formee/formTools/fill.py
import json
from formee.auth.check import check_login
from formee.auth.user_jwt import get_user_jwt
from formee.formTools.validators import NumberValidator
from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport
from PyInquirer import prompt
from rich import print
transport = AIOHTTPTransport(url="https://hrbt-portal.hasura.app/v1/graphql",
headers={'Authorization': 'Bearer ' + get_user_jwt()})
# Create a GraphQL client using the defined transport
client = Client(transport=transport, fetch_schema_from_transport=True)
get_form_details_query = gql("""
query GetForm($id: Int!) {
Form_by_pk(id: $id) {
User {
username
}
id
description
title
ques_confirms {
title
}
ques_numbers {
title
}
ques_options {
title
options {
title
}
}
ques_texts {
title
}
}
}
""")
answer_mutation = gql("""
mutation AnswerForm($data: json!, $filled_by: String!, $form: Int!, $form_creator: String!) {
insert_answers_one(object: {filled_by: $filled_by, data: $data, form: $form, form_creator: $form_creator}) {
filled_by
form
form_creator
id
}
}
""")
def get_form_details(id: int) -> dict:
"""
Args:
id (int): ID of the form to be fetched
Returns:
dict: Data of the form
"""
return client.execute(get_form_details_query, variable_values={"id": id})['Form_by_pk']
def fill_prompt() -> dict:
"""
Returns:
dict: Data of the form
"""
questions = [
{
'type': 'input',
'name': 'form_id',
'message': 'Enter the form id:',
            'validate': lambda val: val.strip().isdigit()
},
]
answers = prompt(questions)
form_details = get_form_details(answers['form_id'])
if not form_details:
print(f"[red]Form with id {answers['form_id']} not found.")
fill_prompt()
print("\n")
print(f"[blue] Filling form {form_details['title']}")
print(f"[yellow] Created by {form_details['User']['username']}")
print(f"[green] Description: {form_details['description']}")
ques_answers = {}
for ques in form_details['ques_texts']:
ques_answers[ques['title']] = prompt([{
'type': 'input',
'name': ques['title'],
'message': ques['title'],
'validate': lambda val: val != '' and len(val) <= 1000
}])[ques['title']]
for ques in form_details['ques_numbers']:
ques_answers[ques['title']] = prompt([{
'type': 'input',
'name': ques['title'],
'message': ques['title'],
'validate': NumberValidator
}])[ques['title']]
for ques in form_details['ques_options']:
ques_answers[ques['title']] = prompt([{
'type': 'list',
'name': ques['title'],
'message': ques['title'],
'choices': [opt['title'] for opt in ques['options']]
}])[ques['title']]
for ques in form_details['ques_confirms']:
ques_answers[ques['title']] = prompt([{
'type': 'confirm',
'name': ques['title'],
'message': ques['title']
}])[ques['title']]
print("\n")
usr_data = check_login()
if usr_data is None:
usrname = 'Anonymous'
else:
usrname = usr_data['username']
answer_return = client.execute(answer_mutation, variable_values={"data": json.dumps(
ques_answers), "form": form_details['id'], "filled_by": usrname, "form_creator": form_details['User']['username']})['insert_answers_one']
print(f"[green] Form filled successfully.")
print(f"[green] Response id: {answer_return['id']}")
print(f"[green] Filled by: {answer_return['filled_by']}")
print(f"[green] Form creator: {answer_return['form_creator']}")
print(f"[green] Form id: {answer_return['form']}")
return answer_return
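# A minimal entry-point sketch (an assumption, not part of the original
# module): running the file directly launches the interactive fill prompt.
if __name__ == "__main__":
    fill_prompt()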
--- StarcoderdataPython sample 59613 ---
#Copyright (C) 2011 by <NAME> and <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import collections
import xlrd
from django.core.exceptions import ObjectDoesNotExist
from mittab.apps.tab.forms import SchoolForm
from mittab.apps.tab.models import *
def import_teams(fileToImport):
try:
sh = xlrd.open_workbook(filename=None, file_contents=fileToImport.read()).sheet_by_index(0)
except:
return ['ERROR: Please upload an .xlsx file. This filetype is not compatible']
num_teams = 0
found_end = False
team_errors = []
    while not found_end:
        try:
            sh.cell(num_teams, 0).value
            num_teams += 1
        except IndexError:
            found_end = True
#Verify sheet has required number of columns
try:
sh.cell(0, 8).value
except:
team_errors.append('ERROR: Insufficient Columns in Sheet. No Data Read')
return team_errors
# verify no duplicate debaters, give error messages
deb_indicies = []
for i in range(1, num_teams):
deb_indicies.append((sh.cell(i, 3).value.strip(), i)) # tuple saves debater name and row
deb_indicies.append((sh.cell(i, 7).value.strip(), i))
deb_names = [i[0] for i in deb_indicies]
names_dict = collections.Counter(deb_names)
for deb_index in deb_indicies:
if names_dict.get(deb_index[0]) > 1: # if dict says appears more than once
# inform that duplicate exists at location, report relevant information
row_num = deb_index[1]
msg = "Check for duplicate debater " + deb_index[0] + " in team " + sh.cell(row_num, 0).value + \
", on XLS file row " + str(row_num)
team_errors.append(msg)
for i in range(1, num_teams):
# Name, School, Seed [full, half, free, none], D1 name, D1 v/n?, D1 phone, D1 prov,
# D2 name, D2 v/n?, D2 phone, D2 prov
# team name, check for duplicates
duplicate = False
team_name = sh.cell(i, 0).value
if team_name == '':
team_errors.append('Skipped row ' + str(i) + ': empty Team Name')
continue
if Team.objects.filter(name=team_name).first() is not None: # inform that duplicates exist
duplicate = True
team_errors.append(team_name + ': duplicate team, overwriting data')
school_name = sh.cell(i, 1).value.strip()
try:
team_school = School.objects.get(name__iexact=school_name)
except:
#Create school through SchoolForm because for some reason they don't save otherwise
form = SchoolForm(data={'name': school_name})
if form.is_valid():
form.save()
else:
team_errors.append(team_name + ": Invalid School")
continue
team_school = School.objects.get(name__iexact=school_name)
# check seeds
team_seed, changed_seed = _create_seed(team_name=team_name, seed=sh.cell(i, 2).value.strip().lower())
if changed_seed:
team_errors.append('Changed ' + team_name + ' from "' + sh.cell(i, 2).value.strip().lower()
+ '" to unseeded. Note and confirm with school.')
deb1_name = sh.cell(i, 3).value.strip()
deb1_status = _create_status(sh.cell(i, 4).value.lower())
deb1_phone = sh.cell(i, 5).value.strip()
deb1_provider = sh.cell(i, 6).value.strip()
deb1, deb1_created = Debater.objects.get_or_create(name=deb1_name, novice_status=deb1_status, phone=deb1_phone,
provider=deb1_provider)
iron_man = True
deb2_name = sh.cell(i, 7).value.strip()
        if deb2_name != '':
iron_man = False
deb2_status = _create_status(sh.cell(i, 8).value.lower())
try:
deb2_phone = sh.cell(i, 9).value
except IndexError:
deb2_phone = ''
try:
deb2_provider = sh.cell(i,10).value
except IndexError:
deb2_provider = ''
deb2, deb2_created = Debater.objects.get_or_create(name=deb2_name, novice_status=deb2_status,
phone=deb2_phone,
provider=deb2_provider)
if not duplicate: # create new team
team = Team(name=team_name, school=team_school, seed=team_seed)
team.save()
team.debaters.add(deb1)
if not iron_man:
team.debaters.add(deb2)
else:
team_errors.append(team_name + ': Team is an iron-man, added successfully')
team.save()
else: # update the team
team = Team.objects.get(name=team_name)
team.school = team_school
team.seed = team_seed
team.debaters.clear()
team.debaters.add(deb1)
if not iron_man:
team.debaters.add(deb2)
else:
team_errors.append(team_name + ': Team is an iron-man, added successfully')
team.save()
return team_errors
def _create_status(status):
"""Translates the string for varsity-novice status into MIT-TAB's integer pseudo-enum"""
if status == 'novice' or status == 'nov' or status == 'n':
return 1
else:
return 0
def _create_seed(team_name, seed):
"""Translates the string version of the seed into the pseudo-enum. Checks for duplicate free seeds and changes it
as necessary. Also notes that change so a message can be returned.
:type team_name: str
:type seed: str
:return tuple with the integer version of the seed and whether that team's seed was changed
"""
seed_int = 0
seed_changed = False
if seed == 'full seed' or seed == 'full':
seed_int = 3
elif seed == 'half seed' or seed == 'half':
seed_int = 2
elif seed == 'free seed' or seed == 'free':
seed_int = 1
multiple_free_seeds = False
try:
school_name = Team.objects.get(name=team_name).school # get school_name
for team in Team.objects.filter(school=school_name).all(): # get teams with that name
if int(team.seed) == 1: # 1 is the free seed
if team.name != team_name: # if there is a free seed already, change and note change
multiple_free_seeds = True
except ObjectDoesNotExist:
pass
    if multiple_free_seeds: # duplicate free seed: force unseeded, note this
seed_changed = True
seed_int = 0
return seed_int, seed_changed
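# A hedged sketch of the pseudo-enum mappings above (the team name is
# hypothetical, and _create_seed also queries the database for duplicate
# free seeds, so a configured Django environment is assumed):
#
#     _create_status('nov')                          # -> 1 (novice)
#     _create_status('varsity')                      # -> 0
#     _create_seed('Hypothetical Team', 'full seed') # -> (3, False)
#     _create_seed('Hypothetical Team', 'half')      # -> (2, False)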
--- StarcoderdataPython sample 9744904 ---
<reponame>pravinva/aws-serverless-data-lake-framework<filename>sdlf-foundations/lambda/check-job/src/lambda_function.py
import json
import logging
import datetime as dt
import boto3
logger = logging.getLogger()
logger.setLevel(logging.INFO)
glue = boto3.client('glue')
def datetimeconverter(o):
if isinstance(o, dt.datetime):
return o.__str__()
def lambda_handler(event, context):
"""Calls custom job waiter developed by user
Arguments:
event {dict} -- Dictionary with details on previous processing step
        context {object} -- Lambda context object
Returns:
{dict} -- Dictionary with Data Quality Job details
"""
try:
logger.info('Fetching event data from previous step')
job_details = event['body']['dataQuality']
logger.info('Checking Job Status')
job_response = glue.get_job_run(
JobName=job_details['job']['jobName'],
RunId=job_details['job']['jobRunId'])
json_data = json.loads(json.dumps(
job_response, default=datetimeconverter))
job_details['job']['jobStatus'] = \
json_data.get('JobRun').get('JobRunState')
except Exception as e:
logger.error("Fatal error", exc_info=True)
raise e
return job_details
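# A minimal local-invocation sketch (the job name and run id below are
# hypothetical; this needs AWS credentials and an existing Glue job run
# to actually succeed):
if __name__ == "__main__":
    sample_event = {
        "body": {
            "dataQuality": {
                "job": {"jobName": "example-dq-job", "jobRunId": "jr_0123"}
            }
        }
    }
    print(lambda_handler(sample_event, None))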
--- StarcoderdataPython sample 1916131 ---
<reponame>shubhangini-tripathy/geeks_for_geeks<gh_stars>0
# Read an array length (unused), then two integer arrays, and print their
# element-wise sum.
n = int(input())
arr = [int(x) for x in input().split()]
arr1 = [int(x) for x in input().split()]
for i in range(len(arr)):
    arr[i] = arr[i] + arr1[i]
print(arr)
--- StarcoderdataPython sample 12850513 ---
from django.test import TestCase, Client
from django.urls import reverse
class TestViews(TestCase):
def setUp(self):
self.client = Client()
self.register_url = reverse('register')
self.profile_url = reverse('profile')
def test_register(self):
response = self.client.get(self.register_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'users/register.html')
--- StarcoderdataPython sample 1678106 ---
import NetworkManager
from subprocess import Popen
# This can most likely be done just as easily with the python library,
# but this works. Why reinvent the wheel? As dirty as this feels,
# programming every wifi edge condition into this involves quite a bit
# of work with a large error margin. However nmcli already does this
# and when new wifi technologies come out it can just be updated
# rather than us having to keep this method constantly up to date.
#
# I'm all ears if you have a better idea...
#
# TODO: We probably need to detect 802-1x...
def addAp(path, ssid, key):
    """Connect to an access point via nmcli; `path` is the path to the nmcli binary."""
    args = [path, 'device', 'wifi', 'connect', ssid, 'password', key]
    nmcliProcess = Popen(args)
    nmcliProcess.wait()
    return nmcliProcess.returncode == 0
def getAps():
aps = []
for device in NetworkManager.NetworkManager.GetDevices():
if device.DeviceType != NetworkManager.NM_DEVICE_TYPE_WIFI:
continue
for ap in device.GetAccessPoints():
aps.append(ap)
return aps
# Not sure if this is ugly or elegant... probably ugly.
def isConnected():
for connection in NetworkManager.NetworkManager.ActiveConnections:
return True
return False
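# A minimal usage sketch (binary path, SSID and key are placeholders; running
# this would change real network state, so it is left commented out):
#
#     for ap in getAps():
#         print(ap.Ssid)
#     addAp('/usr/bin/nmcli', 'ExampleSSID', 'example-password')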
--- StarcoderdataPython sample 1678837 ---
<filename>electricpy/visu.py
################################################################################
"""
`electricpy.visu` - Support for plotting and visualizations.
Filled with plotting functions and visualization tools for electrical engineers,
this module is designed to assist engineers visualize their designs.
"""
################################################################################
import numpy as _np
import matplotlib.pyplot as _plt
class InductionMotorCircle:
"""
Plot Induction Motor Circle Diagram.
This class is designed to plot induction motor circle diagram
and plot circle diagram to obtain various parameters of induction motor
Parameters
----------
no_load_data: dict {'V0', 'I0', 'W0'}
V0: no load test voltage
I0: no load current in rotor
W0: No load power(in Watts)
blocked_rotor_data: dict {'Vsc','Isc','Wsc'}
Vsc: blocked rotor terminal voltage
Isc: blocked rotor current in rotor
Wsc: Power consumed in blocked rotor test
output_power: int
Desired power output from the induction motor
    torque_ration: float
        Ratio of rotor resistance to stator resistance
        (i.e., R2/R1)
frequency: int
AC supply frequency
poles: int
Pole count of induction Motor
"""
def __init__(self, no_load_data, blocked_rotor_data, output_power,
torque_ration=1, frequency=50, poles=4):
"""Primary Entrypoint."""
self.no_load_data = no_load_data
self.blocked_rotor_data = blocked_rotor_data
self.f = frequency
self.operating_power = output_power
self.torque_ratio = torque_ration
self.poles = poles
self.sync_speed = 120*frequency/poles #rpm
v0 = no_load_data['V0']
i0 = no_load_data['I0']
w0 = no_load_data['W0']
self.no_load_pf = w0 / (_np.sqrt(3) * v0 * i0)
theta0 = _np.arccos(self.no_load_pf)
# get short circuit power factor and Current at slip=1
vsc = blocked_rotor_data['Vsc']
isc = blocked_rotor_data['Isc']
wsc = blocked_rotor_data['Wsc']
self.blocked_rotor_pf = wsc / (_np.sqrt(3) * vsc * isc)
theta_sc = _np.arccos(self.blocked_rotor_pf)
# because V is on Y axis
theta0 = _np.pi / 2 - theta0
theta_sc = _np.pi / 2 - theta_sc
# isc is the current at reduced voltage
# calculate current at rated voltage
isc = v0 * isc / vsc
self.no_load_line = [
[0, i0 * _np.cos(theta0)],
[0, i0 * _np.sin(theta0)]
]
self.full_load_line = [
[0, isc * _np.cos(theta_sc)],
[0, isc * _np.sin(theta_sc)]
]
# secondary current line
self.secondary_current_line = [
[i0 * _np.cos(theta0), isc * _np.cos(theta_sc)],
[i0 * _np.sin(theta0), isc * _np.sin(theta_sc)]
]
[[x1, x2], [y1, y2]] = self.secondary_current_line
self.theta = _np.arctan((y2 - y1) / (x2 - x1))
# get the induction motor circle
self.power_scale = w0 / (i0 * _np.sin(theta0))
self.center, self.radius = self.compute_circle_params()
[self.center_x, self.center_y] = self.center
self.p_max = self.radius * _np.cos(self.theta) - (
self.radius - self.radius * _np.sin(self.theta)
) * _np.tan(self.theta)
self.torque_line, self.torque_point = self.get_torque_line()
self.torque_max, self.torque_max_x, self.torque_max_y = \
self.get_torque_max()
# Take low slip point
_, [self.power_x, self.power_y] = self.get_output_power()
self.data = self.compute_efficiency()
    def __call__(self):
        """Return the computed performance data."""
        return self.data
def plot(self):
"""Plot the Induction Motor Circle Diagram."""
[circle_x, circle_y] = InductionMotorCircle.get_circle(
self.center,
self.radius,
semi=True
)
_plt.plot(circle_x, circle_y)
InductionMotorCircle.plot_line(self.no_load_line)
InductionMotorCircle.plot_line(self.secondary_current_line)
InductionMotorCircle.plot_line(self.full_load_line, ls='-.')
InductionMotorCircle.plot_line(self.torque_line, ls='-.')
# Full load output
_plt.plot(
[self.secondary_current_line[0][1],
self.secondary_current_line[0][1]],
[self.secondary_current_line[1][1], self.center_y])
# Diameter of the circle
_plt.plot([self.center_x - self.radius, self.center_x+self.radius],
[self.center_y, self.center_y], ls='-.')
# Max torque line
_plt.plot(
[self.center_x, self.torque_max_x],
[self.center_y, self.torque_max_y], ls='-.')
# Max Output Power line
_plt.plot(
[self.center_x, self.center_x - self.radius * _np.sin(self.theta)],
[self.center_y, self.center_y + self.radius * _np.cos(self.theta)],
ls='-.'
)
# Operating Point
_plt.plot([0, self.power_x], [0, self.power_y], c='black')
_plt.scatter(self.power_x, self.power_y, marker='X', c='red')
# mark the center of the circle
_plt.scatter(self.center_x, self.center_y, marker='*', c='blue')
_plt.scatter(
self.center_x - self.radius * _np.sin(self.theta),
self.center_y + self.radius * _np.cos(self.theta),
linewidths=3, c='black', marker='*'
)
_plt.scatter(
self.torque_max_x,
self.torque_max_y,
linewidths=3,
c='black',
marker='*'
)
_plt.title("Induction Motor Circle Diagram")
_plt.grid()
_plt.legend([
'I2 locus',
'No Load Current',
'Output Line',
'Blocked Rotor Current',
'Torque line',
'Full Load Losses',
'Diameter',
'Maximum Torque',
'Maximum Output Power',
f'Operating Power {self.operating_power}'
])
_plt.show()
def compute_efficiency(self):
"""Compute the output efficiency of induction motor."""
[[_, no_load_x], [_, no_load_y]] = self.no_load_line
no_load_losses = no_load_y * self.power_scale
compute_slope = InductionMotorCircle.compute_slope
torque_slope = compute_slope(self.torque_line)
stator_cu_loss = (self.power_x - no_load_x) * torque_slope * self.power_scale
rotor_current_slope = compute_slope(self.secondary_current_line)
total_cu_loss = (self.power_x - no_load_x) * rotor_current_slope * self.power_scale
rotor_cu_loss = total_cu_loss - stator_cu_loss
rotor_output = self.power_y * self.power_scale - (rotor_cu_loss + stator_cu_loss + no_load_losses)
slip = rotor_cu_loss / rotor_output
self.rotor_speed = self.sync_speed*(1-slip)
data = {
'no_load_loss': no_load_losses,
'rotor_copper_loss': rotor_cu_loss,
'stator_copper_loss': stator_cu_loss,
'rotor_output': rotor_output,
'slip': slip,
            'stator_rmf_speed (RPM)': self.sync_speed,
            'rotor_speed (RPM)': self.rotor_speed,
'power_factor': (self.power_y / _np.sqrt(self.power_x ** 2 + self.power_y ** 2)),
'efficiency': f"{rotor_output * 100 / (self.power_y * self.power_scale)} %"
}
return data
@staticmethod
def get_circle(center, radius, semi=False):
"""
Determine parametric equation of circle.
Parameters
----------
center: list[float, float] [x0, y0]
radius: float
Returns
-------
(x, y): tuple
parametric equation of circle
(x = x0 + r*cos(theta) ; y = y0 + r*sin(theta))
"""
[x0, y0] = center
if semi:
theta = _np.arange(0, _np.pi, 1e-4)
else:
theta = _np.arange(0, _np.pi * 2, 1e-4)
x = x0 + radius * _np.cos(theta)
y = y0 + radius * _np.sin(theta)
return x, y
@staticmethod
def plot_line(line, mark_start=True, mark_end=True, ls='-', marker=None):
"""Supporting function to plot a line."""
[x, y] = line
[x1, x2] = x
[y1, y2] = y
_plt.plot(x, y, ls=ls)
if mark_start:
_plt.scatter(x1, y1, marker=marker)
if mark_end:
_plt.scatter(x2, y2, marker=marker)
def compute_circle_params(self):
"""Compute the paramters of induction motor circle."""
[[x1, x2], [y1, y2]] = self.secondary_current_line
theta = _np.arctan((y2 - y1) / (x2 - x1))
length = _np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
radius = length / (2 * _np.cos(theta))
center = [radius + x1, y1]
return center, radius
def get_torque_line(self):
"""Obatin the torque line of the induction motor."""
[[x1, x2], [y1, y2]] = self.secondary_current_line
y = (self.torque_ratio * y2 + y1) / (self.torque_ratio + 1)
torque_point = [x2, y]
torque_line = [[x1, x2], [y1, y]]
return torque_line, torque_point
def get_torque_max(self):
"""Compute max torque for given Induction Motor parameters."""
[x, y] = self.torque_line
[x1, x2] = x
[y1, y2] = y
alpha = _np.arctan((y2 - y1) / (x2 - x1))
torque_max = self.radius * _np.cos(alpha) - (
self.radius - self.radius * _np.sin(alpha)
) * _np.tan(alpha)
torque_max_x = self.center_x - self.radius * _np.sin(alpha)
torque_max_y = self.center_y + self.radius * _np.cos(alpha)
return torque_max, torque_max_x, torque_max_y
@staticmethod
def compute_slope(line):
"""
Compute slope of the line.
Parameters
----------
line: list[float, float]
Returns
-------
slope: float
"""
[[x1, x2], [y1, y2]] = line
return (y2 - y1)/(x2 - x1)
def get_output_power(self):
"""
Determine induction motor circle desired output power point.
Obtain the point on the induction motor circle diagram which
corresponds to the desired output power
"""
[[x1, x2], [y1, y2]] = self.secondary_current_line
alpha = _np.arctan((y2 - y1) / (x2 - x1))
[center_x, center_y] = self.center
[[_, no_load_x], [_, _]] = self.no_load_line
beta = _np.arcsin(
(self.operating_power / self.power_scale + (center_x - no_load_x) * _np.tan(alpha)) *
_np.cos(alpha) / self.radius)
beta_0 = alpha + beta
beta_1 = -alpha + beta
# high slip
p_x_1 = center_x + self.radius * _np.cos(beta_0)
p_y_1 = center_y + self.radius * _np.sin(beta_0)
# low slip
p_x_2 = center_x - self.radius * _np.cos(beta_1)
p_y_2 = center_y + self.radius * _np.sin(beta_1)
return [p_x_1, p_y_1], [p_x_2, p_y_2]
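# A minimal usage sketch with illustrative (not measured) test readings: build
# the circle diagram from no-load and blocked-rotor data, then inspect results.
if __name__ == "__main__":
    no_load = {'V0': 400, 'I0': 9, 'W0': 1310}
    blocked_rotor = {'Vsc': 200, 'Isc': 45, 'Wsc': 5600}
    motor = InductionMotorCircle(no_load, blocked_rotor, output_power=15000)
    print(motor())  # computed losses, slip, power factor, efficiency
    motor.plot()    # draw the circle diagram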
--- StarcoderdataPython sample 3544503 ---
<reponame>heavyairship/UselessMachine<filename>useless_machine.py
import threading
class UselessMachine(object):
def __init__(self):
self.switches = ["OFF" for x in range(10)]
self.queue = []
self.cond = threading.Condition()
self.finished = False
def flipOn(self, idx):
self.cond.acquire()
if self.switches[idx] == "OFF":
self.queue.append(idx)
self.switches[idx] = "ON"
self.cond.notify()
self.cond.release()
def flipOff(self):
out = 0
self.cond.acquire()
while len(self.queue) == 0 and not self.finished:
self.cond.wait()
if self.finished:
out = -1
while len(self.queue) > 0:
idx = self.queue.pop(0)
print("OFF %s" % idx)
self.switches[idx] = "OFF"
self.cond.release()
return out
def input(self):
while True:
self.cond.acquire()
idx_raw = input("ON ")
if idx_raw == 'q':
self.finished = True
self.cond.notify()
self.cond.release()
if self.finished:
return
try:
idx = int(idx_raw)
except ValueError:
continue
if idx >= len(self.switches):
continue
self.flipOn(idx)
def output(self):
while self.flipOff() == 0:
pass
uselessMachine = UselessMachine()
inputThread = threading.Thread(target=uselessMachine.input)
outputThread = threading.Thread(target=uselessMachine.output)
inputThread.start()
outputThread.start()
inputThread.join()
outputThread.join()
--- StarcoderdataPython sample 9680345 ---
import os
import re
import sys
from contextlib import contextmanager
import click
import click_pathlib
import kconfiglib
from listconfig import print_tree
@contextmanager
def quietify(verbose):
if verbose:
yield None
return
try:
with open(os.devnull, 'w') as devnull:
sys.stderr = devnull
yield None
finally:
sys.stderr = sys.__stderr__
@click.command()
@click.argument('kconfig_path', type=click_pathlib.Path(exists=True))
@click.argument('dotconfig_path', type=click_pathlib.Path(exists=True))
@click.option('--arch', default=None, help='ARCH in Linux kernel. Inferred by default.')
@click.option('--help-lines', '-l', default=2, help='Number of lines to pick from a config help.')
@click.option('--verbose', '-v', is_flag=True, help='Enable warnings during Kconfig analysis.')
def main(kconfig_path, dotconfig_path, arch, help_lines, verbose):
kconfig_path, dotconfig_path = kconfig_path.absolute(), dotconfig_path.absolute()
    if not kconfig_path.is_file():
        print(f'Specified Kconfig path {kconfig_path} is not a file', file=sys.stderr)
        sys.exit(1)
ksrc = kconfig_path.parent
if 'srctree' not in os.environ:
os.environ['srctree'] = str(ksrc)
if 'CC' not in os.environ:
os.environ['CC'] = 'gcc'
if arch is None:
r = re.compile('Linux/([^ ]+) ')
with open(dotconfig_path, 'r') as f:
detected = None
for l in f.readlines()[:5]:
res = r.search(l)
if res is None:
continue
detected = res.groups()[0]
if detected is None:
print('Specified .config has no expected header', file=sys.stderr)
            print('Please specify --arch manually', file=sys.stderr)
sys.exit(1)
arch = detected
if 'SRCARCH' not in os.environ:
os.environ['SRCARCH'] = os.environ.get('ARCH', arch)
with quietify(verbose):
kconfig = kconfiglib.Kconfig(str(kconfig_path))
    if not dotconfig_path.is_file():
        print(f'Specified .config path {dotconfig_path} is not a file', file=sys.stderr)
        sys.exit(1)
with quietify(verbose):
kconfig.load_config(str(dotconfig_path))
print_tree(kconfig.top_node.list, help_lines)
main()
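# Example invocation of this CLI (script name and paths are illustrative
# assumptions):
#
#     python listconfig_cli.py /path/to/linux/Kconfig /path/to/linux/.config -l 3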
--- StarcoderdataPython sample 11267127 ---
from __future__ import print_function
def early_stopping_command_parser(parser):
parser.add_argument('--es_m', dest='early_stopping_method', choices=['WorstTimesX', 'StopAfterN', 'None'],
help='Early stopping method', default='None')
parser.add_argument('--es_n', help='N parameter (for StopAfterN)', default=5, type=int)
parser.add_argument('--es_x', help='X parameter (for WorstTimesX)', default=2., type=float)
    parser.add_argument('--es_min_wait', help='Minimum wait before stopping (for WorstTimesX)', default=1., type=float)
parser.add_argument('--es_LiB', help='Lower is better for validation score.', action='store_true')
def get_early_stopper(args):
if args.early_stopping_method == 'StopAfterN':
return StopAfterN(n=args.es_n, higher_is_better=(not args.es_LiB))
elif args.early_stopping_method == 'WorstTimesX':
return WaitWorstCaseTimesX(x=args.es_x, min_wait=args.es_min_wait, higher_is_better=(not args.es_LiB))
else:
return None
class EarlyStopperBase(object):
def __init__(self, higher_is_better=True):
super(EarlyStopperBase, self).__init__()
self.higher_is_better = higher_is_better
def __call__(self, epochs, val_costs):
if not self.higher_is_better:
val_costs = [-i for i in val_costs]
return self.decideStopping(epochs, val_costs)
def decideStopping(self, epochs, val_costs):
pass
class StopAfterN(EarlyStopperBase):
"""
    Stops after N consecutive non-improving costs
"""
def __init__(self, n=3, **kwargs):
super(StopAfterN, self).__init__(**kwargs)
self.n = n
def decideStopping(self, epochs, val_costs):
if len(val_costs) <= self.n:
return False
for i in range(self.n):
if val_costs[-1 - i] > val_costs[-2 - i]:
return False
return True
class WaitWorstCaseTimesX(EarlyStopperBase):
"""
Stops if the number of epochs since the best cost is X times larger than the maximum number of
epochs between two consecutive best.
"""
def __init__(self, x=2., min_wait=1., **kwargs):
super(WaitWorstCaseTimesX, self).__init__(**kwargs)
self.x = x
self.min_wait = min_wait
def decideStopping(self, epochs, val_costs):
# find longest wait between two best scores
last_best = val_costs[0]
last_best_epoch = epochs[0]
longest_wait = 0
for epoch, cost in zip(epochs[1:], val_costs[1:]):
if cost > last_best:
wait = epoch - last_best_epoch
last_best_epoch = epoch
last_best = cost
if wait > longest_wait:
longest_wait = wait
current_wait = epochs[-1] - last_best_epoch
if longest_wait == 0:
return current_wait > self.min_wait
print('current wait : ', round(current_wait, 3), ' longest wait : ', round(longest_wait, 3), ' ratio : ',
current_wait / longest_wait, ' / ', self.x)
return current_wait > max(self.min_wait, longest_wait * self.x)
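# A minimal usage sketch (hypothetical validation scores): stop once the score
# has failed to improve for three consecutive epochs.
if __name__ == '__main__':
    stopper = StopAfterN(n=3, higher_is_better=True)
    epochs = [1, 2, 3, 4, 5]
    val_scores = [0.70, 0.72, 0.71, 0.70, 0.69]
    print(stopper(epochs, val_scores))  # True: three consecutive non-improvements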
--- StarcoderdataPython sample 8191981 ---
import logging
logger = logging.getLogger(__name__)
class Bootstrap:
def __init__(self):
pass
def init(self):
logger.info("Boostrap of %s", type(self))
--- StarcoderdataPython sample 4892515 ---
# =============================================================================
# ANALYZE CHARGING POINTS IN SPAIN
# =============================================================================
"""
Process:
    Input:
        - /home/tfm/Documentos/TFM/Datasets/PuntosRecarga/puntos_carga_filt_Espana.csv
    Output:
        - /home/tfm/Documentos/TFM/Datasets/PuntosRecarga/GoogleMapsAPI/puntos_carga_Espana.csv
        - /home/tfm/Documentos/TFM/Datasets/PuntosRecarga/puntos_carga_reduced_Espana.csv
"""
# Load the libraries
import sys
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import operator
from sklearn.cluster import DBSCAN
from geopy.distance import great_circle
from shapely.geometry import MultiPoint
# Disable warnings (default is 'warn')
pd.options.mode.chained_assignment = None
# 1.- Function definitions ----------------------------------------
#------------------------------------------------------------------
def extraer_provincia(lugares, formatted_address):
"""
Definicion de la funcion extraer_provincia:
Funcion para extraer la provincia de la dirección
Parametros
----------
lugares: string
Strings que contienen los lugares de los que
extraer la provincia
formatted_address: string
String e con la direccion de los lugares
Returns
------
x: string
String que contiene la provincia del lugar
Ejemplo
-------
>>> df["province"] = df.apply(lambda a: extraer_provincia(lugares,
a['formatted_address']), axis = 1)
"""
x = ''
for j in lugares:
if j in formatted_address:
x = j
if x == 'Benahadux':
x = 'Almería'
if x in ('Santiago de Compostela', 'Esclavitud', 'Narón'):
x = 'A Coruña'
if x == 'Illes Balears':
x = 'Baleares'
if x in ('Martorell', 'Pedraforca', 'Mas Roca'):
x = 'Barcelona'
if x in ('Ambrosero', 'Vega Reinosa'):
x = 'Cantabria'
if x in ('Belmonte', 'Atalaya del Cañavate'):
x = 'Cuenca'
if x == 'Cdad. Real':
x = 'Ciudad Real'
if x in ('Gipuzkoa', 'Donostia', 'SS'):
x = 'Guipúzcoa'
if x == 'Girona':
x = 'Gerona'
if x == 'Trijueque':
x = 'Guadalajara'
if x in ('Tricio', 'El Villar de Arnedo'):
x = 'La Rioja'
if x == 'Bembibre':
x = 'León'
if x == '28946':
x = 'Madrid'
if x == 'Marbella':
x = 'Málaga'
if x in ('Lorca', 'Diseminado Salinas', '30110', '30848'):
x = 'Murcia'
if x in ('Navarre', 'Tudela', 'Leitza', 'Azagra', 'Imárcoain'):
x = 'Navarra'
if x == 'PO':
x = 'Pontevedra'
if x == '41012':
x = 'Sevilla'
if x in ('Oropesa', 'Ocaña'):
x = 'Toledo'
if x in ('Portugalete', 'BI', 'Bizkaia', 'Leioa'):
x = 'Vizcaya'
if x in ('València', '46370', 'Carcagente'):
x = 'Valencia'
if x in ('Gasteiz', 'Araba'):
x = 'Álava'
return x
def get_centermost_point(cluster):
"""
Definicion de la funcion get_centermost_point:
Funcion para que encontrar el punto más cercano al centro
del cluster
Parametros
----------
cluster: Pandas Series
Series que contiene la información del cluster
Returns
------
tuple(centermost_point): Pandas Dataframe
Tupla que contiene el id del cluster y el punto más
cercano al centro del cluster
Ejemplo
-------
>>> centermost_points = clusters.map(get_centermost_point)
"""
centroid = (MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
centermost_point = min(cluster, key=lambda point: great_circle(point, centroid).m)
return tuple(centermost_point)
def clustering_dbscan(df_filt):
"""
Definicion de la funcion clustering_dbscan:
Funcion para realizar un clustering de tipo DBSCAN
que reduce la dimensión del Dataframe de entrada
de manera geográficamente uniforme
Parametros
----------
df: Pandas Dataframe
Dataframe que contiene el dataset sobre el que se
va a hacer el clustering
Returns
------
rs: Pandas Dataframe
Dataframe que contiene el dataset con dimensión
reducida
Ejemplo
-------
>>> rs = clustering_dbscan(df_filt)
"""
coords = df_filt[["latitude", "longitude"]].values
kms_per_radian = 6371.0088
epsilon = 5 / kms_per_radian
db = DBSCAN(eps=epsilon, min_samples=2, algorithm="ball_tree", metric="haversine").fit(np.radians(coords))
cluster_labels = db.labels_
num_clusters = len(set(cluster_labels))
clusters = pd.Series([coords[cluster_labels == n] for n in range(num_clusters-1)])
print("Number of clusters: {}".format(num_clusters))
centermost_points = clusters.map(get_centermost_point)
lats, lons = zip(*centermost_points)
rep_points = pd.DataFrame({"longitude":lons, "latitude":lats})
rs = rep_points.apply(lambda row: df_filt[(df_filt["latitude"]==row["latitude"]) & (df_filt["longitude"]==row["longitude"])].iloc[0], axis=1)
return rs
# 2.- Main --------------------------------------------------------
#------------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) != 2:
print("ERROR: This program needs at least 2 parameters: program_name program_type")
sys.exit(1)
else:
print("The number of arguments is ", len(sys.argv))
program_name = sys.argv[0]
# program type can be: "FILTER", "REDUCE", "ALL"
program_type = sys.argv[1]
print("The program run is: ",program_name, program_type)
path_filt = "/home/tfm/Documentos/TFM/Datasets/PuntosRecarga/puntos_carga_filt_Espana.csv"
path_merged = "/home/tfm/Documentos/TFM/Datasets/PuntosRecarga/GoogleMapsAPI/puntos_carga_Espana.csv"
path_reduced = "/home/tfm/Documentos/TFM/Datasets/PuntosRecarga/puntos_carga_reduced_Espana.csv"
if program_type != "REDUCE":
        # 2.1.- Merge charging points by region (CCAA) --------------------
        #------------------------------------------------------------------
        # Merge the csv files of the different regions into a single csv and dump it to a new file
comunidades = ["Andalucia","Aragon","Asturias",
"Cantabria","CastillaLaMancha","CastillayLeon",
"Cataluña","ComunidadValenciana","Extremadura","Galicia",
"ComunidaddeMadrid","LaRioja","Murcia","Navarra","PaisVasco"]
        # Excluding islands (neither the Canary Islands nor the Balearic Islands)
frames = []
for comunidad in comunidades:
filename = "puntos_carga_" + comunidad + ".csv"
path = "/home/tfm/Documentos/TFM/Datasets/PuntosRecarga/GoogleMapsAPI/" + filename
df = pd.read_csv(path)
frames.append(df)
result = pd.concat(frames)
result.to_csv(path_merged, index=False)
        # 2.2.- Filter the dataframe to keep only the coordinates -------------------
        #----------------------------------------------------------------------------
df_coord = result[["name","formatted_address","geometry.location.lat","geometry.location.lng"]]
df_coord.rename(columns={"geometry.location.lat": "latitude", "geometry.location.lng": "longitude"}, inplace=True)
print(df_coord.shape)
df_coord_filt = df_coord.query("longitude > -25 & latitude < 44")
print(df_coord_filt.shape)
BBox = ((df_coord_filt.longitude.min(), df_coord_filt.longitude.max(),
df_coord_filt.latitude.min(), df_coord_filt.latitude.max()))
        # 2.3.- Extract the province --------------------------------------
        #------------------------------------------------------------------
df = df_coord_filt.drop_duplicates()
lugares = ['A Coruña', 'Álava', 'Albacete', 'Alicante', 'Almería', 'Ambrosero',
'Araba', 'Asturias', 'Atalaya del Cañavate', 'Ávila', 'Azagra',
'Badajoz', 'Baleares', 'Barcelona', 'Bembibre', 'Belmonte',
'Benahadux', 'BI', 'Bizkaia', 'Burgos', 'Cáceres', 'Cádiz',
'Cantabria', 'Carcagente', 'Castellón', 'Ceuta', 'Ciudad Real',
'Cdad. Real', 'Córdoba', 'Cuenca', 'Diseminado Salinas',
'El Villar de Arnedo', 'Esclavitud', 'Gasteiz', 'Gerona',
'Girona', 'Granada', 'Guadalajara', 'Guipúzcoa', 'Gipuzkoa',
'Huelva', 'Huesca', 'Illes Balears', 'Imárcoain', 'Jaén',
'La Rioja', 'Las Palmas', 'León', 'Leioa', 'Leitza', 'Lleida',
'Lorca', 'Lugo', 'Madrid', 'Málaga', 'Marbella', 'Martorell',
'Mas Roca', 'Melilla', 'Murcia', 'Narón', 'Navarra', 'Navarre',
'Ocaña', 'Ourense', 'Oropesa', 'Palencia', 'Pedraforca', 'PO',
'Pontevedra', 'Portugal', 'Portugalete', 'SS', 'S.C. Tenerife',
'Salamanca', 'Santiago de Compostela', 'Segovia', 'Sevilla',
'Soria', 'Tarragona', 'Teruel', 'Toledo', 'Tricio', 'Trijueque',
'Tudela', 'Valencia', 'València', 'Valladolid', 'Vega Reinosa',
'Vizcaya', 'Zamora', 'Zaragoza', '28946', '30110', '30848', '41012',
'46370']
df["province"] = df.apply(lambda a: extraer_provincia(lugares, a['formatted_address']), axis = 1)
        # Check how many values are left unassigned
list(df['province']).count('')
        # Filter out Portugal and the Balearic Islands
df = df[df['province'] != 'Portugal']
df = df[df['province'] != 'Baleares']
# 2.4.- Output ----------------------------------------------------
#------------------------------------------------------------------
df.to_csv(path_filt, sep = ";", index = False)
if program_type != "FILTER":
        # Import the filtered csv into df_filt
df_filt = pd.read_csv(path_filt, sep=";")
print(df_filt.shape)
print(df_filt.province.unique())
dict_CCAA = {}
for provincia in df_filt.province.unique():
dict_CCAA[provincia] = df_filt[df_filt["province"] == provincia].shape
sort_CCAA = sorted(dict_CCAA.items(), key=operator.itemgetter(1), reverse=True)
print(sort_CCAA)
        # 2.6.- Filter by region (CCAA) ------------------------------------
        #------------------------------------------------------------------
        """
        The data will be filtered with a DBSCAN clustering algorithm
        that reduces the dimension in a geographically uniform way
        """
frames = []
provincias_muchos_puntos = ["Madrid","Valencia","Navarra","Murcia","Asturias","Cantabria","Barcelona"]
for provincia in df_filt.province.unique():
df_CCAA = df_filt[df_filt["province"] == provincia]
print(provincia, df_CCAA.shape)
if provincia in provincias_muchos_puntos:
# Clustering to reduce spatial dataset size with DBSCAN
# Source: https://geoffboeing.com/2014/08/clustering-to-reduce-spatial-data-set-size/
print("Clustering with DBSCAN")
rs = clustering_dbscan(df_CCAA)
fig, ax = plt.subplots(figsize=[10, 6])
rs_scatter = ax.scatter(rs["longitude"], rs["latitude"], c="#99cc99", edgecolor="None", alpha=0.7, s=120)
df_scatter = ax.scatter(df_CCAA["longitude"], df_CCAA["latitude"], c="k", alpha=0.9, s=3)
ax.set_title("Full data set vs DBSCAN reduced set")
ax.set_xlabel("Longitude")
ax.set_ylabel("Latitude")
ax.legend([df_scatter, rs_scatter], ["Full set", "Reduced set"], loc="upper right")
plt.show()
else:
rs = df_CCAA
frames.append(rs)
result = pd.concat(frames)
# Export reduced dataframe to csv
result.to_csv(path_reduced, index=False)
print("Exported reduced dataframe with ",result.shape," to ",path_reduced)
print("End of script")
--- StarcoderdataPython sample 8151314 ---
<filename>src/pfmsoft/util/file/csv.py
"""Utilities for handling csv files
Todo:
* Write some tests
Created on Nov 27, 2017
@author: croaker
"""
# TODO handle not enough fields, too many fields.
import csv
import logging
from collections import namedtuple
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
NamedTuple,
Optional,
Sequence,
TextIO,
)
#### setting up logger ####
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def write_list_of_dicts_to_csv(
data: Sequence[Dict[str, Any]],
file_path: Path,
mode: str = "w",
parents: bool = True,
exist_ok: bool = True,
field_names: Optional[Sequence[str]] = None,
) -> int:
"""
Save a list of dicts to csv. Makes parent directories if they don't exist.
:param data: Data to save.
:param file_path: Path to saved file. Existing files will be overwritten.
:param mode: File mode to use. As used in `open`. Limited to 'w' or 'x'. Defaults to 'w'.
:param parents: Make parent directories if they don't exist. As used by `Path.mkdir`, by default True
:param exist_ok: Suppress exception if parent directory exists as directory. As used by `Path.mkdir`, by default True
:param field_names: Optionally reorder fields, by default None
    :return: records written.
:raises Exception: Any exception that can be raised from Path.mkdir or Path.open
"""
# TODO rewrite to handle list of objects, see collection.sort.index_objects
# this would need a list of fields, so make helpers list of dicts etc?
try:
if mode not in ["w", "x"]:
raise ValueError(f"Unsupported file mode '{mode}'.")
if parents:
file_path.parent.mkdir(parents=parents, exist_ok=exist_ok)
if field_names is None:
field_names = list(data[0].keys())
with file_path.open(mode, encoding="utf8", newline="") as file_out:
writer = csv.DictWriter(file_out, fieldnames=field_names)
writer.writeheader()
total_count = 0
for count, item in enumerate(data):
writer.writerow(item)
total_count = count
return total_count + 1
except Exception as error:
logger.exception("Error trying to save data to %s", file_path, exc_info=True)
raise error
def write_named_tuple_to_csv(
data: Sequence[NamedTuple],
file_path: Path,
mode: str = "w",
parents: bool = True,
exist_ok: bool = True,
) -> int:
try:
if mode not in ["w", "x"]:
raise ValueError(f"Unsupported file mode '{mode}'.")
if parents:
file_path.parent.mkdir(parents=parents, exist_ok=exist_ok)
with file_path.open(mode, encoding="utf8", newline="") as file_out:
writer = csv.writer(file_out)
writer.writerow(data[0]._fields)
total_count = 0
for count, item in enumerate(data):
writer.writerow(item)
total_count = count
return total_count + 1
except Exception as error:
logger.exception("Error trying to save data to %s", file_path, exc_info=True)
raise error
def write_list_to_csv(
data: Iterable[Sequence],
file_path: Path,
mode: str = "w",
parents: bool = True,
exist_ok: bool = True,
has_header: bool = True,
headers: Optional[Sequence[str]] = None,
) -> int:
"""
Writes an iterator of lists to a file in csv format.
:param data: [description]
:param file_path: [description]
:param mode: File mode to use. As used in `open`. Limited to 'w' or 'x'. Defaults to 'w'.
:param parents: [description], by default True
:param exist_ok: [description], by default True
:param has_header: First row of supplied data is the header, by default True
:param headers: Headers to use if not supplied in data, by default None
:returns: Number of rows saved, not including a header
:raises ValueError: Number of items in a row does not match number of headers.
"""
# TODO how does this handle missing fields and excess fields?
try:
if mode not in ["w", "x"]:
raise ValueError(f"Unsupported file mode '{mode}'.")
if parents:
file_path.parent.mkdir(parents=parents, exist_ok=exist_ok)
with file_path.open(mode, encoding="utf8", newline="") as file_out:
writer = csv.writer(file_out)
iterable_data = iter(data)
if has_header:
header_row = next(iterable_data)
writer.writerow(header_row)
else:
if headers is not None:
header_row = headers
writer.writerow(header_row)
else:
header_row = []
total_count = 0
for count, item in enumerate(iterable_data):
                # validate every row against the header, whether the header
                # came from the data or was supplied explicitly
                if len(header_row) > 0 and len(header_row) != len(item):
                    raise ValueError(
                        f"Header has {len(header_row)} items but row has {len(item)}"
                    )
writer.writerow(item)
total_count = count
return total_count + 1
except Exception as error:
logger.exception("Error trying to save data to %s", file_path, exc_info=True)
raise error
# TODO this code can be easily extended to support reading and
# writing arbitrary objects through factories.
def read_csv_to_row_factory(
file_in: TextIO,
row_factory: Callable[[Sequence[str], dict], Any],
headers_in_first_row: bool = True,
context: Optional[dict] = None,
) -> Generator[Any, None, None]:
# TODO change context to header or header_override
if context is None:
context = {}
reader = csv.reader(file_in)
if headers_in_first_row:
headers = next(reader)
else:
headers = []
factory = row_factory(headers, context)
return (factory(*row) for row in reader)
def named_tuple_factory(headers, context):
header_override = context.get("header_override", None)
if header_override is not None:
headers = header_override
n_tuple = namedtuple("CsvRow", headers)
def factory(row):
if len(headers) != len(row):
raise ValueError(f"Header has {len(headers)} but row has {len(row)} items")
return n_tuple(*row)
return factory
def tuple_factory(_headers, _context):
def factory(row):
return tuple(row)
return factory
def dict_factory(headers, context):
header_override = context.get("header_override", None)
if header_override is not None:
headers = header_override
def factory(row):
if len(headers) != len(row):
raise ValueError(f"Header has {len(headers)} but row has {len(row)} items")
return dict(zip(headers, row))
return factory
# def read_csv_to_named_tuple(
# file_in, use_header_row: bool, field_names: Optional[Sequence[str]] = None
# ):
# if field_names is None:
# field_names = []
# reader = csv.reader(file_in)
# if use_header_row:
# field_names = next(reader)
# CsvRow = namedtuple("CsvRow", field_names)
# return (CsvRow(*row) for row in reader)
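# A minimal usage sketch (hypothetical path and records, not part of the
# original module):
if __name__ == "__main__":
    rows = [{"name": "a", "value": 1}, {"name": "b", "value": 2}]
    written = write_list_of_dicts_to_csv(rows, Path("example.csv"))
    print(f"wrote {written} records")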
--- StarcoderdataPython sample 324032 ---
""" listener.py """
import pybreaker
import requests
import time
import random
from retrying import retry
#@retry(stop_max_attempt_number=3, wait_exponential_multiplier=3000,wait_jitter_max=500)
#def get_time(cb):
#try:
#response = requests.get('http://localhost:3001/time', timeout=3.0)
#except (requests.exceptions.ConnectionError,requests.exceptions.Timeout):
#return "still fail, wait for half-open"
#else:
#cb.close()
#return "retry successfully, close CircuitBreaker"
def retry_request(cb, max_attempt, wait_jitter_max, new_state):
    avg_interval = int(cb.reset_timeout / max_attempt)
    for i in range(max_attempt):
        try:
            response = requests.get('http://localhost:3001/time', timeout=3.0)
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            print("Still fail, attempt: %d, current state is %r" % (i, new_state.name))
            time.sleep(avg_interval - (random.randint(0, wait_jitter_max) / 1000.0))
        else:
            cb.half_open()
            print("succeed at attempt: %d, change state to be %r" % (i, cb.current_state))
            break
class LogListener(pybreaker.CircuitBreakerListener):
""" Listener used to log circuit breaker events. """
def __init__(self, app):
self.app = app
def state_change(self, cb, old_state, new_state):
"Called when the circuit breaker `cb` state changes."
self.app.logger.error('circuit breaker state change: %r => %r, reset timeout is %r, + %r',
old_state.name, new_state.name, cb.reset_timeout,cb.current_state)
#retry logic
if (old_state.name == "closed" and new_state.name == "open") or (old_state.name == "half-open" and new_state.name == "open"):
print "CircuitBreaker is Open, start retrying before half-open"
retry_request(cb, 3, 500, new_state)
def failure(self, cb, exc):
""" This callback function is called when a function called by the
circuit breaker `cb` fails.
"""
self.app.logger.error('failure: %r, count: %r', exc, cb.fail_counter)
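# A minimal wiring sketch (assumes a Flask-style `app` with a logger; the
# breaker parameters are illustrative, not prescribed by this module):
#
#     breaker = pybreaker.CircuitBreaker(fail_max=3, reset_timeout=30,
#                                        listeners=[LogListener(app)])
#
#     @breaker
#     def get_time():
#         return requests.get('http://localhost:3001/time', timeout=3.0)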
--- StarcoderdataPython sample 3429050 ---
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, list_output
@click.command('get_group_users')
@click.argument("group_id", type=str)
@pass_context
@custom_exception
@list_output
def cli(ctx, group_id):
"""Get the list of users associated to the given group.
Output:
List of group users' info
"""
return ctx.gi.groups.get_group_users(group_id)
--- StarcoderdataPython sample 6464387 ---
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
from torchsummary import summary
device = torch.device('cuda:0')
def loadtraindata():
train_path = r'E:\Python\Medical big data\data\train'
trainset = torchvision.datasets.ImageFolder(train_path,transform=transforms.Compose([transforms.Resize((125, 125)),transforms.ToTensor()]))
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10,shuffle=True, num_workers=2)
return trainloader
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 64, 5)
self.fc1 = nn.Linear(50176, 120)
self.fc2 = nn.Linear(120, 60)
self.fc3 = nn.Linear(60, 2)
conv_output_size=None
    def forward(self, x):  # forward pass
        #x=x.cuda(device)
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        conv_output_size = x.size()[1]*x.size()[2]*x.size()[3]
        x = x.view(-1, conv_output_size)  # .view() reshapes a tensor without changing the total number of elements
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        # F.nll_loss in the training loop expects log-probabilities
        x = F.log_softmax(x, dim=1)
        return x
def loadtestdata():
test_path = r'E:\Python\Medical big data\data\valid'
testset = torchvision.datasets.ImageFolder(test_path,transform=transforms.Compose([transforms.Resize((125, 125)),transforms.ToTensor()]))
testloader = torch.utils.data.DataLoader(testset, batch_size=10,shuffle=True, num_workers=2)
return testloader
def trainandsave():
trainloader = loadtraindata()
    # network architecture
net = Net()
summary(net.cuda(), input_size=(3, 125, 125))
net.to(device)
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
#criterion = nn.CrossEntropyLoss()
    # training loop
for epoch in range(20):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
#data=data.cuda(device)
inputs, labels = data
inputs, labels = Variable(inputs), Variable(labels)
inputs=inputs.cuda(device)
labels=labels.cuda(device)
            optimizer.zero_grad()  # zero the gradients; backprop otherwise accumulates gradients from the previous iteration
outputs = net(inputs)
print(outputs)
print(labels)
loss = F.nll_loss(outputs, labels)
#loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 200 == 199:
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 200))
running_loss = 0.0
print('Finished Training')
    # save the network
    torch.save(net, 'net.pkl')  # save the whole network (structure and parameters)
    torch.save(net.state_dict(), 'net_params.pkl')  # save only the model parameters
if __name__ == '__main__':
#print(torch.cuda.is_available())
trainandsave()
--- StarcoderdataPython sample 1834733 ---
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
from collections import Counter
from time import sleep
from typing import Callable, Dict
# pip install prototools
from prototools import progressbar
CODIGO: Dict[str, Dict[str, float]] = {
"00": {"kg": 0.25, "precio": 300},
"01": {"kg": 0.5, "precio": 500},
"10": {"kg": 1, "precio": 1200},
"11": {"kg": 2, "precio": 2400},
}
def bar(n: int) -> None:
print("Cargando datos de ventas...")
for _ in progressbar(range(n)):
sleep(0.05)
def read_data() -> Dict[str, int]:
with open("database/ventas.txt", "r") as f:
data = f.read().splitlines()
return dict(Counter(data))
def totales(data: Dict[str, float], f: Callable, key: str) -> int:
return sum(f(codigo, key) for codigo in data)
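# A minimal sketch of how `totales` might be combined with the pieces above
# (the accumulator lambda is an assumption, not taken from the source):
#
#     data = read_data()  # e.g. {"00": 3, "11": 2}
#     total_kg = totales(data, lambda codigo, key: CODIGO[codigo][key] * data[codigo], "kg")
#     total_precio = totales(data, lambda codigo, key: CODIGO[codigo][key] * data[codigo], "precio")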
--- StarcoderdataPython sample 8071160 ---
<reponame>jbrightuniverse/hungarianalg<filename>hungarianalg/alg.py
"""
Hungarian Algorithm No. 5 by <NAME>
Vancouver School of Economics, UBC
8 March 2021
Based on http://www.cse.ust.hk/~golin/COMP572/Notes/Matching.pdf and https://montoya.econ.ubc.ca/Econ514/hungarian.pdf
"""
import numpy as np
class Node:
"""A simple node for an alternating tree."""
def __init__(self, val, parent = None):
self.val = val
self.parent = parent
def hungarian(matrx):
"""Runs the Hungarian Algorithm on a given matrix and returns the optimal matching with potentials."""
# Step 1: Prep matrix, get size
matrx = np.array(matrx)
size = matrx.shape[0]
# Step 2: Generate trivial potentials
rpotentials = []
cpotentials = [0 for i in range(size)]
for i in range(len(matrx)):
row = matrx[i]
rpotentials.append(max(row))
# Step 3: Initialize alternating tree
matching = []
S = {0}
T = set()
tree_root = Node(0)
x_nodes = {0: tree_root}
# Create helper functions
def neighbours(wset):
"""Finds all firms in equality graph with workers in wset."""
result = []
for x in wset:
# get row of firms for worker x
nbs = matrx[x, :]
for y in range(len(nbs)):
# check for equality
if nbs[y] == rpotentials[x] + cpotentials[y]:
result.append([x, y])
return result
def update_potentials():
"""Find the smallest difference between treed workers and untreed firms
and use it to update potentials."""
# when using functions in functions, if modifying variables, call nonlocal
nonlocal rpotentials, cpotentials
big = np.inf
# iterate over relevant pairs
for dx in S:
for dy in set(range(size)) - T:
# find the difference and check if its smaller than any we found before
weight = matrx[dx, dy]
alpha = rpotentials[dx] + cpotentials[dy] - weight
if alpha < big:
big = alpha
# apply difference to potentials as needed
for dx in S:
rpotentials[dx] -= big
for dy in T:
cpotentials[dy] += big
# Step 4: Loop while our matching is too small
while len(matching) != size:
# Step A: Compute neighbours in equality graph
NS = neighbours(S)
if set([b[1] for b in NS]) == T:
# Step B: If all firms are in the tree, update potentials to get a new one
update_potentials()
NS = neighbours(S)
# get the untreed firm
pair = next(n for n in NS if n[1] not in T)
if pair[1] not in [m[1] for m in matching]:
# Step D: Firm is not matched so add it to matching
matching.append(pair)
# Step E: Swap the alternating path in our alternating tree attached to the worker we matched
source = x_nodes[pair[0]]
matched = 1
            while source.parent is not None:
above = source.parent
if matched:
# if previously matched, this should be removed from matching
matching.remove([source.val, above.val])
else:
# if previous was a remove, this is a match
matching.append([above.val, source.val])
matched = 1 - matched
source = above
# Step F: Destroy the tree, go to Step 4 to check completion, and possibly go to Step A
free = list(set(range(size)) - set([m[0] for m in matching]))
if len(free):
tree_root = Node(free[0])
x_nodes = {free[0]: tree_root}
S = {free[0]}
T = set()
else:
# Step C: Firm is matched so add it to the tree and go back to Step A
matching_x = next(m[0] for m in matching if m[1] == pair[1])
S.add(matching_x)
T.add(pair[1])
source = x_nodes[pair[0]]
y_node = Node(pair[1], source)
x_node = Node(matching_x, y_node)
x_nodes[matching_x] = x_node
revenues = [matrx[m[0], m[1]] for m in matching]
class Result:
"""A simple response object."""
def __init__(self, match, revenues, row_weights, col_weights, revenue_sum):
self.match = match
self.revenues = revenues
self.row_weights = row_weights
self.col_weights = col_weights
self.revenue_sum = revenue_sum
def __str__(self):
size = len(self.match)
maxlen = max(len(str(max(self.revenues))), len(str(min(self.revenues))))
baselist = [[" "*maxlen for i in range(size)] for j in range(size)]
for i in range(size):
entry = self.match[i]
baselist[entry[0]][entry[1]] = str(self.revenues[i]).rjust(maxlen)
formatted_list = '\n'.join([str(row) for row in baselist])
return f"Matching:\n{formatted_list}\n\nRow Potentials: {self.row_weights}\nColumn Potentials: {self.col_weights}"
return Result(matching, revenues, rpotentials, cpotentials, sum(revenues))
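# A minimal usage sketch: match two workers to two firms on a 2x2 revenue
# matrix (the values are illustrative).
if __name__ == "__main__":
    result = hungarian([[3, 5],
                        [4, 2]])
    print(result)
    print("Total revenue:", result.revenue_sum)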
--- StarcoderdataPython sample 166464 ---
# TODO: Only require market stats that are being used by ML models
# TODO: Allow storage/retrieval of multiple markets
""" Allows storage/retrieval for custom market data instead of automatic gathering """
from api import api, db
from api.helpers import HTTP_CODES, query_to_dict, validate_db
from api.models.market import Data as DataModel
from flask import abort
from flask_restful import Resource
from webargs import fields
from webargs.flaskparser import use_kwargs
data_kwargs = {
'id': fields.Integer(required=True),
'low': fields.Float(missing=None),
'high': fields.Float(missing=None),
'close': fields.Float(missing=None),
'volume': fields.Float(missing=None)
}
@api.resource('/api/private/market/')
class Data(Resource):
""" Market data at an instance in time """
@use_kwargs({'id': fields.Integer(missing=None)})
@validate_db(db)
def get(self, id):
if id is None:
return [query_to_dict(q) for q in DataModel.query.all()]
else:
return query_to_dict(DataModel.query.get_or_404(id))
@use_kwargs(data_kwargs)
@validate_db(db)
def post(self, id, low, high, close, volume):
try:
post_request = DataModel(id, low, high, close, volume)
db.session.add(post_request)
db.session.commit()
except: # ID already exists, use PUT
abort(HTTP_CODES.UNPROCESSABLE_ENTITY)
else:
return query_to_dict(post_request)
@use_kwargs(data_kwargs)
@validate_db(db)
def put(self, id, low, high, close, volume):
""" Loop through function args, only change what is specified
NOTE: Arg values of -1 clears since each must be >= 0 to be valid
"""
query = DataModel.query.get_or_404(id)
for arg, value in locals().items():
            if arg not in ('id', 'self', 'query') and value is not None:
if value == -1:
setattr(query, arg, None)
else:
setattr(query, arg, value)
db.session.commit()
return query_to_dict(query)
@use_kwargs({'id': fields.Integer(missing=None)})
@validate_db(db)
def delete(self, id):
try:
if id is None:
DataModel.query.delete()
db.session.commit()
else:
db.session.delete(DataModel.query.get_or_404(id))
db.session.commit()
except:
return {'status': 'failed'}
else:
return {'status': 'successful'}
--- StarcoderdataPython sample 12833058 ---
# Copyright (C) 2020 Denso IT Laboratory, Inc.
# All Rights Reserved
# Denso IT Laboratory, Inc. retains sole and exclusive ownership of all
# intellectual property rights including copyrights and patents related to this
# Software.
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from skimage.segmentation._slic import _enforce_label_connectivity_cython
def conv_in_relu(in_c, out_c):
return nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(in_c, out_c, 3, bias=False),
nn.InstanceNorm2d(out_c, affine=True),
nn.ReLU()
)
class CNNRIM(nn.Module):
"""
code for
T.Suzuki, ICASSP2020
Superpixel Segmentation via Convolutional Neural Networks with Regularized Information Maximization
https://arxiv.org/abs/2002.06765
Args:
in_c: int
number of input channels. (5 indicates RGB+XY)
n_spix: int
number of superpixels
n_filters: int
number of filters in convolution filters.
At i-th layer, output channels are n_filters * 2^{i+1}
n_layers: int
number of convolution layers
use_recons: bool
if True, use reconstruction loss for optimization
use_last_inorm: bool
if True, use instance normalization layer for output
"""
def __init__(self, in_c=5, n_spix=100, n_filters=32, n_layers=5, use_recons=True, use_last_inorm=True):
super().__init__()
self.n_spix = n_spix
self.use_last_inorm = use_last_inorm
self.use_recons = use_recons
out_c = n_spix
if use_recons:
out_c += 3
layers = []
for i in range(n_layers-1):
layers.append(conv_in_relu(in_c, n_filters << i))
in_c = n_filters << i
layers.append(nn.Conv2d(in_c, out_c, 1))
self.layers = nn.Sequential(*layers)
if use_last_inorm:
self.norm = nn.InstanceNorm2d(n_spix, affine=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.InstanceNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
spix = self.layers(x)
if self.use_recons:
recons, spix = spix[:, :3], spix[:, 3:]
else:
recons = None
if self.use_last_inorm:
spix = self.norm(spix)
return spix, recons
def mutual_information(self, logits, coeff):
"""
Mutual information defined in eq. (2)
Args:
logits: torch.Tensor
A Tensor of shape (b, n, h, w)
coeff: float
corresponding to lambda in eq. (2)
"""
prob = logits.softmax(1)
pixel_wise_ent = - (prob * F.log_softmax(logits, 1)).sum(1).mean()
marginal_prob = prob.mean((2, 3))
marginal_ent = - (marginal_prob * torch.log(marginal_prob + 1e-16)).sum(1).mean()
return pixel_wise_ent - coeff * marginal_ent
def smoothness(self, logits, image):
"""
Smoothness loss defined in eq. (3)
Args:
logits: torch.Tensor
A Tensor of shape (b, n, h, w)
image; torch.Tensor
A Tensor of shape (b, c, h, w)
"""
prob = logits.softmax(1)
dp_dx = prob[..., :-1] - prob[..., 1:]
dp_dy = prob[..., :-1, :] - prob[..., 1:, :]
di_dx = image[..., :-1] - image[..., 1:]
di_dy = image[..., :-1, :] - image[..., 1:, :]
return (dp_dx.abs().sum(1) * (-di_dx.pow(2).sum(1)/8).exp()).mean() + \
(dp_dy.abs().sum(1) * (-di_dy.pow(2).sum(1)/8).exp()).mean()
def reconstruction(self, recons, image):
"""
Reconstruction loss defined in eq. (4)
Args:
recons: torch.Tensor
A Tensor of shape (b, c, h, w)
image; torch.Tensor
A Tensor of shape (b, c, h, w)
"""
return F.mse_loss(recons, image)
def __preprocess(self, image, device="cuda"):
image = torch.from_numpy(image).permute(2, 0, 1).float()[None]
h, w = image.shape[-2:]
coord = torch.stack(torch.meshgrid(torch.arange(h), torch.arange(w))).float()[None]
input = torch.cat([image, coord], 1).to(device)
input = (input - input.mean((2, 3), keepdim=True)) / input.std((2, 3), keepdim=True)
return input
def optimize(self, image, n_iter=500, lr=1e-2, lam=2, alpha=2, beta=10, device="cuda"):
"""
optimizer and generate superpixels
Args:
image: numpy.ndarray
An array of shape (h, w, c)
n_iter: int
number of iterations for SGD
lr: float
learning rate
lam: float
used in eq. (2)
alpha: float
used in eq. (1)
beta: float
used in eq. (1)
device: ["cpu", "cuda"]
Return:
spix: numpy.ndarray
An array of shape (h, w)
"""
input = self.__preprocess(image, device)
optimizer = optim.Adam(self.parameters(), lr)
for i in range(n_iter):
spix, recons = self.forward(input)
loss_mi = self.mutual_information(spix, lam)
loss_smooth = self.smoothness(spix, input)
loss = loss_mi + alpha * loss_smooth
if recons is not None:
loss = loss + beta * self.reconstruction(recons, input[:, :3])
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f"[{i+1}/{n_iter}] loss {loss.item()}")
return self.calc_spixel(image, device)
def calc_spixel(self, image, device="cuda"):
"""
generate superpixels
Args:
image: numpy.ndarray
An array of shape (h, w, c)
device: ["cpu", "cuda"]
Return:
spix: numpy.ndarray
An array of shape (h, w)
"""
input = self.__preprocess(image, device)
spix, recons = self.forward(input)
spix = spix.argmax(1).squeeze().to("cpu").detach().numpy()
segment_size = spix.size / self.n_spix
min_size = int(0.06 * segment_size)
max_size = int(3.0 * segment_size)
spix = _enforce_label_connectivity_cython(
spix[None], min_size, max_size)[0]
return spix
if __name__ == "__main__":
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
parser = argparse.ArgumentParser()
parser.add_argument("--image", default=None, type=str, help="/path/to/image")
parser.add_argument("--n_spix", default=100, type=int, help="number of superpixels")
parser.add_argument("--n_filters", default=32, type=int, help="number of convolution filters")
parser.add_argument("--n_layers", default=5, type=int, help="number of convolution layers")
parser.add_argument("--lam", default=2, type=float, help="coefficient of marginal entropy")
parser.add_argument("--alpha", default=2, type=float, help="coefficient of smoothness loss")
parser.add_argument("--beta", default=2, type=float, help="coefficient of reconstruction loss")
parser.add_argument("--lr", default=1e-2, type=float, help="learning rate")
parser.add_argument("--n_iter", default=500, type=int, help="number of iterations")
parser.add_argument("--out_dir", default="./", type=str, help="output directory")
args = parser.parse_args()
device = "cuda" if torch.cuda.is_available() else "cpu"
model = CNNRIM(5, args.n_spix, args.n_filters, args.n_layers).to(device)
if args.image is None: # load sample image from scipy
import scipy.misc
img = scipy.misc.face()
else:
img = plt.imread(args.image)
spix = model.optimize(img, args.n_iter, args.lr, args.lam, args.alpha, args.beta, device)
plt.imsave(os.path.join(args.out_dir, "boundary.png"), mark_boundaries(img, spix))
np.save("spixel", spix) # save generated superpixel as .npy file
|
StarcoderdataPython
|
5018078
|
<filename>main.py
# This is the example of main program file which imports entities,
# connects to the database, drops/creates specified tables
# and populate some data to the database
from pony.orm import * # or just import db_session, etc.
import all_entities # This command make sure that all entities are imported
from base_entities import db # Will bind this database
from db_settings import current_settings # binding params
db.bind(*current_settings['args'], **current_settings['kwargs'])
from db_utils import connect
from db_loading import populate_database
if __name__ == '__main__':
sql_debug(True)
connect(db, drop_and_create='ALL') # drop_and_create=['Topic', 'Comment'])
populate_database()
|
StarcoderdataPython
|
6431058
|
<reponame>hwakabh/codewars
from unittest import TestCase
from unittest import main
from clock import past
class TestClock(TestCase):
def test_past(self):
ptr = [
(0, 1, 1, 61000),
(1, 1, 1, 3661000),
(0, 0, 0, 0),
(1, 0, 1, 3601000),
(1, 0, 0, 3600000),
]
for hr, mn, sc, exp in ptr:
with self.subTest(hr=hr, mn=mn, sc=sc, exp=exp):
self.assertEqual(past(h=hr, m=mn, s=sc), exp)
if __name__ == "__main__":
main(verbosity=2)
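# The clock module under test is not included here; a minimal implementation
# consistent with the expected values above (milliseconds after midnight) is:
#
#   def past(h, m, s):
#       return (h * 3600 + m * 60 + s) * 1000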
|
StarcoderdataPython
|
1735887
|
<gh_stars>0
import pytest
import mock
from nose.tools import * # noqa PEP8 asserts
from website import settings
import website.search.search as search
from website.search_migration.migrate import migrate
from website.search.util import build_query
from tests.base import OsfTestCase
from tests.utils import run_celery_tasks
from osf_tests.factories import AuthUserFactory, InstitutionFactory, ProjectFactory
from osf_tests.test_elastic_search import retry_assertion
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestContributorSearch(OsfTestCase):
def setUp(self):
super(TestContributorSearch, self).setUp()
with run_celery_tasks():
self.firstname = 'jane'
self.fullname1 = self.firstname + ' 1'
self.fullname2 = self.firstname + ' 2'
self.fullname3 = self.firstname + ' 3'
self.dummy1_fullname = self.firstname + ' dummy1'
self.dummy2_fullname = self.firstname + ' dummy2'
self.dummy3_fullname = self.firstname + ' dummy3'
self.dummy4_fullname = self.firstname + ' dummy4'
self.inst1 = inst1 = InstitutionFactory()
self.inst2 = inst2 = InstitutionFactory()
def create_user(name, insts):
user = AuthUserFactory(fullname=name)
for inst in insts:
user.affiliated_institutions.add(inst)
user.save()
return user
self.user1 = create_user(self.fullname1, (self.inst1,))
self.user2 = create_user(self.fullname2, (self.inst1, self.inst2))
self.user3 = create_user(self.fullname3, ())
create_user(self.dummy1_fullname, (self.inst1,))
create_user(self.dummy2_fullname, (self.inst1,))
create_user(self.dummy3_fullname, (self.inst2,))
create_user(self.dummy4_fullname, (self.inst2,))
self.inst1_users = [u.fullname for u in inst1.osfuser_set.all()]
self.inst2_users = [u.fullname for u in inst2.osfuser_set.all()]
# dummy_project
ProjectFactory(creator=self.user1, is_public=False)
# migrate() may not update elasticsearch-data immediately.
@retry_assertion(retries=10)
def test_search_contributors_from_my_institutions(self):
def search_contribs(current_user):
return search.search_contributor(
self.firstname,
current_user=current_user
)
# one institution
contribs = search_contribs(self.user1)
assert_equal(sorted(set([u['fullname'] for u in contribs['users']])),
sorted(set(self.inst1_users)))
# two institutions
contribs = search_contribs(self.user2)
assert_equal(sorted(set([u['fullname'] for u in contribs['users']])),
sorted(set(self.inst1_users) | set(self.inst2_users)))
# independent (no institution) -> from all institutions
contribs = search_contribs(self.user3)
assert_equal(len(contribs['users']), 7)
def test_search_contributors_from_my_institutions_after_rebuild_search(self):
migrate(delete=False, remove=False,
index=None, app=self.app.app)
# after migrate (= rebuild_search)
self.test_search_contributors_from_my_institutions()
def test_search_contributors_by_guid(self):
contribs = search.search_contributor(
self.user2._id,
current_user=self.user1
)
assert_equal(set([u['fullname'] for u in contribs['users']]),
set([self.user2.fullname]))
class TestEscape(OsfTestCase):
def test_es_escape(self):
import string
from website.search.util import es_escape
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters
assert_equal(es_escape('+-=&|!(){}[]^"~*?:/'),
'\+\-\=\&\|\!\(\)\{\}\[\]\^\\\"\~\*\?\:\/')
assert_equal(es_escape('"'), '\\\"') # " -> \"
assert_equal(es_escape('\\'), '\\\\') # \ -> \\
assert_equal(es_escape('><'), ' ') # whitespace
assert_equal(es_escape("'"), "'") # not escaped
other_punctuation = '#$%,.;@_`'
assert_equal(es_escape(other_punctuation), other_punctuation)
assert_equal(es_escape(string.letters), string.letters)
assert_equal(es_escape(string.octdigits), string.octdigits)
assert_equal(es_escape(string.whitespace), string.whitespace)
hiragana = ''.join([unichr(i) for i in range(12353, 12436)])
assert_equal(es_escape(hiragana), hiragana)
katakana = ''.join([unichr(i) for i in range(12449, 12533)])
assert_equal(es_escape(katakana), katakana)
zenkaku_hankaku = ''.join([unichr(i) for i in range(65281, 65440)])
assert_equal(es_escape(zenkaku_hankaku), zenkaku_hankaku)
# see ./osf_tests/test_elastic_search.py
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchMigrationNormalizedField(OsfTestCase):
# This feature is unsupported when ENABLE_MULTILINGUAL_SEARCH is True.
@classmethod
def tearDownClass(cls):
super(TestSearchMigrationNormalizedField, cls).tearDownClass()
search.create_index()
def setUp(self):
super(TestSearchMigrationNormalizedField, self).setUp()
self.es = search.search_engine.CLIENT
search.delete_all()
search.create_index()
with run_celery_tasks():
self.inst1 = InstitutionFactory()
self.testname = u'\u00c3\u00c3\u00c3'
self.testname_normalized = 'AAA'
self.user1 = AuthUserFactory()
self.user1.affiliated_institutions.add(self.inst1)
self.user1.fullname = self.testname
self.user1.save()
self.user2 = AuthUserFactory()
self.user2.affiliated_institutions.add(self.inst1)
self.user2.given_name = self.testname
self.user2.save()
self.user3 = AuthUserFactory()
self.user3.affiliated_institutions.add(self.inst1)
self.user3.family_name = self.testname
self.user3.save()
self.user4 = AuthUserFactory()
self.user4.affiliated_institutions.add(self.inst1)
self.user4.middle_names = self.testname
self.user4.save()
self.user5 = AuthUserFactory()
self.user5.affiliated_institutions.add(self.inst1)
self.user5.suffix = self.testname
self.user5.save()
self.users = (self.user1, self.user2, self.user3,
self.user4, self.user5)
self.project = ProjectFactory(
title=self.testname,
creator=self.user1,
is_public=True
)
self.TOTAL_USERS = len(self.users)
self.TOTAL_PROJECTS = 1
# migrate() may not update elasticsearch-data immediately.
@retry_assertion(retries=10)
def search_contrib(self, expect_num):
contribs = search.search_contributor(
self.testname_normalized,
current_user=self.user1
)
assert_equal(len(contribs['users']), expect_num)
# migrate() may not update elasticsearch-data immediately.
@retry_assertion(retries=10)
def search_project(self, expect_num):
r = search.search(build_query(self.testname_normalized),
doc_type='project',
index=None, raw=False)
assert_equal(len(r['results']), expect_num)
def test_rebuild_search_check_normalized(self):
self.search_contrib(self.TOTAL_USERS)
self.search_project(self.TOTAL_PROJECTS)
# after update_search()
for u in self.users:
u.update_search()
self.search_contrib(self.TOTAL_USERS)
self.search_project(self.TOTAL_PROJECTS)
migrate(delete=False, remove=False,
index=None, app=self.app.app)
# after migrate (= rebuild_search)
self.search_contrib(self.TOTAL_USERS)
self.search_project(self.TOTAL_PROJECTS)
def test_rebuild_search_check_not_normalized(self):
with mock.patch('website.search_migration.migrate.fill_and_normalize'):
migrate(delete=False, remove=False,
index=None, app=self.app.app)
# after migrate (= rebuild_search)
self.search_contrib(0)
self.search_project(0)
|
StarcoderdataPython
|
1869601
|
<filename>aws/translate1.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import the library
import boto3
# The sentence to translate (English)
input_text = "I like robots."
# Prepare for translation using AWS
translate = boto3.client(service_name="translate")
# Translate the sentence
translate_text = translate.translate_text(
    Text=input_text,
    SourceLanguageCode="en",
    TargetLanguageCode="ja"
)["TranslatedText"].encode("UTF-8")
# Display the result
print("------------------------------------")
print("○ Before translation: {}".format(input_text))
print("------------------------------------")
print("○ After translation: {}".format(translate_text))
|
StarcoderdataPython
|
11307617
|
# Definition for singly-linked list.
# 142+465=607
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
carry = 0
resultHead = c = ListNode(0)
while l1 or l2 or carry > 0:
num1 = l1.val if l1 else 0
num2 = l2.val if l2 else 0
carry = (num1 + num2 + carry)
c.next = ListNode(carry % 10)
carry = carry // 10
c = c.next
l1 = l1.next if l1 else None
l2 = l2.next if l2 else None
return resultHead.next
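# A small driver consistent with the "142+465=607" note above. ListNode is
# redefined here because the official definition is commented out.
if __name__ == "__main__":
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None

    def to_linked(digits):
        # digits are least-significant first, as in the problem statement
        head = cur = ListNode(digits[0])
        for d in digits[1:]:
            cur.next = ListNode(d)
            cur = cur.next
        return head

    node = Solution().addTwoNumbers(to_linked([2, 4, 1]), to_linked([5, 6, 4]))
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # [7, 0, 6] -> 607 read least-significant digit first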
|
StarcoderdataPython
|
76134
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import maya.mel
import IECore
import IECoreMaya
__dagMenuCallbacks = []
## Registers a callback to be used when creating the right click dag
# menu for procedural holders. Callbacks should have the following signature :
#
# callback( menu, proceduralHolder ).
def addDagMenuCallback( callback ) :
if not callback in __dagMenuCallbacks :
__dagMenuCallbacks.append( callback )
## Removes a callback previously added with addDagMenuCallback.
def removeDagMenuCallback( callback ) :
__dagMenuCallbacks.remove( callback )
## This is forwarded to by the ieProceduralHolderDagMenuProc function in
# ieProceduralHolder.mel
def _dagMenu( menu, proceduralHolder ) :
if maya.cmds.nodeType( proceduralHolder )!="ieProceduralHolder" :
children = maya.cmds.listRelatives( proceduralHolder, children=True, type="ieProceduralHolder", fullPath=True )
if not children :
return
else :
proceduralHolder = children[0]
maya.cmds.setParent( menu, menu=True )
maya.cmds.menuItem(
label = "Component",
radialPosition = "N",
command = IECore.curry( __componentCallback, proceduralHolder )
)
maya.cmds.menuItem(
label = "Object",
radialPosition = "W",
command = IECore.curry( __objectCallback, proceduralHolder ),
)
maya.cmds.menuItem(
label = "Print Component Names",
radialPosition = "NE",
command = IECore.curry( __printComponents, proceduralHolder )
)
fnPH = IECoreMaya.FnProceduralHolder( proceduralHolder )
if fnPH.selectedComponentNames() :
maya.cmds.menuItem(
label = "Print Selected Component Names",
radialPosition = "E",
command = IECore.curry( __printSelectedComponents, proceduralHolder )
)
if fnPH.selectedComponentNames() :
maya.cmds.menuItem(
label = "Create Locator",
radialPosition = "SE",
subMenu = True,
)
maya.cmds.menuItem(
label = "At Bound Min",
radialPosition = "N",
command = IECore.curry( __createLocatorAtPoints, proceduralHolder, [ "Min" ] ),
)
maya.cmds.menuItem(
label = "At Bound Max",
radialPosition = "NE",
command = IECore.curry( __createLocatorAtPoints, proceduralHolder, [ "Max" ] ),
)
maya.cmds.menuItem(
label = "At Bound Min And Max",
radialPosition = "E",
command = IECore.curry( __createLocatorAtPoints, proceduralHolder, [ "Min", "Max" ] ),
)
maya.cmds.menuItem(
label = "At Bound Centre",
radialPosition = "SE",
command = IECore.curry( __createLocatorAtPoints, proceduralHolder, [ "Center" ] ),
)
maya.cmds.menuItem(
label = "At Transform Origin",
radialPosition = "S",
command = IECore.curry( __createLocatorWithTransform, proceduralHolder ),
)
maya.cmds.setParent( menu, menu=True )
maya.cmds.menuItem(
label = "Convert To Geometry",
radialPosition = "S",
command = "import IECoreMaya; IECoreMaya.ProceduralHolderUI._convertToGeometry( \"" + proceduralHolder + "\" )",
)
for c in __dagMenuCallbacks :
c( menu, proceduralHolder )
def __componentCallback( proceduralHolder, *unused ) :
parent = maya.cmds.listRelatives( proceduralHolder, parent=True, fullPath=True )[0]
maya.mel.eval( "doMenuComponentSelection( \"" + parent + "\", \"facet\" )" )
def __objectCallback( proceduralHolder, *unused ) :
parent = maya.cmds.listRelatives( proceduralHolder, parent=True, fullPath=True )[0]
maya.cmds.hilite( parent, unHilite=True )
selection = maya.cmds.ls( selection=True )
maya.mel.eval( "changeSelectMode -object" )
if selection :
maya.cmds.select( selection, replace=True )
else :
maya.cmds.select( clear=True )
def __printComponents( proceduralHolder, *unused ) :
fnP = IECoreMaya.FnProceduralHolder( proceduralHolder )
names = fnP.componentNames()
names = list( names )
names.sort()
print " ".join( names ) ,
def __printSelectedComponents( proceduralHolder, *unused ) :
fnP = IECoreMaya.FnProceduralHolder( proceduralHolder )
selectedNames = fnP.selectedComponentNames()
selectedNames = list( selectedNames )
selectedNames.sort()
print " ".join( selectedNames ) ,
def _convertToGeometry( proceduralHolder, *unused ) :
fnP = IECoreMaya.FnProceduralHolder( proceduralHolder )
proceduralParent = maya.cmds.listRelatives( fnP.fullPathName(), parent=True, fullPath=True )[0]
geometryParent = maya.cmds.createNode( "transform", name = "convertedProcedural", skipSelect=True )
proceduralTransform = maya.cmds.xform( proceduralParent, query=True, worldSpace=True, matrix=True )
maya.cmds.xform( geometryParent, worldSpace=True, matrix=proceduralTransform )
fnP.convertToGeometry( parent=geometryParent )
maya.cmds.select( geometryParent, replace=True )
def __createLocatorAtPoints( proceduralHolder, childPlugSuffixes, *unused ) :
fnPH = IECoreMaya.FnProceduralHolder( proceduralHolder )
selectedNames = fnPH.selectedComponentNames()
proceduralParent = maya.cmds.listRelatives( fnPH.fullPathName(), parent=True, fullPath=True )[0]
locators = []
for name in selectedNames :
for childPlugSuffix in childPlugSuffixes :
outputPlug = fnPH.componentBoundPlugPath( name )
locator = "|" + maya.cmds.spaceLocator( name = name.replace( "/", "_" ) + childPlugSuffix )[0]
maya.cmds.connectAttr( outputPlug + ".componentBound" + childPlugSuffix, locator + ".translate" )
locators.extend( maya.cmds.parent( locator, proceduralParent, relative=True ) )
maya.cmds.select( locators, replace=True )
def __createLocatorWithTransform( proceduralHolder, *unused ) :
fnPH = IECoreMaya.FnProceduralHolder( proceduralHolder )
selectedNames = fnPH.selectedComponentNames()
proceduralParent = maya.cmds.listRelatives( fnPH.fullPathName(), parent=True, fullPath=True )[0]
locators = []
for name in selectedNames :
outputPlug = fnPH.componentTransformPlugPath( name )
locator = "|" + maya.cmds.spaceLocator( name = name.replace( "/", "_" ) + "Transform" )[0]
maya.cmds.connectAttr( outputPlug + ".componentTranslate", locator + ".translate" )
maya.cmds.connectAttr( outputPlug + ".componentRotate", locator + ".rotate" )
maya.cmds.connectAttr( outputPlug + ".componentScale", locator + ".scale" )
locators.extend( maya.cmds.parent( locator, proceduralParent, relative=True ) )
maya.cmds.select( locators, replace=True )
|
StarcoderdataPython
|
6447722
|
<gh_stars>1-10
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
class TestMatchMatrixTensorOp(OpTest):
def setUp(self):
self.init_op_type()
self.set_data()
self.compute()
def init_op_type(self):
self.op_type = "match_matrix_tensor"
def set_data(self):
ix, iy, h, dim_t = [5, 8, 3, 4]
x_lod = [[1, 2, 2]]
y_lod = [[3, 1, 4]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
def init_data(self, ix, x_lod, iy, y_lod, h, dim_t):
x_data = np.random.random((ix, h)).astype('float32')
y_data = np.random.random((iy, h)).astype('float32')
w_data = np.random.random((h, dim_t, h)).astype('float32')
self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod), 'W': w_data}
self.attrs = {'dim_t': dim_t}
def compute(self):
x_data, x_lod = self.inputs['X']
y_data, y_lod = self.inputs['Y']
# [k, dim_t, k] -> [dim_t, k, k]
w_data = self.inputs['W'].transpose(1, 0, 2)
out = np.zeros((0, 1), dtype=x_data.dtype)
# for x*w
tmp = np.zeros((0, 1), dtype=x_data.dtype)
out_lod = [[]]
tmp_lod = [[]]
x_offset, y_offset = 0, 0
for idx in range(len(x_lod[0])):
x_len = x_lod[0][idx]
y_len = y_lod[0][idx]
x_sub = x_data[x_offset:(x_offset + x_len), :]
y_sub = y_data[y_offset:(y_offset + y_len), :]
tmp_sub = np.dot(x_sub, w_data)
tmp = np.vstack((tmp, tmp_sub.reshape(tmp_sub.size, 1)))
out_sub = np.dot(tmp_sub, y_sub.T).transpose(1, 0, 2)
out_lod[0].append(out_sub.size)
out = np.vstack((out, out_sub.reshape(out_sub.size, 1)))
x_offset += x_len
y_offset += y_len
self.outputs = {'Out': (out, out_lod), 'Tmp': tmp}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005)
class TestMatchMatrixTensorOpCase1(TestMatchMatrixTensorOp):
def set_data(self):
ix, iy, h, dim_t = [5, 8, 16, 4]
x_lod = [[5]]
y_lod = [[8]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
class TestMatchMatrixTensorOpCase2(TestMatchMatrixTensorOp):
def set_data(self):
ix, iy, h, dim_t = [7, 8, 1, 4]
x_lod = [[2, 3, 2]]
y_lod = [[3, 1, 4]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
class TestMatchMatrixTensorOpCase3(TestMatchMatrixTensorOp):
def set_data(self):
ix, iy, h, dim_t = [5, 9, 32, 1]
x_lod = [[1, 2, 2]]
y_lod = [[3, 2, 4]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
class TestMatchMatrixTensorOpCase4(TestMatchMatrixTensorOp):
def set_data(self):
ix, iy, h, dim_t = [8, 12, 16, 5]
x_lod = [[1, 2, 3, 1, 1]]
y_lod = [[3, 2, 4, 1, 2]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
def test_api(self):
x_lod_tensor = fluid.layers.data(name='x', shape=[10], lod_level=1)
y_lod_tensor = fluid.layers.data(name='y', shape=[10], lod_level=1)
out, out_tmp = fluid.contrib.match_matrix_tensor(
x=x_lod_tensor, y=y_lod_tensor, channel_num=3)
place = fluid.CPUPlace()
x_data = np.random.rand(7, 10).astype('float32')
y_data = np.random.rand(9, 10).astype('float32')
x = fluid.create_lod_tensor(x_data, [[2, 5]], place)
y = fluid.create_lod_tensor(y_data, [[3, 6]], place)
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
ret = exe.run(feed={'x': x,
'y': y},
fetch_list=[out],
return_numpy=False)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
6428150
|
<reponame>japonophile/Education4Climate<gh_stars>0
from abc import ABC
from pathlib import Path
import pandas as pd
import scrapy
from src.crawl.utils import cleanup
from settings import YEAR, CRAWLING_OUTPUT_FOLDER
import logging
log = logging.getLogger()
BASE_URL = 'https://www.z.k.kyoto-u.ac.jp'
PROG_DATA_PATH = Path(__file__).parent.absolute().joinpath(
f'../../../../{CRAWLING_OUTPUT_FOLDER}kyotou_programs_{YEAR}.json')
class KyotoUnivCourseSpider(scrapy.Spider, ABC):
"""
Courses crawler for Kyoto University
"""
name = "kyotou-courses"
custom_settings = {
'FEED_URI': Path(__file__).parent.absolute().joinpath(
f'../../../../{CRAWLING_OUTPUT_FOLDER}kyotou_courses_{YEAR}.json').as_uri()
}
def start_requests(self):
programs = pd.read_json(open(PROG_DATA_PATH, "r"))
for _, program in programs.iterrows():
yield scrapy.Request(
BASE_URL + program["url"].replace('course-list', 'course-detail'),
cookies={'locale': 'ja'}, callback=self.parse_courses,
cb_kwargs={"courses": program["courses"]})
def parse_courses(self, response, courses):
course_ids = response.xpath("//div[contains(@id, 'lecture_')]/@id").getall()
for course_id in course_ids:
if course_id.replace('lecture_', '') not in courses:
                log.warning(f"{course_id} not in program courses")
continue
course_name = response.xpath(
f"//div[@id='{course_id}']//span[@class='course-title']/span[1]/text()").get().strip()
LANG_MAP = {"日本語": "ja", "英語": "en"}
language = response.xpath(
f"//div[@id='{course_id}']//span[@class='language']/text()").get().strip()
log.info(f"{language=}")
if '及び' in language:
languages = [LANG_MAP.get(l, "other") for l in language.split('及び')]
else:
languages = [LANG_MAP.get(language, "other")]
teachers = response.xpath(
f"//div[@id='{course_id}']//table[@class='teachers']/tr/td[3]/text()").getall()
def get_sections_text(sections_names):
texts = [cleanup(response.xpath(
f"//div[@id='{course_id}']//div[@class='syllabus-header' and contains(text(), '{section}')]/following-sibling::div[1]").get())
for section in sections_names]
return "\n".join(texts).strip("\n /")
content = get_sections_text(['(授業計画と内容)'])
goal = get_sections_text(['(授業の概要・目的)', '(到達目標)'])
activity = ""
other = get_sections_text(['(履修要件)', '(成績評価の方法・観点及び達成度)',
'(教科書)', '(参考書等)', '(授業外学習(予習・復習)等)',
'(その他(オフィスアワー等))'])
yield {
"id": course_id.replace('lecture_', ''),
"name": course_name,
"year": f"{YEAR}-{int(YEAR)+1}",
"languages": languages,
"teachers": teachers,
"url": response.url + f"#{course_id}",
"content": content,
"goal": goal,
"activity": activity,
"other": other
}
next_page = response.xpath("//nav[@class='pagination']/span[@class='page current']"
"/following-sibling::span[1]/a/@href").get()
log.info(f"{next_page=}")
if next_page is not None and len(next_page.strip()) > 0:
yield response.follow(next_page, self.parse_courses,
cb_kwargs={"courses": courses})
|
StarcoderdataPython
|
6496064
|
import os
import shutil
import tempfile
import logging
import time
import requests
from distutils.dir_util import copy_tree
from brdm.NcbiData import NcbiData
from brdm.RefDataInterface import RefDataInterface
class NcbiTaxonomyData(NcbiData, RefDataInterface):
def __init__(self, config_file):
"""Initialize the object"""
super(NcbiTaxonomyData, self).__init__(config_file)
self.download_folder = \
self.config['ncbi']['taxonomy']['download_folder']
self.download_file = self.config['ncbi']['taxonomy']['download_file']
self.taxonomy_file = self.config['ncbi']['taxonomy']['taxonomy_file']
self.info_file_name = self.config['ncbi']['taxonomy']['info_file_name']
# Create destination directory and backup directory
try:
self.destination_dir = os.path.join(
super(NcbiTaxonomyData, self).destination_dir,
self.config['ncbi']['taxonomy']['destination_folder'])
if not os.path.exists(self.destination_dir):
os.makedirs(self.destination_dir, mode=self.folder_mode)
os.chdir(self.destination_dir)
self.backup_dir = os.path.join(
super(NcbiTaxonomyData, self).backup_dir,
self.config['ncbi']['taxonomy']['destination_folder'])
if not os.path.exists(self.backup_dir):
os.makedirs(self.backup_dir, mode=self.folder_mode)
except Exception as e:
logging.error('Failed to create destination_dir/backup_dir {}'
.format(e))
def update(self):
"""Update NCBI taxonomy database
The method first download the most recent taxonomy from NCBI;
then format and backup the taxonomy information.
"""
logging.info('Executing NCBI taxonomy update')
# Download files into the intermediate folder
temp_dir = self.create_tmp_dir(self.destination_dir)
if not temp_dir:
            logging.error('Failed to create the temp_dir')
return False
success = self.download()
if not success:
logging.error('Download failed. Quit the Update process.')
return False
# Format the taxonomy file and remove unwanted files
# and change file mode
format_success = self.format_taxonomy(self.taxonomy_file)
if not format_success:
logging.error('Failed to format taxonomy file')
return False
# Backup the files
backup_success = self.backup()
if not backup_success:
logging.error('Backup of taxonomy data did not succeed.')
return False
# Delete old files from the destination folder
# Copy new files from intermediate folder to destination folder
clean_ok = self.clean_destination_dir(self.destination_dir)
if not clean_ok:
return False
try:
copy_tree(temp_dir, self.destination_dir)
shutil.rmtree(temp_dir)
except Exception as e:
logging.error('Failed to move files from temp_dir to \
\n destination folder, error{}'.format(e))
return False
return True
# Download taxonomy database
def download(self, test=False):
"""Download the most recent taxonomy database"""
logging.info('Downloading NCBI taxonomy')
download_start_time = time.time()
downloaded_files = []
files_download_failed = []
max_download_attempts = self.download_retry_num
file_name = self.download_file
readme_success = False
download_success = test
unzip_success = False
attempt = 0
completed = False
while attempt < max_download_attempts and not completed:
attempt += 1
try:
file_url = os.path.join(self.login_url, self.download_folder)
session_requests, connected = self.https_connect()
if not readme_success:
# download readme file:
file_name_readme = self.info_file_name
file_url_readme = os.path.join(file_url, file_name_readme)
readme_success = self.download_a_file(
file_name_readme, file_url_readme, session_requests)
if not download_success:
# download md5 file:
file_name_md5 = self.download_file+'.md5'
file_url_md5 = os.path.join(file_url, file_name_md5)
md5_success = self.download_a_file(
file_name_md5, file_url_md5, session_requests)
# download taxdump zipped file
file_name_taxon = self.download_file
file_url_taxon = os.path.join(file_url, file_name_taxon)
taxon_success = self.download_a_file(
file_name_taxon, file_url_taxon, session_requests)
# check md5
download_success = self.checksum(
file_name_md5, file_name_taxon)
if download_success and readme_success:
completed = True
session_requests.close()
except Exception as e:
logging.info('Failed to download taxonomy on attempt {}. \
\nError: {}'.format(attempt, e))
time.sleep(self.sleep_time)
if completed and not test:
unzip_success = self.unzip_file(file_name_taxon)
if not unzip_success and not test:
files_download_failed.append(file_name)
logging.error('Failed to download {} after {} attempts'
.format(file_name, max_download_attempts))
return False
# Write the README+ file
downloaded_files.append(file_name)
comment = 'Taxonomy reference databases that downloaded from NCBI.'
self.write_readme(
download_url='{}/{}/{}'.format(self.login_url,
self.download_folder,
self.download_file),
downloaded_files=downloaded_files,
download_failed_files=files_download_failed,
comment=comment,
execution_time=(time.time() - download_start_time))
return True
def checksum(self, md5_file, file_name):
"""Check the correctness of the downloaded file"""
try:
with open(md5_file, 'r') as f:
md5_file_contents = f.read()
md5_str = md5_file_contents.split(' ')[0]
os.remove(md5_file)
except Exception as e:
logging.exception('Could not read MD5 file {}. \
\nTry to download the file again'.format(file_name))
return False
if not self.check_md5(file_name, md5_str):
logging.warning('Failed in checking MD5. Download file again.')
return False
return True
# Write the taxonomy file in a specific format, redmine #12865-14
def format_taxonomy(self, filename):
"""Write the taxonomy file in a specific format"""
dmp_file = filename+'.dmp'
taxonomy_file = filename+'.txt'
try:
taxonomy = open(taxonomy_file, 'w')
taxonomy.write(
'taxon_id\ttaxon_name\td__domain; k__kingdom; p__phylum; '
+ 'c__class; o__order; f__family; g__genus; s__species\n')
with open(dmp_file) as fp:
content = fp.readlines()
for line in content:
line = line[:-3]
x = line.split('\t|\t')
tax_id, tax_name, species, genus, family, order, \
taxon_class, phylum, kingdom, superkingdom = x
taxonomy.write(tax_id + '\t' + tax_name + '\td__'
+ superkingdom + '; k__' + kingdom
+ '; p__' + phylum + '; c__'
+ taxon_class + '; o__' + order + '; f__'
+ family + '; g__' + genus + '; s__'
+ species + '\n')
taxonomy.close()
except Exception as e:
logging.exception('Failed to format taxonomy file')
return False
# remove unwanted file and change file mode
app_readme_file = self.config['readme_file']
ncbi_readme_file = self.info_file_name
taxonomy_file = self.taxonomy_file + '.txt'
try:
only_files = [f for f in os.listdir('.') if os.path.isfile(f)]
for f in only_files:
if not f == app_readme_file and not f == ncbi_readme_file \
and not f == taxonomy_file:
os.remove(f)
else:
os.chmod(f, self.file_mode)
except Exception as e:
logging.error('Failed to remove unwanted files:{}'.format(e))
return False
return True
def backup(self):
"""Backup the taxonomy information"""
logging.info('Executing NCBI taxonomy backup')
backup_folder = self.create_backup_dir()
if not backup_folder:
logging.error('NCBI taxonomy Backup did not succeed.')
return False
try:
src_files = [f for f in os.listdir('.') if os.path.isfile(f)]
for filename in src_files:
shutil.copy(filename, backup_folder)
except Exception as e:
logging.exception('Failed in NCBI taxonomy Backup: {}'.format(e))
return False
return True
def restore(self, proposed_folder_name, path_to_destination):
"""Restore old version of taxonomy database from backups
Args:
proposed_folder_name (string): in format yyyy-mm-dd; it is
the version of the database you want to restore
path_to_desination (string): The path to a place that you
want to store the restored database
Return:
True if the database restored successfully; False otherwise
"""
logging.info('Executing NCBI taxonomy restore {} to {}'
.format(proposed_folder_name, path_to_destination))
# check the restore folder, return false if not exist or empty folder
try:
restore_folder = self.check_restore_date(
self.backup_dir, proposed_folder_name)
if not restore_folder:
return False
restore_destination = self.check_restore_destination(
path_to_destination)
if not restore_destination:
return False
# create restore destination folder
if not os.path.isdir(restore_destination):
os.makedirs(restore_destination, mode=self.folder_mode)
# copy the all the files in backup_dir/folder_name to
# restore destination
os.chdir(restore_folder)
for filename in os.listdir(restore_folder):
shutil.copy2(filename, restore_destination)
except Exception as e:
logging.exception('Failed in NCBI taxonomy restore: {}'.format(e))
return False
print('The restored database is located at ' + restore_destination)
logging.info('The restored database is located at {}'
.format(restore_destination))
return True
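# Hedged usage sketch: 'config.yaml' is a hypothetical path whose contents
# must provide the ncbi.taxonomy keys read in __init__ above.
if __name__ == '__main__':
    taxonomy = NcbiTaxonomyData('config.yaml')
    if taxonomy.update():
        print('NCBI taxonomy downloaded, formatted and backed up')
    else:
        print('update failed; see the log for details')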
|
StarcoderdataPython
|
6481731
|
<reponame>fabaff/penin
"""Init file for PenIn."""
|
StarcoderdataPython
|
3565511
|
from tkinter import*
root = Tk()
photo = PhotoImage(file="/home/pi/std_googleAssistant/GUI/Icons/Home.png")
label = Label(root, image=photo)
label.pack()
root.mainloop()
|
StarcoderdataPython
|
381752
|
<reponame>kalpishs/download_kit<gh_stars>1-10
import logging
import shutil
from urllib import parse
from downloadKit.protocol_factory.constants import bufsize
from downloadKit.protocol_factory.protocol_template import ProtocolTemplate
import paramiko
import os
class sftpUrlDownloader(ProtocolTemplate):
def __init__(self, url, output_dir):
super().__init__(url, output_dir)
self._file_length = 0
self._downloaded_length = 0
self._sftp = None
def get_file(self):
return os.path.basename(self.url.path)
"""
execution of url downloader request for sftp
"""
def execute(self):
try:
super().execute()
try:
                port_binding = paramiko.Transport((self.url.hostname, self.url.port or 22))
port_binding.connect(username=self.url.username, password=self.url.password)
self._sftp = paramiko.SFTPClient.from_transport(port_binding)
            except Exception as e:
                logging.info(f"login error Url: {parse.urlunparse(self.url)}: {e}")
                print(f"login error Url: {parse.urlunparse(self.url)}")
                self.clear()
                return
            self._file_length = self._sftp.stat(self.url.path).st_size
            with open(self.file_details, 'wb') as f:
                with self._sftp.open(self.url.path, mode="r", bufsize=bufsize) as write_file:
                    self._downloaded_length = self._file_length * 0.10  # provisional until the copy completes
shutil.copyfileobj(write_file, f)
self._downloaded_length = self._file_length
except Exception as ex:
print(f"downloading failed {ex}")
self.clear()
def clear(self):
try:
self.clear_downloads()
self._sftp.close()
except Exception as e:
logging.info(f"failed closing & clearing downloads exception{e}")
print("failed closing & clearing downloads.")
def progress(self):
return self._downloaded_length, self._file_length
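# Usage sketch, assuming ProtocolTemplate accepts a parsed URL (the
# self.url.hostname/.path accesses above suggest urllib.parse output);
# host, credentials and paths are placeholders.
if __name__ == '__main__':
    url = parse.urlparse('sftp://user:secret@example.com:22/data/report.csv')
    downloader = sftpUrlDownloader(url, '/tmp/downloads')
    downloader.execute()
    print(downloader.progress())  # (downloaded_bytes, total_bytes)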
|
StarcoderdataPython
|
3358115
|
<reponame>zsb514/message_bot_plateform
from .telebot import TeleBot
from .wxbot import WxBot
from .dingbot import DingBot
def init_bot(bot_token, bot_type, chat_id=None, secret=None):
if bot_type == 0:
return WxBot(bot_token)
elif bot_type == 1:
return DingBot(bot_token, secret)
elif bot_type == 2:
return TeleBot(bot_token, chat_id)
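# Dispatch is keyed on bot_type: 0 -> WxBot, 1 -> DingBot (needs secret),
# 2 -> TeleBot (needs chat_id). Tokens and ids below are placeholders.
#
#   wx = init_bot('wx-webhook-token', bot_type=0)
#   ding = init_bot('ding-token', bot_type=1, secret='ding-secret')
#   tele = init_bot('telegram-token', bot_type=2, chat_id='123456')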
|
StarcoderdataPython
|
140802
|
<filename>processing/boundaries/inputs/concat.py
from psycopg2.sql import SQL, Identifier
from .utils import logging, get_ids, get_ps_ids
logger = logging.getLogger(__name__)
query_1 = """
DROP TABLE IF EXISTS adm4_polygons_pop_03;
CREATE TABLE adm4_polygons_pop_03 AS
SELECT {ids}, geom FROM adm4_polygons_pop_02
UNION ALL
SELECT {ids}, geom FROM adm3_polygons_pop_02
UNION ALL
SELECT {ids}, geom FROM adm2_polygons_pop_02
UNION ALL
SELECT {ids}, geom FROM adm1_polygons_pop_02
UNION ALL
SELECT {ids}, geom FROM adm0_polygons_pop_02
ORDER BY adm4_id;
"""
def main(cur):
cur.execute(SQL(query_1).format(
ids=SQL(',').join(map(Identifier, get_ids(4) + get_ps_ids())),
))
logger.info('finished')
|
StarcoderdataPython
|
8033738
|
<gh_stars>0
import numpy as np
import matplotlib
from PIL import Image
import math
from functions import *
x = 0
y = 0
nb_pixels_noirs_i1 = 0
nb_pixels_noirs_i2 = 0
nb_pixels_noirs_i = 0
nb_pixels_cercle = 0
i_inconnue = Image.open('images/image1.jpg')
i_ar = np.asarray(i_inconnue)
x_max_i = len(i_ar)
y_max_i = len(i_ar[0])
# pointsCercle = cercle(x_max_i, y_max_i, 1.* y_max_i/2, [0,0], 1000)
for x in range(x_max_i):
for y in range(y_max_i):
if (x - 1.* x_max_i / 2)*(x - 1.* x_max_i / 2) + (y - 1.*y_max_i/2)*(y - 1.*y_max_i/2) <= np.min([y_max_i, x_max_i])*np.min([y_max_i, x_max_i]):
nb_pixels_cercle += 1
if i_ar[x][y][0] <= 50 and i_ar[x][y][1] <= 50 and i_ar[x][y][2] <= 50:
nb_pixels_noirs_i += 1
# print "les coordonnes des points en noir sont: ", x, y
rapport_inc = 1.* nb_pixels_noirs_i / nb_pixels_cercle * 100
print rapport_inc
if rapport_inc > 5 and rapport_inc < 8:
print 'Cette image est stylee'
|
StarcoderdataPython
|
292673
|
<reponame>krishna-saravan/linkml<gh_stars>10-100
import re
import unittest
from functools import reduce
from typing import List, Tuple
from rdflib import Graph
from linkml.generators.owlgen import OwlSchemaGenerator
from tests.test_utils.environment import env
from tests.utils.compare_rdf import compare_rdf
from tests.utils.test_environment import TestEnvironmentTestCase
repl: List[Tuple[str, str]] = [
(r'\s*meta:generation_date ".*" ;', 'meta:generation_date "Fri Jan 25 14:22:29 2019" ;'),
(r'\s*meta:source_file_date ".*" ;', 'meta:source_file_date "Fri Jan 25 14:22:29 2019" ;')
]
def filtr(txt: str) -> str:
return reduce(lambda s, expr: re.sub(expr[0], expr[1], s, flags=re.MULTILINE), repl, txt)
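# For example, filtr pins the volatile date fields so generated OWL can be
# compared against a stored snapshot:
#
#   filtr('  meta:generation_date "2021-03-02T10:00:00" ;')
#   # -> 'meta:generation_date "Fri Jan 25 14:22:29 2019" ;'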
class OWLTestCase(TestEnvironmentTestCase):
env = env
def test_cardinalities(self):
self.env.generate_single_file('owl1.owl',
lambda: OwlSchemaGenerator(env.input_path('owl1.yaml'),
importmap=env.import_map).serialize(),
filtr=filtr, comparator=compare_rdf, value_is_returned=True)
def test_pred_types(self):
self.env.generate_single_file('owl2.owl',
lambda: OwlSchemaGenerator(env.input_path('owl2.yaml'),
importmap=env.import_map).serialize(),
filtr=filtr, comparator=compare_rdf, value_is_returned=True)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
245685
|
<filename>pe_tree/map.py
#
# Copyright (c) 2020 BlackBerry Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PE Tree file region map"""
# Standard imports
import sys
import itertools
# Qt imports
from PyQt5 import QtCore, QtGui, QtWidgets
class FileRegion():
"""Holds information about a file region for the map view
Args:
name (str): Name of the region
start (int, optional): File offset
size (int, optional): Size in bytes
rva (int, optional): In-memory offset
item (pe_tree.tree.PETree, optional): PE Tree
"""
def __init__(self, name, start=0, size=0, end=0, rva=0, item=None):
if sys.version_info > (3,):
self.name = name
else:
self.name = str(name)
self.start = start
self.size = size
self.end = end
self.rva = rva
self.item = item
self.rect = None
self.colour = None
self.hover = False
if self.end == 0:
self.end = self.start + self.size
if self.size == 0:
self.size = self.end - self.start
class PEMap(QtWidgets.QGroupBox):
"""PE map group box widget for holding region labels
Args:
file_size (int): Size of the PE file
parent (QWidget, optional): Parent widget
"""
def __init__(self, file_size, parent=None):
super(PEMap, self).__init__(parent=parent)
self.file_size = file_size
self.colours = None
self.rainbows = {}
# Create group box layout for the PE map
self.layout = QtWidgets.QVBoxLayout(self)
# Remove space between widgets
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
# Set background to white (for alpha)
self.setStyleSheet("background-color: white; margin-top: 0px; padding: 0px;")
def add_regions(self, regions):
"""Add list of file regions to the PE map
Args:
regions ([pe_tree.map.FileRegions]: File regions
"""
# Rainbow colours for the PE map
colours = ["#b9413c", "#ea8c2c", "#ffbd20", "#559f7a", "#4c82a4", "#764a73", "#2f0128"]
wanted = len(regions) + len(colours)
if wanted in self.rainbows:
# Use saved colours
colours = self.rainbows[wanted]
else:
# Make a rainbow
while True:
gradients = polylinear_gradient(colours, wanted)
if len(gradients["hex"]) >= len(regions):
break
wanted += 1
colours = []
for colour in gradients["hex"]:
colours.append(QtGui.QColor(int(colour[1:], 16)))
# Save colours
self.rainbows[len(colours)] = colours
self.colours = itertools.cycle(colours)
# Sort regions by start offset, create labels and add to the group box layout
for region in sorted(regions, key=lambda region: (region.start)):
self.layout.addWidget(PEMapLabel(region, next(self.colours), self.file_size, parent=self))
class PEMapLabel(QtWidgets.QWidget):
"""PE map label widget
Args:
region (pe_tree.map.FileRegion): PE file region
colour (QColor): Region background colour
file_size (int): Size of the PE file
parent (QWidget, optional): Parent widget
"""
def __init__(self, region, colour, file_size, parent=None):
super(PEMapLabel, self).__init__(parent=parent)
self.region = region
self.colour = colour
self.file_size = file_size
# Initialise self
self.setMouseTracking(True)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.context_menu)
self.setMinimumHeight(30)
# Initialise font
families = ["Consolas", "Monospace", "Courier"]
for family in families:
family = family.strip()
if family in QtGui.QFontDatabase().families():
self.setFont(QtGui.QFont(family))
def paintEvent(self, event):
"""Draw the PE map label
Args:
event (QPaintEvent): Paint event
"""
super(PEMapLabel, self).paintEvent(event)
# Get size of the label rect
r = event.region().boundingRect()
w = self.width()
h = self.height()
y = r.y()
x = r.x()
# Get colour and region
colour = self.colour
region = self.region
# Same colour but with alpha
colour_alpha = QtGui.QColor(colour.red(), colour.green(), colour.blue(), alpha=100 if region.hover is True else 175)
# Expand the font on mouse over
font = self.font()
font.setWeight(QtGui.QFont.Medium)
if region.hover:
font.setStretch(QtGui.QFont.Expanded)
else:
font.setStretch(QtGui.QFont.SemiExpanded)
# Draw the main rect
painter = QtGui.QPainter(self)
region.rect = QtCore.QRect(x, y, w, h)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtGui.QBrush(QtGui.QColor(colour), QtCore.Qt.SolidPattern))
painter.drawRect(region.rect)
# Determine width per byte of file size
delta = float(w) / float(max(self.file_size, region.start + region.size))
# Draw the ratio portion over a white background
painter.setBrush(QtGui.QBrush(QtGui.QColor("white"), QtCore.Qt.SolidPattern))
painter.drawRect(x + int(region.start * delta), y, max(int(region.size * delta), 2), h)
painter.setBrush(QtGui.QBrush(QtGui.QColor(colour_alpha), QtCore.Qt.SolidPattern))
painter.drawRect(x + int(region.start * delta), y, max(int(region.size * delta), 2), h)
# Draw drop shadow text
painter.setFont(font)
painter.setPen(QtGui.QPen(QtGui.QColor(63, 63, 63, 100)))
painter.drawText(QtCore.QRect(x + 1, y + 1, w, h), QtCore.Qt.AlignCenter, str(region.name))
# Write the region name
painter.setPen(QtGui.QPen(QtGui.QColor(244, 244, 250)))
painter.drawText(QtCore.QRect(x, y, w, h), QtCore.Qt.AlignCenter, str(region.name))
painter.end()
def mouseMoveEvent(self, event):
"""Set region hover state and redraw the PE map label
Args:
event (QMouseEvent): Mouse move event
"""
self.region.hover = True
self.update()
def leaveEvent(self, event):
"""Clear region hover state and redraw the PE map label
Args:
event (QMouseEvent): Mouse move event
"""
self.region.hover = False
self.update()
def mousePressEvent(self, event):
"""Locate item related to PE map label in tree view
Args:
event (QMouseEvent): Mouse press event
"""
if event.button() == QtCore.Qt.RightButton:
return
item = self.region.item
form = item.tree.form
index = form.model.indexFromItem(item)
# Find/expand/scroll to the item in the PE tree
form.treeview.setCurrentIndex(index)
form.expanding = True
form.expand_items(index)
form.expanding = False
form.treeview.resizeColumnToContents(0)
form.treeview.resizeColumnToContents(1)
form.treeview.scrollTo(index, QtWidgets.QAbstractItemView.PositionAtTop)
def mouseDoubleClickEvent(self, event):
"""Locate item related to PE map label in external view
Args:
event (QMouseEvent): Mouse click event
"""
self.region.hover = False
self.region.item.tree.form.runtime.jumpto(self.region.item, self.region.rva)
def context_menu(self, point):
"""Show PE map label right-click context menu
Args:
point (QPoint): Right-click location
"""
item = self.region.item
form = item.tree.form
point = self.mapToGlobal(point)
index = form.model.indexFromItem(item)
item.context_menu(form.context_menu_actions.new_menu(form, point, item, index))
class PEMapScrollArea(QtWidgets.QScrollArea):
"""PE map scroll area widget"""
def eventFilter(self, obj, event):
if event.type() is QtCore.QEvent.MouseMove:
# Force the map view to update when the mouse moves over the scrollbar
self.widget().update()
return super(PEMapScrollArea, self).eventFilter(obj, event)
#
# The following code is gratefully borrowed from:
# https://github.com/bsouthga/blog/blob/master/public/posts/color-gradients-with-python.md
# licensed as follows:
# Copyright 2017 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
#
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else "{0:x}".format(v) for v in RGB])
def color_dict(gradient):
''' Takes in a list of RGB sub-lists and returns dictionary of
colors in RGB and hex form for use in a graphing function
defined later on '''
return {"hex":[RGB_to_hex(RGB) for RGB in gradient],
"r":[RGB[0] for RGB in gradient],
"g":[RGB[1] for RGB in gradient],
"b":[RGB[2] for RGB in gradient]}
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
inlcuding the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
# Initilize a list of the output colors with the starting color
RGB_list = [s]
# Calcuate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [
int(s[j] + (float(t)/(n-1))*(f[j]-s[j]))
for j in range(3)
]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return color_dict(RGB_list)
def polylinear_gradient(colors, n):
''' returns a list of colors forming linear gradients between
all sequential pairs of colors. "n" specifies the total
number of desired output colors '''
# The number of colors per individual linear gradient
n_out = int(float(n) / (len(colors) - 1))
# returns dictionary defined by color_dict()
gradient_dict = linear_gradient(colors[0], colors[1], n_out)
if len(colors) > 1:
for col in range(1, len(colors) - 1):
next = linear_gradient(colors[col], colors[col+1], n_out)
for k in ("hex", "r", "g", "b"):
# Exclude first point to avoid duplicates
gradient_dict[k] += next[k][1:]
return gradient_dict
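# Standalone illustration of the borrowed gradient helpers. Note that
# polylinear_gradient can return slightly fewer than n colours (integer
# division in n_out), which is why PEMap.add_regions above retries with
# a larger n until it has enough.
if __name__ == "__main__":
    demo = polylinear_gradient(["#b9413c", "#4c82a4", "#2f0128"], 8)
    print(demo["hex"])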
|
StarcoderdataPython
|
1882821
|
<gh_stars>100-1000
trick.exec_set_terminate_time(5.2)
|
StarcoderdataPython
|
1818889
|
import pytest
@pytest.mark.skip("Already bgpv4 and bgpv6 testcases are available")
def test_devices(api, utils):
"""This is a BGPv4 demo test script with router ranges"""
config = api.config()
tx, rx = config.ports.port(
name="tx", location=utils.settings.ports[0]
).port(name="rx", location=utils.settings.ports[1])
config.options.port_options.location_preemption = True
ly = config.layer1.layer1()[-1]
ly.name = "ly"
ly.port_names = [tx.name, rx.name]
ly.speed = utils.settings.speed
ly.media = utils.settings.media
tx_device, rx_device = config.devices.device(
name="tx_device", container_name=tx.name
).device(name="rx_device", container_name=rx.name)
# tx_device config
tx_eth = tx_device.ethernet
tx_eth.name = "tx_eth"
tx_ipv4 = tx_eth.ipv4
tx_ipv4.name = "tx_ipv4"
tx_ipv4.address.value = "192.168.127.12"
tx_ipv4.prefix.value = "24"
tx_ipv4.gateway.value = "192.168.3.11"
tx_bgpv4 = tx_ipv4.bgpv4
tx_bgpv4.name = "tx_bgpv4"
tx_bgpv4.as_type = "ibgp"
tx_bgpv4.dut_ipv4_address.value = "172.16.31.10"
tx_bgpv4.as_number.value = "65200"
tx_rr = tx_bgpv4.bgpv4_route_ranges.bgpv4routerange()[-1]
tx_rr.name = "tx_rr"
tx_rr.address_count = "2000"
tx_rr.address.value = "192.168.127.12"
tx_rr.prefix.value = "32"
tx_v6rr = tx_bgpv4.bgpv6_route_ranges.bgpv6routerange()[-1]
tx_v6rr.name = "tx_v6rr"
tx_v6rr.address_count = "1000"
tx_v6rr.address.value = "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"
tx_v6rr.prefix.value = "64"
# rx_device config
rx_eth = rx_device.ethernet
rx_eth.name = "rx_eth"
rx_ipv4 = rx_eth.ipv4
rx_ipv4.name = "rx_ipv4"
rx_ipv4.address.value = "172.16.31.10"
rx_ipv4.prefix.value = "24"
rx_ipv4.gateway.value = "172.16.58.3"
rx_bgpv4 = rx_ipv4.bgpv4
rx_bgpv4.name = "rx_bgp"
rx_bgpv4.as_type = "ibgp"
rx_bgpv4.dut_ipv4_address.value = "172.16.58.3"
rx_bgpv4.as_number.value = "65200"
rx_rr = rx_bgpv4.bgpv4_route_ranges.bgpv4routerange()[-1]
rx_rr.name = "rx_rr"
rx_rr.address_count = "1000"
rx_rr.address.value = "192.168.127.12"
rx_rr.prefix.value = "32"
# flow config
flow = config.flows.flow(name="convergence_test")[-1]
flow.tx_rx.device.tx_names = [tx_rr.name]
flow.tx_rx.device.rx_names = [rx_rr.name]
api.set_config(config)
if __name__ == "__main__":
pytest.main(["-s", __file__])
|
StarcoderdataPython
|
1820242
|
<reponame>tracelytics/python-traceview<gh_stars>0
"""Tracelytics instrumentation for Django
Copyright (C) 2016 by SolarWinds, LLC.
All rights reserved.
"""
# django middleware for passing values to oboe
__all__ = ("OboeDjangoMiddleware", "install_oboe_instrumentation")
import oboe
from oboeware import imports
from oboeware import oninit
import sys, threading, functools
from distutils.version import StrictVersion
class OboeWSGIHandler(object):
""" Wrapper WSGI Handler for Django's django.core.handlers.wsgi:WSGIHandler
Can be used as a replacement for Django's WSGIHandler, e.g. with uWSGI.
"""
def __init__(self):
""" Import and instantiate django.core.handlers.WSGIHandler,
now that the load_middleware wrapper below has been initialized. """
from django.core.handlers.wsgi import WSGIHandler as djhandler
self._handler = djhandler()
def __call__(self, environ, start_response):
return self._handler(environ, start_response)
# Middleware hooks listed here: http://docs.djangoproject.com/en/dev/ref/middleware/
class OboeDjangoMiddleware(object):
def __init__(self):
from django.conf import settings
try:
self.layer = settings.OBOE_BASE_LAYER
except AttributeError, e:
self.layer = 'django'
def _singleline(self, e): # some logs like single-line errors better
return str(e).replace('\n', ' ').replace('\r', ' ')
def process_request(self, request):
try:
xtr_hdr = request.META.get("HTTP_X-Trace", request.META.get("HTTP_X_TRACE"))
avw_hdr = request.META.get("HTTP_X-TV-Meta", request.META.get("HTTP_X_TV_META"))
oboe.start_trace(self.layer, xtr=xtr_hdr, avw=avw_hdr, store_backtrace=False)
except Exception, e:
print >> sys.stderr, "Oboe middleware error:", self._singleline(e)
def process_view(self, request, view_func, view_args, view_kwargs):
if not oboe.Context.get_default().is_valid():
return
try:
kvs = {'Controller': view_func.__module__,
# XXX Not Python2.4-friendly
'Action': view_func.__name__ if hasattr(view_func, '__name__') else None}
oboe.log('process_view', None, keys=kvs, store_backtrace=False)
except Exception, e:
print >> sys.stderr, "Oboe middleware error:", self._singleline(e)
def process_response(self, request, response):
if not oboe.Context.get_default().is_valid():
return response
try:
kvs = {'HTTP-Host': request.META['HTTP_HOST'],
'Method': request.META['REQUEST_METHOD'],
'URL': request.build_absolute_uri(),
'Status': response.status_code}
response['X-Trace'] = oboe.end_trace(self.layer, keys=kvs)
except Exception, e:
print >> sys.stderr, "Oboe middleware error:", self._singleline(e)
return response
def process_exception(self, request, exception):
try:
oboe.log_exception()
except Exception, e:
print >> sys.stderr, "Oboe middleware error:", self._singleline(e)
def middleware_hooks(module, objname):
try:
# wrap middleware callables we want to wrap
cls = getattr(module, objname, None)
if not cls:
return
for method in ['process_request',
'process_view',
'process_response',
'process_template_response',
'process_exception']:
fn = getattr(cls, method, None)
if not fn:
continue
profile_name = '%s.%s.%s' % (module.__name__, objname, method)
setattr(cls, method,
oboe.profile_function(profile_name)(fn))
except Exception, e:
print >> sys.stderr, "Oboe error:", str(e)
load_middleware_lock = threading.Lock()
def add_rum_template_tags():
""" Register Django template tags.
1. simple_tag uses method name, so make some proxy methods
2. inserting into django.templates.libraries shortcut
"""
def oboe_rum_header():
return oboe.rum_header()
def oboe_rum_footer():
return oboe.rum_footer()
import django.template as tem_mod
l = tem_mod.Library()
l.simple_tag(oboe_rum_header)
l.simple_tag(oboe_rum_footer)
tem_mod.libraries['oboe'] = l
def on_load_middleware():
""" wrap Django middleware from a list """
# protect middleware wrapping: only a single thread proceeds
global load_middleware_lock # lock gets overwritten as None after init
if not load_middleware_lock: # already initialized? abort
return
mwlock = load_middleware_lock
mwlock.acquire() # acquire global lock
if not load_middleware_lock: # check again
mwlock.release() # abort
return
load_middleware_lock = None # mark global as "init done"
try:
# middleware hooks
from django.conf import settings
for i in settings.MIDDLEWARE_CLASSES:
if i.startswith('oboe'):
continue
dot = i.rfind('.')
if dot < 0 or dot+1 == len(i):
continue
objname = i[dot+1:]
imports.whenImported(i[:dot],
functools.partial(middleware_hooks, objname=objname)) # XXX Not Python2.4-friendly
# ORM
if oboe.config['inst_enabled']['django_orm']:
from oboeware import inst_django_orm
imports.whenImported('django.db.backends', inst_django_orm.wrap)
# templates
if oboe.config['inst_enabled']['django_templates']:
from oboeware import inst_django_templates
import django
if StrictVersion(django.get_version()) >= StrictVersion('1.3'):
imports.whenImported('django.template.base', inst_django_templates.wrap)
else:
imports.whenImported('django.template', inst_django_templates.wrap)
        # load pluggable instrumentation
from loader import load_inst_modules
load_inst_modules()
# it's usually a tuple, but sometimes it's a list
if type(settings.MIDDLEWARE_CLASSES) is tuple:
settings.MIDDLEWARE_CLASSES = ('oboeware.djangoware.OboeDjangoMiddleware',) + settings.MIDDLEWARE_CLASSES
elif type(settings.MIDDLEWARE_CLASSES) is list:
settings.MIDDLEWARE_CLASSES = ['oboeware.djangoware.OboeDjangoMiddleware'] + settings.MIDDLEWARE_CLASSES
else:
print >> sys.stderr, "Oboe error: thought MIDDLEWARE_CLASSES would be either a tuple or a list, got " + \
str(type(settings.MIDDLEWARE_CLASSES))
finally: # release instrumentation lock
mwlock.release()
try:
add_rum_template_tags()
except Exception, e:
print >> sys.stderr, "Oboe error: couldn't add RUM template tags: %s" % (e,)
def install_oboe_middleware(module):
def base_handler_wrapper(func):
@functools.wraps(func) # XXX Not Python2.4-friendly
def wrap_method(*f_args, **f_kwargs):
on_load_middleware()
return func(*f_args, **f_kwargs)
return wrap_method
try:
cls = getattr(module, 'BaseHandler', None)
try:
if not cls or cls.OBOE_MIDDLEWARE_LOADER:
return
except AttributeError, e:
cls.OBOE_MIDDLEWARE_LOADER = True
fn = getattr(cls, 'load_middleware', None)
setattr(cls, 'load_middleware', base_handler_wrapper(fn))
except Exception, e:
print >> sys.stderr, "Oboe error:", str(e)
try:
imports.whenImported('django.core.handlers.base', install_oboe_middleware)
# phone home
oninit.report_layer_init(layer='django')
except ImportError, e:
# gracefully disable tracing if Tracelytics oboeware not present
print >> sys.stderr, "[oboe] Unable to instrument app and middleware: %s" % e
|
StarcoderdataPython
|
11345001
|
<filename>lang/py/cookbook/v2/source/cb2_19_13_exm_4.py
for result in fetchsome(cursor):
doSomethingWith(result)
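# The two lines above are a usage fragment: fetchsome(), cursor and
# doSomethingWith() are defined elsewhere in the recipe. A plausible sketch
# of the generator the loop relies on (an assumption inferred from the call
# site) is a batched wrapper over a DB-API cursor:
def fetchsome(cursor, arraysize=5000):
    """Yield rows from a DB-API cursor, fetching arraysize rows at a time."""
    while True:
        rows = cursor.fetchmany(arraysize)
        if not rows:
            break
        for row in rows:
            yield row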
|
StarcoderdataPython
|
3509116
|
import json
import os
"""
cd {DanceTrack ROOT}/CenterNet
cd data
mkdir -p dancetrack_coco_hp/annotations
cd dancetrack_coco_hp
ln -s ../coco/train2017 coco_train
ln -s ../dancetrack/train dancetrack_train
cd ../..
"""
print('coco_hp is loading...')
coco_json = json.load(open('data/coco/annotations/person_keypoints_train2017.json','r'))
max_img_id = 0
img_list = list()
for img in coco_json['images']:
img['file_name'] = 'coco_train/' + img['file_name']
img_list.append(img)
max_img_id = max(max_img_id, int(img['id']))
max_ann_id = 0
ann_list = list()
for ann in coco_json['annotations']:
ann_list.append(ann)
max_ann_id = max(max_ann_id, int(ann['id']))
category_list = coco_json['categories']
print('dancetrack is loading...')
dancetrack_json = json.load(open('data/dancetrack/annotations/train.json','r'))
img_id_count = 0
for img in dancetrack_json['images']:
img_id_count += 1
img['file_name'] = 'dancetrack_train/' + img['file_name']
img['id'] = img['id'] + max_img_id
img_list.append(img)
for ann in dancetrack_json['annotations']:
ann['id'] = ann['id'] + max_ann_id
ann['image_id'] = ann['image_id'] + max_img_id
ann['category_id'] = 1
ann_list.append(ann)
print('mix is saving...')
mix_json = dict()
mix_json['images'] = img_list
mix_json['annotations'] = ann_list
mix_json['categories'] = category_list
json.dump(mix_json, open('data/dancetrack_coco_hp/annotations/train.json','w'))
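# A quick sanity-check sketch (an addition, not part of the original dump
# flow): verify that offsetting by max_img_id / max_ann_id kept the merged
# image and annotation ids unique.
assert len({img['id'] for img in img_list}) == len(img_list)
assert len({ann['id'] for ann in ann_list}) == len(ann_list)
print('merged %d images and %d annotations' % (len(img_list), len(ann_list)))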
|
StarcoderdataPython
|
3574487
|
import os
from unittest import TestCase
from image_keras.supports import path
class TestPath(TestCase):
current_path: str = os.path.dirname(os.path.abspath(__file__))
working_path: str = os.getcwd()
test_path: str = os.path.join(working_path, "tests")
test_resource_folder_name: str = "test_resources"
def test_split_fullpath_1(self):
test_resource_path = path.split_fullpath(
os.path.join(self.test_path, self.test_resource_folder_name)
)
print(test_resource_path)
self.assertEqual(test_resource_path[1], None)
self.assertEqual(test_resource_path[2], None)
def test_split_fullpath_2(self):
test_resource_sample_img_path = path.split_fullpath(
os.path.join(self.test_path, self.test_resource_folder_name, "sample.png")
)
print(test_resource_sample_img_path)
self.assertEqual(test_resource_sample_img_path[1], "sample")
self.assertEqual(test_resource_sample_img_path[2], ".png")
def test_split_fullpath_3(self):
test_filename_path = path.split_fullpath(
os.path.join(self.working_path, "LICENSE")
)
print(test_filename_path)
self.assertEqual(test_filename_path[1], "LICENSE")
self.assertEqual(test_filename_path[2], "")
def test_get_image_filenames(self):
test_image_filenames = path.get_image_filenames(
os.path.join(self.test_path, self.test_resource_folder_name)
)
self.assertEqual(len(test_image_filenames), 4)
|
StarcoderdataPython
|
243986
|
#!/usr/bin/python
#
# This program source code file is part of KiCad, a free EDA CAD application.
#
# Copyright (C) 2012-2014 KiCad Developers, see change_log.txt for contributors.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you may find one here:
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# or you may search the http://www.gnu.org website for the version 2 license,
# or you may write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
from pcbnew import *
import HelpfulFootprintWizardPlugin as HFPW
class TouchSliderWizard(HFPW.HelpfulFootprintWizardPlugin):
def GetName(self):
"""
Return footprint name.
This is specific to each footprint class, you need to implement this
"""
return 'Touch Slider'
def GetDescription(self):
"""
Return footprint description.
This is specific to each footprint class, you need to implement this
"""
return 'Capacitive Touch Slider wizard'
def GetValue(self):
steps = int(self.parameters["Pads"]["*steps"])
return "TS"+str(steps)
def GenerateParameterList(self):
self.AddParam("Pads", "steps", self.uNatural, 4)
self.AddParam("Pads", "bands", self.uNatural, 2)
self.AddParam("Pads", "width", self.uMM, 10)
self.AddParam("Pads", "length", self.uMM, 50)
self.AddParam("Pads", "clearance", self.uMM, 1)
# build a rectangular pad
def smdRectPad(self,module,size,pos,name):
pad = D_PAD(module)
pad.SetSize(size)
pad.SetShape(PAD_SHAPE_RECT)
pad.SetAttribute(PAD_ATTRIB_SMD)
pad.SetLayerSet(pad.ConnSMDMask())
pad.SetPos0(pos)
pad.SetPosition(pos)
pad.SetPadName(name)
return pad
def smdTrianglePad(self,module,size,pos,name,up_down=1,left_right=0):
pad = D_PAD(module)
pad.SetSize(wxSize(size[0],size[1]))
pad.SetShape(PAD_SHAPE_TRAPEZOID)
pad.SetAttribute(PAD_ATTRIB_SMD)
pad.SetLayerSet(pad.ConnSMDMask())
pad.SetPos0(pos)
pad.SetPosition(pos)
pad.SetPadName(name)
pad.SetDelta(wxSize(left_right*size[1],up_down*size[0]))
return pad
    # This method checks the parameters provided to the wizard and sets errors
def CheckParameters(self):
prms = self.parameters["Pads"]
steps = prms["*steps"]
bands = prms["*bands"]
if steps < 1:
self.parameter_errors["Pads"]["*steps"]="steps must be positive"
if bands < 1:
self.parameter_errors["Pads"]["*bands"]="bands must be positive"
touch_width = prms["width"]
touch_length = prms["length"]
touch_clearance = prms["clearance"]
    # The start pad is made of a rectangular pad plus a couple of
    # triangular pads whose tips face the middle/right of the first
    # rectangular pad
def AddStartPad(self,position,touch_width,step_length,clearance,name):
module = self.module
step_length = step_length - clearance
size_pad = wxSize(step_length/2.0+(step_length/3),touch_width)
pad = self.smdRectPad(module,size_pad,position-wxPoint(step_length/6,0),name)
module.Add(pad)
size_pad = wxSize(step_length/2.0,touch_width)
tp = self.smdTrianglePad(module,wxSize(size_pad[0],size_pad[1]/2),
position+wxPoint(size_pad[0]/2,size_pad[1]/4),
name)
module.Add(tp)
tp = self.smdTrianglePad(module,wxSize(size_pad[0],size_pad[1]/2),
position+wxPoint(size_pad[0]/2,-size_pad[1]/4),
name
,-1)
module.Add(tp)
# compound a "start pad" shape plus a triangle on the left, pointing to
# the previous touch-pad
def AddMiddlePad(self,position,touch_width,step_length,clearance,name):
module = self.module
step_length = step_length - clearance
        size_pad = wxSize(step_length/2.0,touch_width)
pad = self.smdRectPad(module,size_pad,position,name)
module.Add(pad)
tp = self.smdTrianglePad(module,wxSize(size_pad[0],size_pad[1]/2),
position+wxPoint(size_pad[0]/2,size_pad[1]/4),
name)
module.Add(tp)
tp = self.smdTrianglePad(module,wxSize(size_pad[0],size_pad[1]/2),
position+wxPoint(size_pad[0]/2,-size_pad[1]/4),
name
,-1)
module.Add(tp)
tp = self.smdTrianglePad(module,wxSize(size_pad[0],size_pad[1]/2),
position+wxPoint(-size_pad[0],0),
name,
0,
-1)
module.Add(tp)
def AddFinalPad(self,position,touch_width,step_length,clearance,name):
module = self.module
step_length = step_length - clearance
size_pad = wxSize(step_length/2.0,touch_width)
pad = self.smdRectPad(module,
wxSize(size_pad[0]+(step_length/3),size_pad[1]),
position+wxPoint(step_length/6,0),
name)
module.Add(pad)
tp = self.smdTrianglePad(module,wxSize(size_pad[0],size_pad[1]/2),
position+wxPoint(-size_pad[0],0),
name,
0,
-1)
module.Add(tp)
def AddStrip(self,pos,steps,touch_width,step_length,touch_clearance):
self.AddStartPad(pos,touch_width,step_length,touch_clearance,"1")
for n in range(2,steps):
pos = pos + wxPoint(step_length,0)
self.AddMiddlePad(pos,touch_width,step_length,touch_clearance,str(n))
pos = pos + wxPoint(step_length,0)
self.AddFinalPad(pos,touch_width,step_length,touch_clearance,str(steps))
# build the footprint from parameters
# FIX ME: the X and Y position of the footprint can be better.
def BuildThisFootprint(self):
prm = self.parameters["Pads"]
steps = int(prm["*steps"])
bands = int(prm["*bands"])
touch_width = prm["width"]
touch_length = prm["length"]
touch_clearance = prm["clearance"]
step_length = float(touch_length) / float(steps)
t_size = self.GetTextSize()
w_text = self.draw.GetLineTickness()
ypos = touch_width/(bands*2) + t_size/2 + w_text
self.draw.Value(0, -ypos, t_size)
ypos += t_size + w_text*2
self.draw.Reference(0, -ypos, t_size)
# starting pad
pos = wxPointMM(0,0)
band_width = touch_width/bands
for b in range(bands):
self.AddStrip(pos,steps,band_width,step_length,touch_clearance)
pos += wxPoint(0,band_width)
TouchSliderWizard().register()
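# A usage note (assumption: run inside KiCad's footprint wizard environment,
# where pcbnew and HelpfulFootprintWizardPlugin are importable): after
# register(), the wizard shows up in KiCad's footprint wizard list, and each
# band re-emits pads named "1"..str(steps), so the default 4-step, 2-band
# slider produces two stacked strips sharing pad names "1" through "4".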
|
StarcoderdataPython
|
206489
|
<reponame>owen800q/execution-trace-viewer
import sys
import os
import functools
import traceback
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from yapsy.PluginManager import PluginManager
from core.trace_data import TraceData
from core.bookmark import Bookmark
from core import trace_files
from core.filter_and_find import find
from core.filter_and_find import filter_trace
from core.filter_and_find import TraceField
from core.syntax import AsmHighlighter
from core.api import Api
from core import prefs
class MainWindow(QtWidgets.QMainWindow):
"""MainWindow class
Attributes:
trace_data (TraceData): TraceData object
filtered_trace (list): Filtered trace
"""
def __init__(self):
"""Inits MainWindow, UI and plugins"""
super(MainWindow, self).__init__()
self.api = Api(self)
self.trace_data = TraceData()
self.filtered_trace = None
self.init_plugins()
self.init_ui()
if len(sys.argv) > 1:
self.open_trace(sys.argv[1])
def dragEnterEvent(self, event):
"""QMainWindow method reimplementation for file drag."""
event.accept()
def dropEvent(self, event):
"""QMainWindow method reimplementation for file drop."""
if event.mimeData().hasUrls():
for url in event.mimeData().urls():
local_file = url.toLocalFile()
if os.path.isfile(local_file):
self.open_trace(local_file)
def init_plugins(self):
"""Inits plugins"""
self.manager = PluginManager()
self.manager.setPluginPlaces(["plugins"])
self.manager.collectPlugins()
for plugin in self.manager.getAllPlugins():
print_debug("Plugin found: %s" % plugin.name)
def init_plugins_menu(self):
"""Inits plugins menu"""
self.plugins_topmenu.clear()
reload_action = QtWidgets.QAction("Reload plugins", self)
func = functools.partial(self.reload_plugins)
reload_action.triggered.connect(func)
self.plugins_topmenu.addAction(reload_action)
self.plugins_topmenu.addSeparator()
for plugin in self.manager.getAllPlugins():
action = QtWidgets.QAction(plugin.name, self)
func = functools.partial(self.execute_plugin, plugin)
action.triggered.connect(func)
self.plugins_topmenu.addAction(action)
def reload_plugins(self):
"""Reloads plugins"""
self.init_plugins()
self.init_trace_table_menu()
self.init_plugins_menu()
def init_ui(self):
"""Inits UI"""
uic.loadUi("gui/mainwindow.ui", self)
title = prefs.PACKAGE_NAME + " " + prefs.PACKAGE_VERSION
self.setWindowTitle(title)
self.filter_button.clicked.connect(self.on_filter_clicked)
self.filter_check_box.stateChanged.connect(self.on_filter_check_box_state_changed)
self.find_next_button.clicked.connect(lambda: self.on_find_clicked(1))
self.find_prev_button.clicked.connect(lambda: self.on_find_clicked(-1))
# accept file drops
self.setAcceptDrops(True)
# make trace table wider than regs&mem
self.splitter1.setSizes([1400, 100])
self.splitter2.setSizes([600, 100])
# Init trace table
self.trace_table.setColumnCount(len(prefs.TRACE_LABELS))
self.trace_table.setHorizontalHeaderLabels(prefs.TRACE_LABELS)
self.trace_table.itemSelectionChanged.connect(self.on_trace_table_selection_changed)
self.trace_table.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.trace_table.customContextMenuRequested.connect(
self.trace_table_context_menu_event
)
# Init register table
self.reg_table.setColumnCount(len(prefs.REG_LABELS))
self.reg_table.setHorizontalHeaderLabels(prefs.REG_LABELS)
self.reg_table.horizontalHeader().setStretchLastSection(True)
# Init memory table
self.mem_table.setColumnCount(len(prefs.MEM_LABELS))
self.mem_table.setHorizontalHeaderLabels(prefs.MEM_LABELS)
self.mem_table.horizontalHeader().setStretchLastSection(True)
# Init bookmark table
self.bookmark_table.setColumnCount(len(prefs.BOOKMARK_LABELS))
self.bookmark_table.setHorizontalHeaderLabels(prefs.BOOKMARK_LABELS)
self.bookmark_table.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.bookmark_table.customContextMenuRequested.connect(
self.bookmark_table_context_menu_event
)
self.bookmark_menu = QtWidgets.QMenu(self)
go_action = QtWidgets.QAction("Go to bookmark", self)
go_action.triggered.connect(self.go_to_bookmark)
self.bookmark_menu.addAction(go_action)
delete_bookmarks_action = QtWidgets.QAction("Delete bookmark(s)", self)
delete_bookmarks_action.triggered.connect(self.delete_bookmarks)
self.bookmark_menu.addAction(delete_bookmarks_action)
# Menu
exit_action = QtWidgets.QAction("&Exit", self)
exit_action.setShortcut("Ctrl+Q")
exit_action.setStatusTip("Exit application")
exit_action.triggered.connect(self.close)
open_trace_action = QtWidgets.QAction("&Open trace..", self)
open_trace_action.setStatusTip("Open trace")
open_trace_action.triggered.connect(self.dialog_open_trace)
self.save_trace_action = QtWidgets.QAction("&Save trace", self)
self.save_trace_action.setShortcut("Ctrl+S")
self.save_trace_action.setStatusTip("Save trace")
self.save_trace_action.triggered.connect(self.save_trace)
self.save_trace_action.setEnabled(False)
save_trace_as_action = QtWidgets.QAction("&Save trace as..", self)
save_trace_as_action.setStatusTip("Save trace as..")
save_trace_as_action.triggered.connect(self.dialog_save_trace_as)
save_trace_as_json_action = QtWidgets.QAction("&Save trace as JSON..", self)
save_trace_as_json_action.setStatusTip("Save trace as JSON..")
save_trace_as_json_action.triggered.connect(self.dialog_save_trace_as_json)
file_menu = self.menu_bar.addMenu("&File")
file_menu.addAction(open_trace_action)
file_menu.addAction(self.save_trace_action)
file_menu.addAction(save_trace_as_action)
file_menu.addAction(save_trace_as_json_action)
file_menu.addAction(exit_action)
self.plugins_topmenu = self.menu_bar.addMenu("&Plugins")
clear_bookmarks_action = QtWidgets.QAction("&Clear bookmarks", self)
clear_bookmarks_action.setStatusTip("Clear bookmarks")
clear_bookmarks_action.triggered.connect(self.clear_bookmarks)
bookmarks_menu = self.menu_bar.addMenu("&Bookmarks")
bookmarks_menu.addAction(clear_bookmarks_action)
# Init right click menu for trace table
self.init_trace_table_menu()
self.init_plugins_menu()
about_action = QtWidgets.QAction("&About", self)
about_action.triggered.connect(self.show_about_dialog)
about_menu = self.menu_bar.addMenu("&About")
about_menu.addAction(about_action)
if prefs.USE_SYNTAX_HIGHLIGHT:
self.highlight = AsmHighlighter(self.log_text_edit.document())
for field in prefs.FIND_FIELDS:
self.find_combo_box.addItem(field)
if prefs.SHOW_SAMPLE_FILTERS:
for sample_filter in prefs.SAMPLE_FILTERS:
self.filter_edit.addItem(sample_filter)
self.filter_edit.keyPressEvent = self.on_filter_edit_key_pressed
self.show()
def init_trace_table_menu(self):
"""Initializes right click menu for trace table"""
self.trace_table_menu = QtWidgets.QMenu(self)
copy_action = QtWidgets.QAction("Print selected cells", self)
copy_action.triggered.connect(self.trace_table_print_cells)
self.trace_table_menu.addAction(copy_action)
add_bookmark_action = QtWidgets.QAction("Add Bookmark", self)
add_bookmark_action.triggered.connect(self.trace_table_create_bookmark)
self.trace_table_menu.addAction(add_bookmark_action)
plugins_menu = QtWidgets.QMenu("Plugins", self)
for plugin in self.manager.getAllPlugins():
action = QtWidgets.QAction(plugin.name, self)
func = functools.partial(self.execute_plugin, plugin)
action.triggered.connect(func)
plugins_menu.addAction(action)
self.trace_table_menu.addMenu(plugins_menu)
def set_filter(self, filter_text):
"""Sets a a new filter for trace and filters trace"""
try:
self.filtered_trace = filter_trace(
self.trace_data.trace,
self.trace_data.regs,
filter_text
)
except Exception as exc:
print("Error on filter: " + str(exc))
print(traceback.format_exc())
def get_visible_trace(self):
"""Returns the trace that is currently shown on trace table"""
if self.filter_check_box.isChecked() and self.filtered_trace is not None:
return self.filtered_trace
return self.trace_data.trace
def bookmark_table_context_menu_event(self):
"""Context menu for bookmark table right click"""
self.bookmark_menu.popup(QtGui.QCursor.pos())
def dialog_open_trace(self):
"""Shows dialog to open trace file"""
all_traces = "All traces (*.tvt *.trace32 *.trace64)"
all_files = "All files (*.*)"
filename = QtWidgets.QFileDialog.getOpenFileName(
self, "Open trace", "", all_traces + ";; " + all_files
)[0]
if filename:
self.open_trace(filename)
if self.trace_data:
self.save_trace_action.setEnabled(True)
def dialog_save_trace_as(self):
"""Shows a dialog to select a save file"""
filename = QtWidgets.QFileDialog.getSaveFileName(
self, "Save trace as", "", "Trace Viewer traces (*.tvt);; All files (*.*)"
)[0]
print_debug("Save trace as: " + filename)
if filename and trace_files.save_as_tv_trace(self.trace_data, filename):
self.trace_data.filename = filename
self.save_trace_action.setEnabled(True)
def dialog_save_trace_as_json(self):
"""Shows a dialog to save trace to JSON file"""
filename = QtWidgets.QFileDialog.getSaveFileName(
self, "Save as JSON", "", "JSON files (*.txt);; All files (*.*)"
)[0]
print_debug("Save trace as: " + filename)
if filename:
trace_files.save_as_json(self.trace_data, filename)
def execute_plugin(self, plugin):
"""Executes a plugin and updates tables"""
print_debug("Executing a plugin: %s" % plugin.name)
try:
plugin.plugin_object.execute(self.api)
except Exception:
print_debug("Error in plugin:")
print_debug(traceback.format_exc())
self.print("Error in plugin:")
self.print(traceback.format_exc())
finally:
if prefs.USE_SYNTAX_HIGHLIGHT:
self.highlight.rehighlight()
def on_filter_edit_key_pressed(self, event):
"""Checks if enter is pressed on filterEdit"""
key = event.key()
if key == QtCore.Qt.Key_Return:
self.on_filter_clicked()
QtWidgets.QComboBox.keyPressEvent(self.filter_edit, event)
def show_filtered_trace(self):
"""Shows filtered_trace on trace_table"""
if not self.filter_check_box.isChecked():
self.filter_check_box.setChecked(True) # this will also update trace_table
else:
self.update_trace_table()
def on_filter_check_box_state_changed(self):
"""Callback function for state change of filter checkbox"""
self.update_trace_table()
def on_find_clicked(self, direction):
"""Find next or prev button clicked"""
row = self.trace_table.currentRow()
if row < 0:
row = 0
keyword = self.search_edit.text()
index = self.find_combo_box.currentIndex()
if index == 0:
field = TraceField.DISASM
elif index == 1:
field = TraceField.REGS
elif index == 2:
field = TraceField.MEM
elif index == 3:
field = TraceField.COMMENT
elif index == 4:
field = TraceField.ANY
try:
row_number = find(
trace=self.get_visible_trace(),
field=field,
keyword=keyword,
start_row=row + direction,
direction=direction,
)
except Exception as exc:
print("Error on find: " + str(exc))
print(traceback.format_exc())
self.print(traceback.format_exc())
return
if row_number is not None:
self.goto_row(self.trace_table, row_number)
self.select_row(self.trace_table, row_number)
else:
print_debug(
"%s not found (row %d, direction %d)" % (keyword, row, direction)
)
def on_filter_clicked(self):
"""Sets a filter and filters trace data"""
filter_text = self.filter_edit.currentText()
print_debug("Set filter: %s" % filter_text)
self.set_filter(filter_text)
if not self.filter_check_box.isChecked():
self.filter_check_box.setChecked(True)
else:
self.update_trace_table()
def on_trace_table_cell_edited(self, item):
"""Called when any cell is edited on trace table"""
table = self.trace_table
cell_type = item.whatsThis()
if cell_type == "comment":
row = table.currentRow()
if row < 0:
print_debug("Error, could not edit trace.")
return
row_id = int(table.item(row, 0).text())
self.trace_data.set_comment(item.text(), row_id)
else:
print_debug("Only comment editing allowed for now...")
def on_bookmark_table_cell_edited(self, item):
"""Called when any cell is edited on bookmark table"""
cell_type = item.whatsThis()
bookmarks = self.trace_data.get_bookmarks()
row = self.bookmark_table.currentRow()
if row < 0:
print_debug("Error, could not edit bookmark.")
return
if cell_type == "startrow":
bookmarks[row].startrow = int(item.text())
elif cell_type == "endrow":
bookmarks[row].endrow = int(item.text())
elif cell_type == "address":
bookmarks[row].addr = item.text()
elif cell_type == "disasm":
bookmarks[row].disasm = item.text()
elif cell_type == "comment":
bookmarks[row].comment = item.text()
else:
print_debug("Unknown field edited in bookmark table...")
def open_trace(self, filename):
"""Opens and reads a trace file"""
print_debug("Opening trace file: %s" % filename)
self.close_trace()
self.trace_data = trace_files.open_trace(filename)
if self.trace_data is None:
print_debug("Error, couldn't open trace file: %s" % filename)
self.update_ui()
self.update_column_widths(self.trace_table)
def close_trace(self):
"""Clears trace and updates UI"""
self.trace_data = None
self.filtered_trace = None
self.update_ui()
def update_ui(self):
"""Updates tables and status bar"""
self.update_trace_table()
self.update_bookmark_table()
self.update_status_bar()
def save_trace(self):
"""Saves a trace file"""
filename = self.trace_data.filename
print_debug("Save trace: " + filename)
if filename:
trace_files.save_as_tv_trace(self.trace_data, filename)
def show_about_dialog(self):
"""Shows an about dialog"""
title = "About"
name = prefs.PACKAGE_NAME
version = prefs.PACKAGE_VERSION
copyrights = prefs.PACKAGE_COPYRIGHTS
url = prefs.PACKAGE_URL
text = "%s %s \n %s \n %s" % (name, version, copyrights, url)
QtWidgets.QMessageBox().about(self, title, text)
def add_bookmark_to_table(self, bookmark):
"""Adds a bookmark to bookmark table"""
table = self.bookmark_table
row_count = table.rowCount()
table.setRowCount(row_count + 1)
startrow = QtWidgets.QTableWidgetItem(bookmark.startrow)
startrow.setData(QtCore.Qt.DisplayRole, int(bookmark.startrow))
startrow.setWhatsThis("startrow")
table.setItem(row_count, 0, startrow)
endrow = QtWidgets.QTableWidgetItem(bookmark.endrow)
endrow.setData(QtCore.Qt.DisplayRole, int(bookmark.endrow))
endrow.setWhatsThis("endrow")
table.setItem(row_count, 1, endrow)
address = QtWidgets.QTableWidgetItem(bookmark.addr)
address.setWhatsThis("address")
table.setItem(row_count, 2, address)
disasm = QtWidgets.QTableWidgetItem(bookmark.disasm)
disasm.setWhatsThis("disasm")
table.setItem(row_count, 3, disasm)
comment = QtWidgets.QTableWidgetItem(bookmark.comment)
comment.setWhatsThis("comment")
table.setItem(row_count, 4, comment)
def update_column_widths(self, table):
"""Updates column widths of a TableWidget to match the content"""
table.setVisible(False) # fix ui glitch with column widths
table.resizeColumnsToContents()
table.horizontalHeader().setStretchLastSection(True)
table.setVisible(True)
def update_trace_table(self):
"""Updates trace table"""
table = self.trace_table
if self.trace_data is None:
table.setRowCount(0)
return
try:
table.itemChanged.disconnect()
except Exception:
pass
trace = self.get_visible_trace()
row_count = len(trace)
print_debug("Updating trace table: %d rows." % row_count)
table.setRowCount(row_count)
if row_count == 0:
return
ip_name = self.trace_data.get_instruction_pointer_name()
if ip_name:
ip_reg_index = self.trace_data.regs[ip_name]
for i in range(0, row_count):
row_id = str(trace[i]["id"])
if ip_name:
address = trace[i]["regs"][ip_reg_index]
table.setItem(i, 1, QtWidgets.QTableWidgetItem(hex(address)))
opcodes = trace[i]["opcodes"]
disasm = trace[i]["disasm"]
comment = str(trace[i]["comment"])
comment_item = QtWidgets.QTableWidgetItem(comment)
comment_item.setWhatsThis("comment")
table.setItem(i, 0, QtWidgets.QTableWidgetItem(row_id))
table.setItem(i, 2, QtWidgets.QTableWidgetItem(opcodes))
table.setItem(i, 3, QtWidgets.QTableWidgetItem(disasm))
table.setItem(i, 4, comment_item)
table.itemChanged.connect(self.on_trace_table_cell_edited)
def update_regs_and_mem(self):
"""Updates register and memory tables"""
# clear mem_table
self.mem_table.setRowCount(0)
if self.trace_data is None:
return
table = self.trace_table
row_ids = self.get_selected_row_ids(table)
if not row_ids:
return
row_id = int(row_ids[0])
trace_row = self.trace_data.trace[row_id]
if "regs" in trace_row:
registers = []
flags = None
reg_values = trace_row["regs"]
for reg_name, reg_index in self.trace_data.regs.items():
if (self.trace_data.arch in ('x86', 'x64') and prefs.REG_FILTER_ENABLED
and reg_name not in prefs.REG_FILTER):
continue # don't show this register
reg_value = reg_values[reg_index]
reg = {}
reg["name"] = reg_name
reg["value"] = reg_value
registers.append(reg)
if reg_name == "eflags":
eflags = reg_value
flags = {
"c": eflags & 1, # carry
"p": (eflags >> 2) & 1, # parity
# "a": (eflags >> 4) & 1, # aux_carry
"z": (eflags >> 6) & 1, # zero
"s": (eflags >> 7) & 1, # sign
# "d": (eflags >> 10) & 1, # direction
# "o": (eflags >> 11) & 1 # overflow
}
if self.reg_table.rowCount() != len(registers):
self.reg_table.setRowCount(len(registers))
modified_regs = []
if prefs.HIGHLIGHT_MODIFIED_REGS:
modified_regs = self.trace_data.get_modified_regs(row_id)
# fill register table
for i, reg in enumerate(registers):
self.reg_table.setItem(i, 0, QtWidgets.QTableWidgetItem(reg["name"]))
self.reg_table.setItem(i, 1, QtWidgets.QTableWidgetItem(hex(reg["value"])))
self.reg_table.setItem(i, 2, QtWidgets.QTableWidgetItem(str(reg["value"])))
if reg["name"] in modified_regs:
self.reg_table.item(i, 0).setBackground(QtGui.QColor(100, 100, 150))
self.reg_table.item(i, 1).setBackground(QtGui.QColor(100, 100, 150))
self.reg_table.item(i, 2).setBackground(QtGui.QColor(100, 100, 150))
if flags:
flags_text = f"C:{flags['c']} P:{flags['p']} Z:{flags['z']} S:{flags['s']}"
row_count = self.reg_table.rowCount()
self.reg_table.setRowCount(row_count + 1)
self.reg_table.setItem(row_count, 0, QtWidgets.QTableWidgetItem("flags"))
self.reg_table.setItem(row_count, 1, QtWidgets.QTableWidgetItem(flags_text))
if "mem" in trace_row:
mems = trace_row["mem"]
self.mem_table.setRowCount(len(mems))
for i, mem in enumerate(mems):
self.mem_table.setItem(i, 0, QtWidgets.QTableWidgetItem(mem["access"]))
self.mem_table.setItem(i, 1, QtWidgets.QTableWidgetItem(hex(mem["addr"])))
self.mem_table.setItem(i, 2, QtWidgets.QTableWidgetItem(hex(mem["value"])))
self.update_column_widths(self.mem_table)
def update_status_bar(self):
"""Updates status bar"""
if self.trace_data is None:
return
table = self.trace_table
row = table.currentRow()
row_count = table.rowCount()
row_info = "%d/%d" % (row, row_count - 1)
filename = self.trace_data.filename.split("/")[-1]
msg = "File: %s | Row: %s " % (filename, row_info)
selected_row_id = 0
row_ids = self.get_selected_row_ids(table)
if row_ids:
selected_row_id = int(row_ids[0])
bookmark = self.trace_data.get_bookmark_from_row(selected_row_id)
if bookmark:
msg += " | Bookmark: %s ; %s" % (bookmark.disasm, bookmark.comment)
self.status_bar.showMessage(msg)
def get_selected_row_ids(self, table):
"""Returns IDs of all selected rows of TableWidget.
Args:
table: PyQt TableWidget
returns:
Ordered list of row ids
"""
row_ids_set = set(
table.item(index.row(), 0).text() for index in table.selectedIndexes()
)
row_ids_list = list(row_ids_set)
try:
row_ids_list.sort(key=int)
except ValueError:
print_debug("Error. Values in the first column must be integers.")
return None
return row_ids_list
def trace_table_create_bookmark(self):
"""Context menu action for creating a bookmark"""
table = self.trace_table
selected = table.selectedItems()
if not selected:
print_debug("Could not create a bookmark. Nothing selected.")
return
first_row = selected[0].row()
last_row = selected[-1].row()
addr = table.item(first_row, 1).text()
first_row_id = int(table.item(first_row, 0).text())
last_row_id = int(table.item(last_row, 0).text())
print_debug("New bookmark: %d-%d, addr: %s" % (first_row_id, last_row_id, addr))
bookmark = Bookmark(startrow=first_row_id, endrow=last_row_id, addr=addr)
self.trace_data.add_bookmark(bookmark)
self.update_bookmark_table()
def trace_table_print_cells(self):
"""Context menu action for trace table print cells"""
items = self.trace_table.selectedItems()
for item in items:
self.print(item.text())
def trace_table_context_menu_event(self):
"""Context menu for trace table right click"""
self.trace_table_menu.popup(QtGui.QCursor.pos())
def go_to_bookmark(self):
"""Goes to selected bookmark"""
selected_row_ids = self.get_selected_row_ids(self.bookmark_table)
if not selected_row_ids:
print_debug("Error. No bookmark selected.")
return
row_id = int(selected_row_ids[0])
if self.filter_check_box.isChecked():
self.filter_check_box.setChecked(False)
self.goto_row(self.trace_table, row_id)
self.select_row(self.trace_table, row_id)
self.tab_widget.setCurrentIndex(0)
def clear_bookmarks(self):
"""Clears all bookmarks"""
self.trace_data.clear_bookmarks()
self.update_bookmark_table()
def delete_bookmarks(self):
"""Deletes selected bookmarks"""
selected = self.bookmark_table.selectedItems()
if not selected:
print_debug("Could not delete a bookmark. Nothing selected.")
return
selected_rows = sorted(set({sel.row() for sel in selected}))
for row in reversed(selected_rows):
self.trace_data.delete_bookmark(row)
self.update_bookmark_table()
def get_selected_bookmarks(self):
"""Returns selected bookmarks"""
selected = self.bookmark_table.selectedItems()
if not selected:
print_debug("No bookmarks selected.")
return []
selected_rows = sorted(set({sel.row() for sel in selected}))
all_bookmarks = self.trace_data.get_bookmarks()
return [all_bookmarks[i] for i in selected_rows]
def update_bookmark_table(self):
"""Updates bookmarks table from trace_data"""
if self.trace_data is None:
return
table = self.bookmark_table
try:
table.itemChanged.disconnect()
except Exception:
pass
table.setRowCount(0)
bookmarks = self.trace_data.get_bookmarks()
print_debug("Updating bookmark table: %d rows." % len(bookmarks))
for bookmark in bookmarks:
self.add_bookmark_to_table(bookmark)
table.setSortingEnabled(True)
table.itemChanged.connect(self.on_bookmark_table_cell_edited)
self.update_column_widths(table)
def on_trace_table_selection_changed(self):
"""Callback function for trace table selection change"""
self.update_regs_and_mem()
self.update_status_bar()
def print(self, text):
"""Prints text to TextEdit on log tab"""
self.log_text_edit.appendPlainText(str(text))
def goto_row(self, table, row):
"""Scrolls a table to the specified row"""
table.scrollToItem(table.item(row, 3), QtWidgets.QAbstractItemView.PositionAtCenter)
def select_row(self, table, row):
"""Selects a row in a table"""
table.clearSelection()
item = table.item(row, 0)
table.setCurrentItem(
item,
QtCore.QItemSelectionModel.Select
| QtCore.QItemSelectionModel.Rows
| QtCore.QItemSelectionModel.Current,
)
def ask_user(self, title, question):
"""Shows a messagebox with yes/no question
Args:
title (str): MessageBox title
            question (str): MessageBox question label
Returns:
bool: True if user clicked yes, False otherwise
"""
answer = QtWidgets.QMessageBox.question(
self,
title,
question,
QtWidgets.QMessageBox.StandardButtons(
QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No
)
)
return bool(answer == QtWidgets.QMessageBox.Yes)
def get_string_from_user(self, title, label):
"""Gets a string from user
Args:
title (str): Input dialog title
label (str): Input dialog label
Returns:
string: String given by user, empty string if user pressed cancel
"""
answer, ok_pressed = QtWidgets.QInputDialog.getText(self,
title,
label,
QtWidgets.QLineEdit.Normal,
""
)
if ok_pressed:
return answer
return ""
def show_messagebox(self, title, msg):
"""Shows a messagebox"""
alert = QtWidgets.QMessageBox()
alert.setWindowTitle(title)
alert.setText(msg)
alert.exec_()
def print_debug(msg):
"""Prints a debug message"""
if prefs.DEBUG:
print(msg)
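# A minimal launch sketch (hypothetical entry point; the repository's real
# launcher may differ): MainWindow needs only a QApplication, shows itself in
# __init__, and opens an optional trace file passed as sys.argv[1].
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    main_window = MainWindow()
    sys.exit(app.exec_())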
|
StarcoderdataPython
|
8013146
|
from Rules import ASingleRule
from Utils import ColorUtil
from RectUtils import RectUtil
from Rules import TextValidator
from Utils import Constants
from ocr.OCRTextWrapper import OCRTextWrapper
from Utils import GroupUtil
from Utils import TextUtils
#/**
# *
# * This word only has one child view, and the child view is too small
# * compared with it.
# *
# * @author tuannguyen
# *
# */
#@Deprecated
class RuleWordOnlyOneTinyChild(ASingleRule.ASingleRule):
def __init__(self,dipCalculator, tesseractOCR, matLog, ocrs, views):
super().__init__(dipCalculator, tesseractOCR, matLog, ocrs, views)
# @Override
def accept(self,ocr):
        # Count how many child views this word has; it must have exactly one
        # child view. Then test:
        # (1) this word is not messy, meaning it does not
        # "intersect not include" with other views (except the child view)
        # (2) the child view is too small compared with it
foundChildRects = RectUtil.findChildRect(ocr.rect, self.mViews)
if len(foundChildRects) == 1:
rectView = foundChildRects[0]
            newList = list(self.mViews)
            if rectView in newList:
                newList.remove(rectView)
findIntersectNotIncludeRect = RectUtil.findIntersectNotIncludeRect(ocr, newList)
if len(findIntersectNotIncludeRect) == 1:
iRect = findIntersectNotIncludeRect[0]
if RectUtil.dimesionEqual(ocr, iRect, 0.2) and RectUtil.dimesionSmallerThan(rectView, ocr, 0.8):
# this is wrong, ignore this word
# DarkSalmon
# http:#www.w3schools.com/tags/ref_color_tryit.asp?color=DarkSalmon
tv = TextValidator.TextValidator(ocr, (233, 150, 122), False, "This word only have one child view, and the child view is too small compare with it")
return tv
return None
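# A usage sketch (an assumption about how ASingleRule subclasses are driven):
# accept() returns a TextValidator flagging the OCR word (with an RGB debug
# color and a reason string) or None when the rule does not apply, e.g.:
#
#   rule = RuleWordOnlyOneTinyChild(dipCalculator, tesseractOCR, matLog, ocrs, views)
#   validators = [v for v in (rule.accept(ocr) for ocr in ocrs) if v is not None]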
|
StarcoderdataPython
|
79185
|
# based on tut_mission_B737.py and Vehicle.py from Regional Jet Optimization
#
# Created: Aug 2014, SUAVE Team
# Modified: Aug 2017, SUAVE Team
# Modified: Jul 2018, geo
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
# Python Imports
import numpy as np
import pylab as plt
# SUAVE Imports
import SUAVE
from SUAVE.Core import Data, Units
from SUAVE.Methods.Propulsion.turbofan_sizing import turbofan_sizing
from SUAVE.Methods.Geometry.Two_Dimensional.Cross_Section.Propulsion import compute_turbofan_geometry
from SUAVE.Input_Output.Results import print_parasite_drag, \
print_compress_drag, \
print_engine_data, \
print_mission_breakdown, \
print_weight_breakdown
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
configs, analyses = full_setup()
simple_sizing(configs)
configs.finalize()
analyses.finalize()
# weight analysis
weights = analyses.configs.base.weights
breakdown = weights.evaluate()
weights.vehicle.mass_properties.center_of_gravity = SUAVE.Methods.Center_of_Gravity.compute_aircraft_center_of_gravity(weights.vehicle, nose_load_fraction=.06)
# mission analysis
mission = analyses.missions.base
results = mission.evaluate()
CM = results.conditions.cruise.stability.static.CM[0][0]
cm0 = results.conditions.cruise.stability.static.cm0[0][0]
cm_alpha = results.conditions.cruise.stability.static.cm_alpha[0][0]
cn_beta = results.conditions.cruise.stability.static.cn_beta[0][0]
static_margin = results.conditions.cruise.stability.static.static_margin[0][0]
# print weight breakdown
print_weight_breakdown(configs.base,filename = 'E170_weight_breakdown.dat')
# print engine data into file
print_engine_data(configs.base,filename = 'E170_engine_data.dat')
# print parasite drag data into file
# define reference condition for parasite drag
ref_condition = Data()
ref_condition.mach_number = 0.3
ref_condition.reynolds_number = 12e6
print_parasite_drag(ref_condition,configs.cruise,analyses,'E170_parasite_drag.dat')
# print compressibility drag data into file
print_compress_drag(configs.cruise,analyses,filename = 'E170_compress_drag.dat')
# print mission breakdown
print_mission_breakdown(results,filename='E170_mission_breakdown.dat')
    # plot the results
plot_mission(results)
return
# ----------------------------------------------------------------------
# Analysis Setup
# ----------------------------------------------------------------------
def full_setup():
# vehicle data
vehicle = vehicle_setup()
configs = configs_setup(vehicle)
# vehicle analyses
configs_analyses = analyses_setup(configs)
# mission analyses
mission = mission_setup(configs_analyses)
missions_analyses = missions_setup(mission)
analyses = SUAVE.Analyses.Analysis.Container()
analyses.configs = configs_analyses
analyses.missions = missions_analyses
return configs, analyses
# ----------------------------------------------------------------------
# Define the Vehicle Analyses
# ----------------------------------------------------------------------
def analyses_setup(configs):
analyses = SUAVE.Analyses.Analysis.Container()
# build a base analysis for each config
for tag,config in configs.items():
analysis = base_analysis(config)
analyses[tag] = analysis
return analyses
def base_analysis(vehicle):
# ------------------------------------------------------------------
# Initialize the Analyses
# ------------------------------------------------------------------
analyses = SUAVE.Analyses.Vehicle()
# ------------------------------------------------------------------
# Basic Geometry Relations
sizing = SUAVE.Analyses.Sizing.Sizing()
sizing.features.vehicle = vehicle
analyses.append(sizing)
# ------------------------------------------------------------------
# Weights
weights = SUAVE.Analyses.Weights.Weights_Tube_Wing()
weights.vehicle = vehicle
analyses.append(weights)
# ------------------------------------------------------------------
# Aerodynamics Analysis
aerodynamics = SUAVE.Analyses.Aerodynamics.Fidelity_Zero()
aerodynamics.geometry = vehicle
analyses.append(aerodynamics)
# ------------------------------------------------------------------
# Stability Analysis
stability = SUAVE.Analyses.Stability.Fidelity_Zero()
stability.geometry = vehicle
analyses.append(stability)
# ------------------------------------------------------------------
# Planet Analysis
planet = SUAVE.Analyses.Planets.Planet()
analyses.append(planet)
# ------------------------------------------------------------------
# Atmosphere Analysis
atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
atmosphere.features.planet = planet.features
analyses.append(atmosphere)
return analyses
# ----------------------------------------------------------------------
# Define the Vehicle
# ----------------------------------------------------------------------
def vehicle_setup():
# ------------------------------------------------------------------
# Initialize the Vehicle
# ------------------------------------------------------------------
vehicle = SUAVE.Vehicle()
vehicle.tag = 'Embraer_E190'
# ------------------------------------------------------------------
# Vehicle-level Properties
# ------------------------------------------------------------------
# mass properties
vehicle.mass_properties.max_takeoff = 38600. * Units.kg
vehicle.mass_properties.operating_empty = 21157. * Units.kg
vehicle.mass_properties.takeoff = 38600. * Units.kg
vehicle.mass_properties.max_zero_fuel = 30900. * Units.kg
vehicle.mass_properties.cargo = 0.0 * Units.kg
vehicle.mass_properties.max_payload = 9743.0 * Units.kg
vehicle.mass_properties.max_fuel = 9335.0 * Units.kg
vehicle.mass_properties.center_of_gravity = [14.85, 0, 0]
# envelope properties
vehicle.envelope.ultimate_load = 3.75
vehicle.envelope.limit_load = 2.50
# basic parameters
vehicle.reference_area = 72.72 * Units['meters**2']
vehicle.passengers = 72
vehicle.systems.control = "fully powered"
vehicle.systems.accessories = "medium range"
# ------------------------------------------------------------------
# Main Wing
# ------------------------------------------------------------------
wing = SUAVE.Components.Wings.Main_Wing()
wing.tag = 'main_wing'
wing.aspect_ratio = 8.6
wing.sweeps.quarter_chord = 23.0 * Units.deg # 22.5
wing.thickness_to_chord = 0.11
wing.taper = 0.28
wing.span_efficiency = 1.0 #
wing.spans.projected = 26.0 * Units.meter
wing.chords.root = 5.428 * Units.meter # 5.203
wing.chords.tip = 1.380 * Units.meter # 1.460
wing.chords.mean_aerodynamic = 3.806 * Units.meter
wing.areas.reference = 72.72 * Units['meters**2']
wing.areas.wetted = 2.0 * wing.areas.reference
wing.areas.exposed = 0.8 * wing.areas.wetted
wing.areas.affected = 0.6 * wing.areas.reference
wing.twists.root = 2.0 * Units.degrees
wing.twists.tip = 0.0 * Units.degrees
wing.origin = [10.36122,0,0] #
wing.vertical = False
wing.symmetric = True
wing.high_lift = True
wing.flaps.type = "double_slotted"
wing.flaps.chord = 0.280 * Units.meter #
wing.dynamic_pressure_ratio = 1.0
# add to vehicle
vehicle.append_component(wing)
# ------------------------------------------------------------------
# Horizontal Stabilizer
# ------------------------------------------------------------------
wing = SUAVE.Components.Wings.Wing()
wing.tag = 'horizontal_stabilizer'
wing.aspect_ratio = 4.3 #5.5
wing.sweeps.quarter_chord = 30.0 * Units.deg #34.5
wing.thickness_to_chord = 0.3707 #0.11
wing.taper = 0.11
wing.span_efficiency = 0.9 #
wing.spans.projected = 10.000 * Units.meter
wing.chords.root = 3.394 * Units.meter
wing.chords.tip = 1.258 * Units.meter
wing.chords.mean_aerodynamic = 2.4895 * Units.meter
wing.areas.reference = 23.25 * Units['meters**2']
wing.areas.wetted = 2.0 * wing.areas.reference
wing.areas.exposed = 0.8 * wing.areas.wetted
wing.areas.affected = 0.6 * wing.areas.reference
wing.twists.root = 2.0 * Units.degrees
wing.twists.tip = 2.0 * Units.degrees
wing.origin = [24.6,0,0]
wing.vertical = False
wing.symmetric = True
wing.dynamic_pressure_ratio = 0.9 #
# add to vehicle
vehicle.append_component(wing)
# ------------------------------------------------------------------
# Vertical Stabilizer
# ------------------------------------------------------------------
wing = SUAVE.Components.Wings.Wing()
wing.tag = 'vertical_stabilizer'
# equal to E190 data
wing.aspect_ratio = 1.7
wing.sweeps.quarter_chord = 35 * Units.deg
wing.thickness_to_chord = 0.11
wing.taper = 0.31
wing.span_efficiency = 0.9
wing.spans.projected = 5.270 * Units.meter
wing.chords.root = 4.70 * Units.meter
wing.chords.tip = 1.45 * Units.meter
wing.chords.mean_aerodynamic = 3.36 * Units.meter
wing.areas.reference = 16.0 * Units['meters**2']
wing.areas.wetted = 2.0 * wing.areas.reference
wing.areas.exposed = 0.8 * wing.areas.wetted
wing.areas.affected = 0.6 * wing.areas.reference
wing.twists.root = 0.0 * Units.degrees
wing.twists.tip = 0.0 * Units.degrees
wing.origin = [23.9,0,0]
wing.vertical = True
wing.symmetric = False
wing.dynamic_pressure_ratio = 1.0
# add to vehicle
vehicle.append_component(wing)
# ------------------------------------------------------------------
# Fuselage
# ------------------------------------------------------------------
fuselage = SUAVE.Components.Fuselages.Fuselage()
fuselage.tag = 'fuselage'
fuselage.number_coach_seats = vehicle.passengers
fuselage.seats_abreast = 4
fuselage.seat_pitch = 0.7455 #
fuselage.fineness.nose = 2.0 #
fuselage.fineness.tail = 3.0 #
fuselage.lengths.nose = 6.82 * Units.meter
fuselage.lengths.tail = 10.67 * Units.meter
fuselage.lengths.cabin = 18.23 * Units.meter
fuselage.lengths.total = 29.90 * Units.meter
fuselage.lengths.fore_space = 0. * Units.meter
fuselage.lengths.aft_space = 0. * Units.meter
fuselage.width = 2.955 * Units.meter
fuselage.heights.maximum = 3.361 * Units.meter
fuselage.areas.side_projected = 203.32 * Units['meters**2']
fuselage.areas.wetted = 277.96 * Units['meters**2']
fuselage.areas.front_projected = 31.2 * Units['meters**2'] # 8.0110
fuselage.effective_diameter = 3.18
fuselage.differential_pressure = 10**5 * Units.pascal
fuselage.heights.at_quarter_length = 3.35 * Units.meter
fuselage.heights.at_three_quarters_length = 3.35 * Units.meter
fuselage.heights.at_wing_root_quarter_chord = 3.50 * Units.meter
# add to vehicle
vehicle.append_component(fuselage)
# ------------------------------------------------------------------
# Turbofan Network
# ------------------------------------------------------------------
#instantiate the gas turbine network
turbofan = SUAVE.Components.Energy.Networks.Turbofan()
turbofan.tag = 'turbofan'
# setup
turbofan.number_of_engines = 2
turbofan.bypass_ratio = 5.0
turbofan.engine_length = 3.1 * Units.meter
turbofan.nacelle_diameter = 1.64395 * Units.meter
turbofan.origin = [[9.721, 3.984,-1],[9.721,-3.984,-1]] # meters
#compute engine areas
turbofan.areas.wetted = 1.1*np.pi*turbofan.nacelle_diameter*turbofan.engine_length
# working fluid
turbofan.working_fluid = SUAVE.Attributes.Gases.Air()
# ------------------------------------------------------------------
# Component 1 - Ram
# to convert freestream static to stagnation quantities
# instantiate
ram = SUAVE.Components.Energy.Converters.Ram()
ram.tag = 'ram'
# add to the network
turbofan.append(ram)
# ------------------------------------------------------------------
# Component 2 - Inlet Nozzle
# instantiate
inlet_nozzle = SUAVE.Components.Energy.Converters.Compression_Nozzle()
inlet_nozzle.tag = 'inlet_nozzle'
# setup
inlet_nozzle.polytropic_efficiency = 0.98
inlet_nozzle.pressure_ratio = 0.98
# add to network
turbofan.append(inlet_nozzle)
# ------------------------------------------------------------------
# Component 3 - Low Pressure Compressor
# instantiate
compressor = SUAVE.Components.Energy.Converters.Compressor()
compressor.tag = 'low_pressure_compressor'
# setup
compressor.polytropic_efficiency = 0.91
compressor.pressure_ratio = 1.9
# add to network
turbofan.append(compressor)
# ------------------------------------------------------------------
# Component 4 - High Pressure Compressor
# instantiate
compressor = SUAVE.Components.Energy.Converters.Compressor()
compressor.tag = 'high_pressure_compressor'
# setup
compressor.polytropic_efficiency = 0.91
compressor.pressure_ratio = 10.0
# add to network
turbofan.append(compressor)
# ------------------------------------------------------------------
# Component 5 - Low Pressure Turbine
# instantiate
turbine = SUAVE.Components.Energy.Converters.Turbine()
turbine.tag='low_pressure_turbine'
# setup
turbine.mechanical_efficiency = 0.99
turbine.polytropic_efficiency = 0.93
# add to network
turbofan.append(turbine)
# ------------------------------------------------------------------
# Component 6 - High Pressure Turbine
# instantiate
turbine = SUAVE.Components.Energy.Converters.Turbine()
turbine.tag='high_pressure_turbine'
# setup
turbine.mechanical_efficiency = 0.99
turbine.polytropic_efficiency = 0.93
# add to network
turbofan.append(turbine)
# ------------------------------------------------------------------
# Component 7 - Combustor
# instantiate
combustor = SUAVE.Components.Energy.Converters.Combustor()
combustor.tag = 'combustor'
# setup
combustor.efficiency = 0.99
combustor.alphac = 1.0
combustor.turbine_inlet_temperature = 1500 # K
combustor.pressure_ratio = 0.95
combustor.fuel_data = SUAVE.Attributes.Propellants.Jet_A()
# add to network
turbofan.append(combustor)
# ------------------------------------------------------------------
# Component 8 - Core Nozzle
# instantiate
nozzle = SUAVE.Components.Energy.Converters.Expansion_Nozzle()
nozzle.tag = 'core_nozzle'
# setup
nozzle.polytropic_efficiency = 0.95
nozzle.pressure_ratio = 0.99
# add to network
turbofan.append(nozzle)
# ------------------------------------------------------------------
# Component 9 - Fan Nozzle
# instantiate
nozzle = SUAVE.Components.Energy.Converters.Expansion_Nozzle()
nozzle.tag = 'fan_nozzle'
# setup
nozzle.polytropic_efficiency = 0.95
nozzle.pressure_ratio = 0.99
# add to network
turbofan.append(nozzle)
# ------------------------------------------------------------------
# Component 10 - Fan
# instantiate
fan = SUAVE.Components.Energy.Converters.Fan()
fan.tag = 'fan'
# setup
fan.polytropic_efficiency = 0.93
fan.pressure_ratio = 1.7
# add to network
turbofan.append(fan)
# ------------------------------------------------------------------
#Component 10 : thrust (to compute the thrust)
thrust = SUAVE.Components.Energy.Processes.Thrust()
thrust.tag ='compute_thrust'
#total design thrust (includes all the engines)
thrust.total_design = 52700.0 * Units.N #Newtons
#design sizing conditions
altitude = 35000.0*Units.ft
mach_number = 0.78
isa_deviation = 0.
#Engine setup for noise module
# add to network
turbofan.thrust = thrust
#size the turbofan
turbofan_sizing(turbofan,mach_number,altitude)
# add gas turbine network turbofan to the vehicle
vehicle.append_component(turbofan)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
#now add weights objects
vehicle.landing_gear = SUAVE.Components.Landing_Gear.Landing_Gear()
vehicle.control_systems = SUAVE.Components.Physical_Component()
vehicle.electrical_systems = SUAVE.Components.Physical_Component()
vehicle.avionics = SUAVE.Components.Energy.Peripherals.Avionics()
vehicle.passenger_weights = SUAVE.Components.Physical_Component()
vehicle.furnishings = SUAVE.Components.Physical_Component()
vehicle.air_conditioner = SUAVE.Components.Physical_Component()
vehicle.fuel = SUAVE.Components.Physical_Component()
vehicle.apu = SUAVE.Components.Physical_Component()
vehicle.hydraulics = SUAVE.Components.Physical_Component()
vehicle.optionals = SUAVE.Components.Physical_Component()
vehicle.wings['vertical_stabilizer'].rudder = SUAVE.Components.Physical_Component()
# ------------------------------------------------------------------
# Vehicle Definition Complete
# ------------------------------------------------------------------
return vehicle
# ----------------------------------------------------------------------
# Define the Configurations
# ----------------------------------------------------------------------
def configs_setup(vehicle):
# ------------------------------------------------------------------
# Initialize Configurations
# ------------------------------------------------------------------
configs = SUAVE.Components.Configs.Config.Container()
base_config = SUAVE.Components.Configs.Config(vehicle)
base_config.tag = 'base'
configs.append(base_config)
# ------------------------------------------------------------------
# Cruise Configuration
# ------------------------------------------------------------------
config = SUAVE.Components.Configs.Config(base_config)
config.tag = 'cruise'
configs.append(config)
config.maximum_lift_coefficient = 1.2
# ------------------------------------------------------------------
# Cruise with Spoilers Configuration
# ------------------------------------------------------------------
config = SUAVE.Components.Configs.Config(base_config)
config.tag = 'cruise_spoilers'
configs.append(config)
config.maximum_lift_coefficient = 1.2
# ------------------------------------------------------------------
# Takeoff Configuration
# ------------------------------------------------------------------
config = SUAVE.Components.Configs.Config(base_config)
config.tag = 'takeoff'
config.wings['main_wing'].flaps.angle = 20. * Units.deg
config.wings['main_wing'].slats.angle = 25. * Units.deg
config.V2_VS_ratio = 1.21
config.maximum_lift_coefficient = 2.
configs.append(config)
# ------------------------------------------------------------------
# Landing Configuration
# ------------------------------------------------------------------
config = SUAVE.Components.Configs.Config(base_config)
config.tag = 'landing'
    config.wings['main_wing'].flaps.angle = 30. * Units.deg
    config.wings['main_wing'].slats.angle = 25. * Units.deg
config.Vref_VS_ratio = 1.23
config.maximum_lift_coefficient = 2.
configs.append(config)
# ------------------------------------------------------------------
# Short Field Takeoff Configuration
# ------------------------------------------------------------------
config = SUAVE.Components.Configs.Config(base_config)
config.tag = 'short_field_takeoff'
config.wings['main_wing'].flaps.angle = 20. * Units.deg
config.wings['main_wing'].slats.angle = 25. * Units.deg
# config.V2_VS_ratio = 1.21
# config.maximum_lift_coefficient = 2.
configs.append(config)
return configs
def simple_sizing(configs):
base = configs.base
base.pull_base()
# zero fuel weight
base.mass_properties.max_zero_fuel = 0.9 * base.mass_properties.max_takeoff
# wing areas
for wing in base.wings:
wing.areas.wetted = 2.0 * wing.areas.reference
wing.areas.exposed = 0.8 * wing.areas.wetted
wing.areas.affected = 0.6 * wing.areas.wetted
# diff the new data
base.store_diff()
# ------------------------------------------------------------------
# Landing Configuration
# ------------------------------------------------------------------
landing = configs.landing
# make sure base data is current
landing.pull_base()
# landing weight
landing.mass_properties.landing = 0.85 * base.mass_properties.takeoff
# diff the new data
landing.store_diff()
return
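# Note (illustrative): pull_base()/store_diff() implement SUAVE's config
# pattern - each configuration copies the current base data, overrides a few
# fields, and stores only the differences, so later edits to the base vehicle
# still propagate to every configuration.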
# ----------------------------------------------------------------------
# Define the Mission
# ----------------------------------------------------------------------
def mission_setup(analyses):
# ------------------------------------------------------------------
# Initialize the Mission
# ------------------------------------------------------------------
mission = SUAVE.Analyses.Mission.Sequential_Segments()
mission.tag = 'the_mission'
#airport
airport = SUAVE.Attributes.Airports.Airport()
airport.altitude = 0.0 * Units.ft
airport.delta_isa = 0.0
airport.atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
mission.airport = airport
# unpack Segments module
Segments = SUAVE.Analyses.Mission.Segments
# base segment
base_segment = Segments.Segment()
atmosphere=SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976()
planet = SUAVE.Attributes.Planets.Earth()
# ------------------------------------------------------------------
# First Climb Segment: Constant Speed, Constant Rate
# ------------------------------------------------------------------
segment = Segments.Climb.Constant_Speed_Constant_Rate()
segment.tag = "climb_1"
# connect vehicle configuration
segment.analyses.extend( analyses.base )
# define segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.altitude_start = 0.0 * Units.km
segment.altitude_end = 3.048 * Units.km
segment.air_speed = 138.0 * Units['m/s']
segment.climb_rate = 3000. * Units['ft/min']
    # add to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
# Second Climb Segment: Constant Speed, Constant Rate
# ------------------------------------------------------------------
segment = Segments.Climb.Constant_Speed_Constant_Rate()
segment.tag = "climb_2"
# connect vehicle configuration
segment.analyses.extend( analyses.cruise )
# segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.altitude_end = 3.657 * Units.km
segment.air_speed = 168.0 * Units['m/s']
segment.climb_rate = 2500. * Units['ft/min']
# add to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
    #   Third Climb Segment: Constant Speed, Constant Rate
# ------------------------------------------------------------------
segment = Segments.Climb.Constant_Speed_Constant_Rate()
segment.tag = "climb_3"
# connect vehicle configuration
segment.analyses.extend( analyses.cruise )
# segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.altitude_end = 25000. * Units.ft
segment.air_speed = 200.0 * Units['m/s']
segment.climb_rate = 1800. * Units['ft/min']
# add to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
# Fourth Climb Segment: Constant Speed, Constant Rate
# ------------------------------------------------------------------
segment = Segments.Climb.Constant_Speed_Constant_Rate()
segment.tag = "climb_4"
# connect vehicle configuration
segment.analyses.extend( analyses.cruise )
# segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.altitude_end = 32000. * Units.ft
segment.air_speed = 230.0* Units['m/s']
segment.climb_rate = 900. * Units['ft/min']
# add to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
# Fifth Climb Segment: Constant Speed, Constant Rate
# ------------------------------------------------------------------
segment = Segments.Climb.Constant_Speed_Constant_Rate()
segment.tag = "climb_5"
# connect vehicle configuration
segment.analyses.extend( analyses.cruise )
# segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.altitude_end = 37000. * Units.ft
segment.air_speed = 230.0 * Units['m/s']
segment.climb_rate = 300. * Units['ft/min']
# add to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
# Cruise Segment: Constant Speed, Constant Altitude
# ------------------------------------------------------------------
segment = Segments.Cruise.Constant_Speed_Constant_Altitude()
segment.tag = "cruise"
# connect vehicle configuration
segment.analyses.extend( analyses.cruise )
# segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.air_speed = 450. * Units.knots
segment.distance = 2050. * Units.nmi
# add to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
# First Descent Segment: Constant Speed, Constant Rate
# ------------------------------------------------------------------
segment = Segments.Descent.Constant_Speed_Constant_Rate()
segment.tag = "descent_1"
# connect vehicle configuration
segment.analyses.extend( analyses.cruise )
# segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.altitude_end = 9.31 * Units.km
segment.air_speed = 440.0 * Units.knots
segment.descent_rate = 2600. * Units['ft/min']
# add to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
# Second Descent Segment: Constant Speed, Constant Rate
# ------------------------------------------------------------------
segment = Segments.Descent.Constant_Speed_Constant_Rate()
segment.tag = "descent_2"
# connect vehicle configuration
segment.analyses.extend( analyses.cruise_spoilers )
# segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.altitude_end = 3.657 * Units.km
segment.air_speed = 365.0 * Units.knots
segment.descent_rate = 2300. * Units['ft/min']
# append to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
# Third Descent Segment: Constant Speed, Constant Rate
# ------------------------------------------------------------------
segment = Segments.Descent.Constant_Speed_Constant_Rate()
segment.tag = "descent_3"
# connect vehicle configuration
segment.analyses.extend( analyses.cruise )
# segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.altitude_end = 0.0 * Units.km
segment.air_speed = 250.0 * Units.knots
segment.descent_rate = 1500. * Units['ft/min']
# append to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
# Mission definition complete
# ------------------------------------------------------------------
#------------------------------------------------------------------
### Reserve mission
#------------------------------------------------------------------
# ------------------------------------------------------------------
    #   First Climb Segment: Constant Speed, Constant Rate
# ------------------------------------------------------------------
segment = Segments.Climb.Constant_Speed_Constant_Rate()
segment.tag = "reserve_climb"
# connect vehicle configuration
segment.analyses.extend( analyses.base )
# define segment attributes
segment.atmosphere = atmosphere
segment.planet = planet
segment.altitude_start = 0.0 * Units.km
segment.altitude_end = 15000. * Units.ft
segment.air_speed = 138.0 * Units['m/s']
segment.climb_rate = 3000. * Units['ft/min']
    # add to mission
mission.append_segment(segment)
# ------------------------------------------------------------------
    #   Cruise Segment: constant mach, constant altitude
# ------------------------------------------------------------------
segment = Segments.Cruise.Constant_Mach_Constant_Altitude(base_segment)
segment.tag = "reserve_cruise"
segment.analyses.extend( analyses.cruise )
segment.mach = 0.5
segment.distance = 140.0 * Units.nautical_mile
mission.append_segment(segment)
# ------------------------------------------------------------------
# Loiter Segment: constant mach, constant time
# ------------------------------------------------------------------
segment = Segments.Cruise.Constant_Mach_Constant_Altitude_Loiter(base_segment)
segment.tag = "reserve_loiter"
segment.analyses.extend( analyses.cruise )
segment.mach = 0.5
segment.time = 30.0 * Units.minutes
mission.append_segment(segment)
# ------------------------------------------------------------------
    #   Final Descent Segment: linear mach, constant rate
# ------------------------------------------------------------------
segment = Segments.Descent.Linear_Mach_Constant_Rate(base_segment)
segment.tag = "reserve_descent_1"
segment.analyses.extend( analyses.landing )
segment.altitude_end = 0.0 * Units.km
segment.descent_rate = 3.0 * Units['m/s']
segment.mach_end = 0.24
segment.mach_start = 0.3
# append to mission
mission.append_segment(segment)
#------------------------------------------------------------------
### Reserve mission completed
#------------------------------------------------------------------
return mission
def missions_setup(base_mission):
# the mission container
missions = SUAVE.Analyses.Mission.Mission.Container()
# ------------------------------------------------------------------
# Base Mission
# ------------------------------------------------------------------
missions.base = base_mission
return missions
# ----------------------------------------------------------------------
# Plot Mission
# ----------------------------------------------------------------------
def plot_mission(results,line_style='bo-'):
axis_font = {'fontname':'Arial', 'size':'14'}
# ------------------------------------------------------------------
# Aerodynamics
# ------------------------------------------------------------------
fig = plt.figure("Aerodynamic Forces",figsize=(8,6))
for segment in results.segments.values():
time = segment.conditions.frames.inertial.time[:,0] / Units.min
Thrust = segment.conditions.frames.body.thrust_force_vector[:,0] / Units.lbf
eta = segment.conditions.propulsion.throttle[:,0]
axes = fig.add_subplot(2,1,1)
axes.plot( time , Thrust , line_style )
axes.set_ylabel('Thrust (lbf)',axis_font)
axes.grid(True)
axes = fig.add_subplot(2,1,2)
axes.plot( time , eta , line_style )
axes.set_xlabel('Time (min)',axis_font)
axes.set_ylabel('Throttle',axis_font)
axes.grid(True)
plt.savefig("B737_engine.pdf")
plt.savefig("B737_engine.png")
# ------------------------------------------------------------------
# Aerodynamics 2
# ------------------------------------------------------------------
fig = plt.figure("Aerodynamic Coefficients",figsize=(8,10))
for segment in results.segments.values():
time = segment.conditions.frames.inertial.time[:,0] / Units.min
CLift = segment.conditions.aerodynamics.lift_coefficient[:,0]
CDrag = segment.conditions.aerodynamics.drag_coefficient[:,0]
aoa = segment.conditions.aerodynamics.angle_of_attack[:,0] / Units.deg
l_d = CLift/CDrag
axes = fig.add_subplot(3,1,1)
axes.plot( time , CLift , line_style )
axes.set_ylabel('Lift Coefficient',axis_font)
axes.grid(True)
axes = fig.add_subplot(3,1,2)
axes.plot( time , l_d , line_style )
axes.set_ylabel('L/D',axis_font)
axes.grid(True)
axes = fig.add_subplot(3,1,3)
axes.plot( time , aoa , 'ro-' )
axes.set_xlabel('Time (min)',axis_font)
axes.set_ylabel('AOA (deg)',axis_font)
axes.grid(True)
plt.savefig("B737_aero.pdf")
plt.savefig("B737_aero.png")
# ------------------------------------------------------------------
    #   Drag Components
# ------------------------------------------------------------------
fig = plt.figure("Drag Components",figsize=(8,10))
axes = plt.gca()
for i, segment in enumerate(results.segments.values()):
time = segment.conditions.frames.inertial.time[:,0] / Units.min
drag_breakdown = segment.conditions.aerodynamics.drag_breakdown
cdp = drag_breakdown.parasite.total[:,0]
cdi = drag_breakdown.induced.total[:,0]
cdc = drag_breakdown.compressible.total[:,0]
cdm = drag_breakdown.miscellaneous.total[:,0]
cd = drag_breakdown.total[:,0]
if line_style == 'bo-':
axes.plot( time , cdp , 'ko-', label='CD parasite' )
axes.plot( time , cdi , 'bo-', label='CD induced' )
axes.plot( time , cdc , 'go-', label='CD compressibility' )
axes.plot( time , cdm , 'yo-', label='CD miscellaneous' )
axes.plot( time , cd , 'ro-', label='CD total' )
if i == 0:
axes.legend(loc='upper center')
else:
axes.plot( time , cdp , line_style )
axes.plot( time , cdi , line_style )
axes.plot( time , cdc , line_style )
axes.plot( time , cdm , line_style )
axes.plot( time , cd , line_style )
axes.set_xlabel('Time (min)')
axes.set_ylabel('CD')
axes.grid(True)
plt.savefig("B737_drag.pdf")
plt.savefig("B737_drag.png")
# ------------------------------------------------------------------
# Altitude, sfc, vehicle weight
# ------------------------------------------------------------------
fig = plt.figure("Altitude_sfc_weight",figsize=(8,10))
for segment in results.segments.values():
time = segment.conditions.frames.inertial.time[:,0] / Units.min
aoa = segment.conditions.aerodynamics.angle_of_attack[:,0] / Units.deg
mass = segment.conditions.weights.total_mass[:,0] / Units.lb
altitude = segment.conditions.freestream.altitude[:,0] / Units.ft
mdot = segment.conditions.weights.vehicle_mass_rate[:,0]
thrust = segment.conditions.frames.body.thrust_force_vector[:,0]
sfc = (mdot / Units.lb) / (thrust /Units.lbf) * Units.hr
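        # Thrust-specific fuel consumption: fuel flow converted from kg/s to
        # lb/hr, divided by thrust in lbf, giving sfc in lb/lbf-hr.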
axes = fig.add_subplot(3,1,1)
axes.plot( time , altitude , line_style )
axes.set_ylabel('Altitude (ft)',axis_font)
axes.grid(True)
axes = fig.add_subplot(3,1,3)
axes.plot( time , sfc , line_style )
axes.set_xlabel('Time (min)',axis_font)
axes.set_ylabel('sfc (lb/lbf-hr)',axis_font)
axes.grid(True)
axes = fig.add_subplot(3,1,2)
axes.plot( time , mass , 'ro-' )
axes.set_ylabel('Weight (lb)',axis_font)
axes.grid(True)
plt.savefig("B737_mission.pdf")
plt.savefig("B737_mission.png")
# ------------------------------------------------------------------
# Velocities
# ------------------------------------------------------------------
fig = plt.figure("Velocities",figsize=(8,10))
for segment in results.segments.values():
time = segment.conditions.frames.inertial.time[:,0] / Units.min
Lift = -segment.conditions.frames.wind.lift_force_vector[:,2]
Drag = -segment.conditions.frames.wind.drag_force_vector[:,0] / Units.lbf
        Thrust = segment.conditions.frames.body.thrust_force_vector[:,0] / Units.lbf
velocity = segment.conditions.freestream.velocity[:,0]
pressure = segment.conditions.freestream.pressure[:,0]
density = segment.conditions.freestream.density[:,0]
EAS = velocity * np.sqrt(density/1.225)
mach = segment.conditions.freestream.mach_number[:,0]
axes = fig.add_subplot(3,1,1)
axes.plot( time , velocity / Units.kts, line_style )
axes.set_ylabel('velocity (kts)',axis_font)
axes.grid(True)
axes = fig.add_subplot(3,1,2)
axes.plot( time , EAS / Units.kts, line_style )
axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('Equivalent Airspeed (kts)',axis_font)
axes.grid(True)
axes = fig.add_subplot(3,1,3)
axes.plot( time , mach , line_style )
axes.set_xlabel('Time (min)',axis_font)
axes.set_ylabel('Mach',axis_font)
axes.grid(True)
return
if __name__ == '__main__':
main()
plt.show()
<reponame>hfhchan/Cantonese
"""
Created at 2021/1/16 16:23
Last update at 2021/6/6 9:11
The interpreter for Cantonese
"""
import re
import sys
import io
import os
from pygame.constants import KEYDOWN
"""
Get the Cantonese Token List
"""
def cantonese_token(code : str) -> list:
    keywords = r'(?P<keywords>(畀我睇下){1}|(点样先){1}|(收工){1}|(喺){1}|(定){1}|(老作一下){1}|(起底){1}|' \
               r'(讲嘢){1}|(咩系){1}|(唔系){1}|(系){1}|(如果){1}|(嘅话){1}|(->){1}|({){1}|(}){1}|(同埋){1}|(咩都唔做){1}|' \
               r'(落操场玩跑步){1}|(\$){1}|(用下){1}|(使下){1}|(要做咩){1}|(搞掂){1}|(就){1}|(谂下){1}|(佢嘅){1}|' \
               r'(玩到){1}|(为止){1}|(返转头){1}|(执嘢){1}|(揾到){1}|(执手尾){1}|(掟个){1}|(来睇下){1}|' \
               r'(从){1}|(行到){1}|(行晒){1}|(佢个老豆叫){1}|(佢识得){1}|(明白未啊){1}|(落Order){1}|(饮茶先啦){1}|' \
               r'(拍住上){1}|(係){1}|(比唔上){1}|(或者){1}|(辛苦晒啦){1}|(同我躝){1}|(唔啱){1}|(啱){1}|(冇){1}|' \
               r'(有条仆街叫){1}|(顶你){1}|(丢你){1}|(嗌){1}|(过嚟估下){1}|(佢有啲咩){1}|(自己嘅){1}|(下){1}|(\@){1})'
kw_get_code = re.findall(re.compile(r'[(](.*?)[)]', re.S), keywords[13 : ])
keywords_gen_code = ["print", "endprint", "exit", "in", "or", "turtle_begin", "gettype",
"assign", "class", "is not", "is", "if", "then", "do", "begin", "end", "and", "pass",
"while_do", "$", "call", "import", "funcbegin", "funcend", "is", "assert", "assign",
"while", "whi_end", "return", "try", "except", "finally", "raise", "endraise",
"from", "to", "endfor", "extend", "method", "endclass", "cmd", "break", "ass_list", "is",
"<", "or", "exit", "exit", "False", "True", "None", "stackinit", "push", "pop", "model",
"mod_new", "class_init", "self.", "call_begin", "get_value"
]
num = r'(?P<num>\d+)'
ID = r'(?P<ID>[a-zA-Z_][a-zA-Z_0-9]*)'
    op = r'(?P<op>(相加){1}|(加){1}|(减){1}|(乘){1}|(整除){1}|(除){1}|(余){1}|(异或){1}|(取反){1}|(左移){1}|(右移){1}|'\
         r'(与){1}|(或){1})'
op_get_code = re.findall(re.compile(r'[(](.*?)[)]', re.S), op[5 : ])
op_gen_code = ["矩阵.matrix_addition", "+", "-", "*", "//", "/", "%", "^", "~", "<<", ">>",
"&", "|"]
string = r'(?P<string>\"([^\\\"]|\\.)*\")'
expr = r'(?P<expr>[|](.*?)[|])'
callfunc = r'(?P<callfunc>[&](.*?)[)])'
build_in_funcs = r'(?P<build_in_funcs>(瞓){1}|(加啲){1}|(摞走){1}|(嘅长度){1}|(阵先){1}|' \
r'(畀你){1}|(散水){1})'
bif_get_code = re.findall(re.compile(r'[(](.*?)[)]', re.S), build_in_funcs[19 :])
bif_gen_code = ["sleep", "append", "remove", ".__len__()", "2", "input", "clear"]
patterns = re.compile('|'.join([keywords, ID, num, string, expr, callfunc, build_in_funcs, op]))
def make_rep(list1 : list, list2 : list) -> list:
assert len(list1) == len(list2)
ret = []
for i in range(len(list1)):
ret.append([list1[i], list2[i]])
return ret
def trans(lastgroup : str, code : str, rep : str) -> str:
if lastgroup != 'string' and lastgroup != 'ID':
if lastgroup == 'expr':
p = re.match(r'\|(.*)同(.*)有几衬\|', code, re.M|re.I)
if p:
code = " corr(" + p.group(1) +", " + p.group(2) + ") "
for r in rep:
code = code.replace(r[0], r[1])
return code
for match in re.finditer(patterns, code):
group = match.group()
group = trans(match.lastgroup, group, make_rep(kw_get_code, keywords_gen_code))
group = trans(match.lastgroup, group, make_rep(bif_get_code, bif_gen_code))
group = trans(match.lastgroup, group, make_rep(op_get_code, op_gen_code))
yield [match.lastgroup, group]
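# Illustrative usage sketch (not part of the original source): cantonese_token
# is a generator yielding [tag, translated_text] pairs. For a one-line program
# the loop below would print something like ['keywords', 'print'],
# ['string', '"hello"'], ['keywords', 'endprint'] (exact tags depend on the
# regex tables above).
#
#   for tok in cantonese_token('畀我睇下 "hello" 点样先'):
#       print(tok)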
"""
AST node for the Token List
"""
def node_print_new(Node : list, arg) -> None:
"""
Node_print
|
arg
"""
Node.append(["node_print", arg])
def node_sleep_new(Node : list, arg) -> None:
"""
Node_sleep
|
arg
"""
Node.append(["node_sleep", arg])
def node_break_new(Node : list) -> None:
Node.append(["node_break"])
def node_exit_new(Node : list) -> None:
"""
Node_exit
|
arg
"""
Node.append(["node_exit"])
def node_let_new(Node : list, key ,value) -> None:
"""
Node_let
/ \
key value
"""
Node.append(["node_let", key, value])
def node_if_new(Node : list, cond, stmt) -> None:
"""
Node_if
/ \
cond stmt
"""
Node.append(["node_if", cond, stmt])
def node_elif_new(Node : list, cond, stmt) -> None:
"""
Node_elif
/ \
cond stmt
"""
Node.append(["node_elif", cond, stmt])
def node_else_new(Node : list, stmt) -> None:
"""
Node_else
|
stmt
"""
Node.append(["node_else", stmt])
def node_loop_new(Node : list, cond, stmt) -> None:
"""
Node_loop
/ \
cond stmt
"""
Node.append(["node_loop", cond, stmt])
def node_func_new(Node : list, func_name, args, body) -> None:
"""
Node_fundef
/ | \
name args body
"""
Node.append(["node_fundef", func_name, args, body])
def node_call_new(Node : list, func_name) -> None:
"""
Node_call
|
name
"""
Node.append(["node_call", func_name])
def node_build_in_func_call_new(Node : list, var, func_name, args) -> None:
"""
       Node_bcall
      /    |    \
    var  name  args
"""
Node.append(["node_bcall", var, func_name, args])
def node_import_new(Node : list, name) -> None:
"""
Node_import
|
name
"""
Node.append(["node_import", name])
def node_return_new(Node : list, v) -> None:
"""
Node_return
|
value
"""
Node.append(["node_return", v])
def node_try_new(Node : list, try_part) -> None:
"""
Node_try
|
stmt
"""
Node.append(["node_try", try_part])
def node_except_new(Node : list, _except, except_part) -> None:
"""
Node_except
/ \
exception stmt
"""
Node.append(["node_except", _except, except_part])
def node_finally_new(Node : list, finally_part) -> None:
"""
Node_finally
|
stmt
"""
Node.append(["node_finally", finally_part])
def node_raise_new(Node : list, execption) -> None:
"""
Node_raise
|
exception
"""
Node.append(["node_raise", execption])
def node_for_new(Node : list, iterating_var, sequence, stmt_part) -> None:
"""
Node_for
/ | \
iter seq stmt
"""
Node.append(["node_for", iterating_var, sequence, stmt_part])
def node_turtle_new(Node : list, instruction) -> None:
Node.append(["node_turtle", instruction])
def node_assert_new(Node : list, args) -> None:
Node.append(["node_assert", args])
def node_model_new(Node : list, model, datatest) -> None:
"""
Node_model
/ \
model dataset
"""
Node.append(["node_model", model, datatest])
def node_gettype_new(Node : list, value) -> None:
Node.append(["node_gettype", value])
def node_class_new(Node : list, name, extend, method) -> None:
"""
Node_class
/ | \
name extend method
"""
Node.append(["node_class", name, extend, method])
def node_attribute_new(Node : list, attr_list) -> None:
Node.append(["node_attr", attr_list])
def node_method_new(Node : list, name, args, stmt) -> None:
"""
Node_method
/ | \
name args stmt
"""
Node.append(["node_method", name, args, stmt])
def node_cmd_new(Node : list, cmd) -> None:
"""
Node_cmd
|
conmmand
"""
Node.append(["node_cmd", cmd])
def node_list_new(Node : list, name, list) -> None:
"""
Node_list
/ \
name list
"""
Node.append(["node_list", name, list])
def node_stack_new(Node : list, name) -> None:
"""
Node_stack
|
name
"""
Node.append(["node_stack", name])
"""
Parser for cantonese Token List
"""
class Parser(object):
def __init__(self, tokens, Node):
self.tokens = tokens
self.pos = 0
self.Node = Node
def syntax_check(self, token, tag):
if tag == "value" and self.get(0)[1] == token:
return
elif tag == "type" and self.get(0)[0] == token:
return
        else:
            raise SyntaxError("Syntax error!")
def get(self, offset):
if self.pos + offset >= len(self.tokens):
return ["", ""]
return self.tokens[self.pos + offset]
def get_value(self, token):
if token[0] == 'expr':
# If is expr, Remove the "|"
token[1] = token[1][1 : -1]
if token[0] == 'callfunc':
# If is call func, Remove the '&'
token[1] = token[1][1 :]
return token
def last(self, offset):
return self.tokens[self.pos - offset]
def skip(self, offset):
self.pos += offset
def match(self, name):
if self.get(0)[1] == name:
self.pos += 1
return True
else:
return False
def match_type(self, type):
if self.get(0)[0] == type:
self.pos += 1
return True
else:
return False
# TODO: Add error check
def parse(self):
while True:
if self.match("print"):
node_print_new(self.Node, self.get_value(self.get(0)))
self.skip(2) # Skip the args and end_print
elif self.match("sleep"):
node_sleep_new(self.Node, self.get(0))
self.skip(1)
elif self.match("exit"):
node_exit_new(self.Node)
self.skip(1)
elif self.match("assign") and self.get(1)[1] == 'is':
node_let_new(self.Node, self.get_value(self.get(0)), self.get_value(self.get(2)))
self.skip(3)
elif self.match("if"):
cond = self.get_value(self.get(0))
self.skip(4) # Skip the "then", "do", "begin"
if_case_end = 0 # The times of case "end"
if_should_end = 1
node_if = []
stmt_if = []
while if_case_end != if_should_end and self.pos < len(self.tokens):
if self.get(0)[1] == "if":
if_should_end += 1
stmt_if.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "end":
if_case_end += 1
if if_case_end != if_should_end:
stmt_if.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "or" and self.get(1)[1] == "is":
if_should_end += 1
stmt_if.append(self.tokens[self.pos])
self.pos += 1
else:
stmt_if.append(self.tokens[self.pos])
self.pos += 1
Parser(stmt_if, node_if).parse()
node_if_new(self.Node, cond, node_if)
elif self.match("or") and self.match("is"): # case "定系" elif
cond = self.get_value(self.get(0))
self.skip(4) # Skip the "then", "do", "begin"
elif_case_end = 0 # The times of case "end"
elif_should_end = 1
node_elif = []
stmt_elif = []
while elif_case_end != elif_should_end and self.pos < len(self.tokens):
if self.get(0)[1] == "if":
elif_should_end += 1
stmt_elif.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "end":
elif_case_end += 1
if elif_case_end != elif_should_end:
stmt_elif.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "or" and self.get(1)[1] == "is":
elif_should_end += 1
stmt_elif.append(self.tokens[self.pos])
self.pos += 1
else:
stmt_elif.append(self.tokens[self.pos])
self.pos += 1
Parser(stmt_elif, node_elif).parse()
node_elif_new(self.Node, cond, node_elif)
elif self.match("is not"): # case "唔系" else
self.skip(3) # Skip the "then", "do", "begin"
else_case_end = 0 # The times of case "end"
else_should_end = 1
node_else = []
stmt_else = []
while else_case_end != else_should_end and self.pos < len(self.tokens):
if self.get(0)[1] == "if":
else_should_end += 1
stmt_else.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "end":
else_case_end += 1
if else_case_end != else_should_end:
stmt_else.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "or" and self.get(1)[1] == "is":
else_should_end += 1
stmt_else.append(self.tokens[self.pos])
self.pos += 1
else:
stmt_else.append(self.tokens[self.pos])
self.pos += 1
Parser(stmt_else, node_else).parse()
node_else_new(self.Node, node_else)
elif self.match("while_do"):
stmt = []
while self.tokens[self.pos][1] != "while":
stmt.append(self.tokens[self.pos])
self.pos += 1
node_while = []
self.skip(1)
cond = self.get_value(self.get(0))
Parser(stmt, node_while).parse()
node_loop_new(self.Node, cond, node_while)
self.skip(2) # Skip the "end"
elif self.match("$"): # Case "function"
if self.get(1)[0] == 'expr':
func_name = self.get_value(self.get(0))
args = self.get_value(self.get(1))
self.skip(3)
func_stmt = []
while self.tokens[self.pos][1] != "funcend":
func_stmt.append(self.tokens[self.pos])
self.pos += 1
node_func = []
Parser(func_stmt, node_func).parse()
node_func_new(self.Node, func_name, args, node_func)
self.skip(1) # Skip the funcend
else:
func_name = self.get_value(self.get(0))
self.skip(2) # Skip the funcbegin
func_stmt = []
while self.tokens[self.pos][1] != "funcend":
func_stmt.append(self.tokens[self.pos])
self.pos += 1
node_func = []
Parser(func_stmt, node_func).parse()
node_func_new(self.Node, func_name, "None", node_func)
self.skip(1) # Skip the funcend
elif self.match("turtle_begin"):
self.skip(2) # Skip the "do", "begin"
turtle_inst = []
while self.tokens[self.pos][1] != "end":
turtle_inst.append(self.get_value(self.tokens[self.pos])[1])
self.pos += 1
node_turtle_new(self.Node, turtle_inst)
self.skip(1)
elif self.match("call"):
node_call_new(self.Node, self.get_value(self.get(0)))
self.skip(1)
elif self.match("import"):
node_import_new(self.Node, self.get_value(self.get(0)))
self.skip(1)
elif self.match_type("expr") or self.match_type("ID"):
if self.match("from"):
iterating_var = self.get_value(self.get(-2))
seq = "(" + str(self.get_value(self.get(0))[1]) + "," \
+ str(self.get_value(self.get(2))[1]) + ")"
self.skip(3)
node_for = []
for_stmt = []
for_case_end = 0
for_should_end = 1
while for_should_end != for_case_end and self.pos < len(self.tokens):
if (self.get(0)[0] == "expr" or self.get(0)[0] == "ID") \
and self.get(1)[1] == "from":
for_should_end += 1
for_stmt.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "endfor":
for_case_end += 1
if for_case_end != for_should_end:
for_stmt.append(self.tokens[self.pos])
self.pos += 1
else:
for_stmt.append(self.tokens[self.pos])
self.pos += 1
Parser(for_stmt, node_for).parse()
node_for_new(self.Node, iterating_var, seq, node_for)
if self.get(0)[1] == "ass_list":
self.skip(1)
list = self.get_value(self.get(-2))
name = self.get_value(self.get(1))
node_list_new(self.Node, name, list)
self.skip(2)
if self.get(0)[1] == 'do':
self.skip(1)
id = self.get_value(self.get(-2))
args = self.get_value(self.get(1))
func = self.get_value(self.get(0))
node_build_in_func_call_new(self.Node, id, func, args)
self.skip(2)
if self.get(0)[1] == 'call_begin':
func_name = self.get_value(self.get(-1))
self.skip(2)
args = self.get_value(self.get(0))
cons = ['expr', func_name[1] + '(' + args[1] + ')']
self.skip(1)
if self.get(0)[1] == "@":
self.skip(1)
v = self.get_value(self.get(0))
node_let_new(self.Node, v, cons)
else:
node_call_new(self.Node, cons)
elif self.match("return"):
node_return_new(self.Node, self.get_value(self.get(0)))
self.skip(1)
elif self.match("try"):
self.skip(2) # SKip the "begin, do"
should_end = 1
case_end = 0
node_try = []
stmt_try = []
while case_end != should_end and self.pos < len(self.tokens):
if self.get(0)[1] == "end":
case_end += 1
self.pos += 1
else:
stmt_try.append(self.tokens[self.pos])
self.pos += 1
Parser(stmt_try, node_try).parse()
node_try_new(self.Node, node_try)
elif self.match("except"):
_except = self.get_value(self.get(0))
self.skip(4) # SKip the "except", "then", "begin", "do"
should_end = 1
case_end = 0
node_except = []
stmt_except = []
while case_end != should_end and self.pos < len(self.tokens):
if self.get(0)[1] == "end":
case_end += 1
self.pos += 1
else:
stmt_except.append(self.tokens[self.pos])
self.pos += 1
Parser(stmt_except, node_except).parse()
node_except_new(self.Node, _except , node_except)
elif self.match("finally"):
self.skip(2) # Skip the "begin", "do"
should_end = 1
case_end = 0
node_finally = []
stmt_finally = []
while case_end != should_end and self.pos < len(self.tokens):
if self.get(0)[1] == "end":
case_end += 1
self.pos += 1
else:
stmt_finally.append(self.tokens[self.pos])
self.pos += 1
Parser(stmt_finally, node_finally).parse()
node_finally_new(self.Node, node_finally)
elif self.match("assert"):
node_assert_new(self.Node, self.get_value(self.get(0)))
self.skip(1)
elif self.match("raise"):
node_raise_new(self.Node, self.get_value(self.get(0)))
self.skip(2)
elif self.match("gettype"):
node_gettype_new(self.Node, self.get_value(self.get(0)))
self.skip(1)
elif self.match("pass"):
self.Node.append(["node_pass"])
elif self.match("break"):
node_break_new(self.Node)
elif self.match("class"):
class_name = self.get_value(self.get(0))
self.skip(1)
if self.match("extend"):
extend = self.get_value(self.get(0))
self.skip(1)
class_stmt = []
node_class = []
while self.tokens[self.pos][1] != "endclass":
class_stmt.append(self.tokens[self.pos])
self.pos += 1
Parser(class_stmt, node_class).parse()
self.skip(1) # Skip the "end"
node_class_new(self.Node, class_name, extend, node_class)
elif self.match("class_init"):
self.skip(1)
attr_lst = self.get_value(self.get(0))
self.skip(1)
node_attribute_new(self.Node, attr_lst)
elif self.match("method"):
method_name = self.get_value(self.get(0))
self.skip(1)
# Check if has args
if self.get(0)[0] == "expr":
args = self.get_value(self.get(0))
self.skip(1)
else:
args = "None"
self.skip(2) # Skip the "do", "begin"
method_stmt = []
node_method = []
method_should_end = 1
method_case_end = 0
while method_case_end != method_should_end and self.pos < len(self.tokens):
if self.get(0)[1] == "end":
method_case_end += 1
if method_case_end != method_should_end:
method_stmt.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "if":
method_should_end += 1
method_stmt.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "or" and self.get(1)[1] == "is":
method_should_end += 1
method_stmt.append(self.tokens[self.pos])
self.pos += 1
elif self.get(0)[1] == "is not":
method_should_end += 1
method_stmt.append(self.tokens[self.pos])
self.pos += 1
else:
method_stmt.append(self.tokens[self.pos])
self.pos += 1
Parser(method_stmt, node_method).parse()
node_method_new(self.Node, method_name, args, node_method)
elif self.match("cmd"):
node_cmd_new(self.Node, self.get_value(self.get(0)))
self.skip(1)
elif self.match("model"):
model = self.get_value(self.get(0))
self.skip(1)
self.syntax_check("mod_new", "value")
self.skip(2)
datatest = self.get_value(self.get(0))
self.skip(1)
node_model_new(self.Node, model, datatest)
elif self.match("stackinit"):
node_stack_new(self.Node, self.get_value(self.get(0)))
self.skip(1)
elif self.match("push"):
self.syntax_check("do", "value")
self.skip(1)
self.Node.append(["stack_push", self.get_value(self.get(0)), self.get_value(self.\
get(1))])
self.skip(2)
elif self.match("pop"):
self.syntax_check("do", "value")
self.skip(1)
self.Node.append(["stack_pop", self.get_value(self.get(0)), self.get_value(self.\
get(1))])
self.skip(1)
else:
break
variable = {}
TO_PY_CODE = ""
def run(Nodes : list, TAB = '', label = '', path = '') -> None:
def check(tab):
if label != 'whi_run' and label != 'if_run' and label != 'else_run' and \
label != 'elif_run' and label != "func_run" and label != "try_run" and \
label != "except_run" and label != "finally_run" and label != "for_run" and \
label != "class_run" and label != "method_run":
tab = ''
global TO_PY_CODE
if Nodes == None:
return None
for node in Nodes:
if node[0] == "node_print":
check(TAB)
TO_PY_CODE += TAB + "print(" + node[1][1] + ")\n"
if node[0] == "node_sleep":
check(TAB)
TO_PY_CODE += TAB + "import time\n"
TO_PY_CODE += TAB + "time.sleep(" + node[1][1] + ")\n"
if node[0] == "node_import":
check(TAB)
if cantonese_lib_import(node[1][1]) == "Not found":
cantonese_lib_run(node[1][1], path)
else:
TO_PY_CODE += TAB + "import " + node[1][1] + "\n"
if node[0] == "node_exit":
check(TAB)
TO_PY_CODE += TAB + "exit()\n"
if node[0] == "node_let":
check(TAB)
TO_PY_CODE += TAB + node[1][1] + " = " + node[2][1] + "\n"
if node[0] == "node_if":
check(TAB)
TO_PY_CODE += TAB + "if " + node[1][1] + ":\n"
run(node[2], TAB + '\t', 'if_run')
label = ''
if node[0] == "node_elif":
check(TAB)
TO_PY_CODE += TAB + "elif " + node[1][1] + ":\n"
run(node[2], TAB + '\t', 'elif_run')
label = ''
if node[0] == "node_else":
check(TAB)
TO_PY_CODE += TAB + "else:\n"
run(node[1], TAB + '\t', 'else_run')
label = ''
if node[0] == "node_loop":
check(TAB)
TO_PY_CODE += TAB + "while " + "not (" + node[1][1] + "):\n"
run(node[2], TAB + '\t', 'whi_run')
label = ''
if node[0] == "node_for":
check(TAB)
TO_PY_CODE += TAB + "for " + node[1][1] + " in " + "range" + \
node[2] + ":\n"
run(node[3], TAB + '\t', "for_run")
label = ''
if node[0] == "node_fundef":
# check if has args
if node[2] == 'None':
check(TAB)
TO_PY_CODE += TAB + "def " + node[1][1] + "():\n"
run(node[3], TAB + '\t', 'func_run')
label = ''
else:
check(TAB)
TO_PY_CODE += TAB + "def " + node[1][1] + "(" + node[2][1] + "):\n"
run(node[3], TAB + '\t', 'func_run')
label = ''
if node[0] == "node_call":
check(TAB)
TO_PY_CODE += TAB + node[1][1] + "\n"
if node[0] == "node_break":
check(TAB)
TO_PY_CODE += TAB + "break\n"
if node[0] == "node_pass":
check(TAB)
TO_PY_CODE += TAB + "pass\n"
if node[0] == "node_bcall":
check(TAB)
# check if has args
if node[3] != "None":
TO_PY_CODE += TAB + node[1][1] + "." + node[2][1] + "(" + node[3][1] + ")\n"
else:
TO_PY_CODE += TAB + node[1][1] + "." + node[2][1] + "()\n"
if node[0] == "node_return":
check(TAB)
TO_PY_CODE += TAB + "return " + node[1][1] + "\n"
if node[0] == "node_list":
check(TAB)
TO_PY_CODE += TAB + node[1][1] + " = [" + node[2][1] + "]\n"
if node[0] == "node_raise":
check(TAB)
TO_PY_CODE += TAB + "raise " + node[1][1] + "\n"
if node[0] == "node_cmd":
check(TAB)
TO_PY_CODE += TAB + "os.system(" + node[1][1] + ")\n"
if node[0] == "node_turtle":
check(TAB)
cantonese_turtle_init()
for ins in node[1]:
TO_PY_CODE += TAB + ins + "\n"
if node[0] == "node_assert":
check(TAB)
TO_PY_CODE += TAB + "assert " + node[1][1] + "\n"
if node[0] == "node_gettype":
check(TAB)
TO_PY_CODE += TAB + "print(type(" + node[1][1] + "))\n"
if node[0] == "node_try":
check(TAB)
TO_PY_CODE += TAB + "try:\n"
run(node[1], TAB + '\t', 'try_run')
label = ''
if node[0] == "node_except":
check(TAB)
TO_PY_CODE += TAB + "except " + node[1][1] + ":\n"
run(node[2], TAB + '\t', 'except_run')
label = ''
if node[0] == "node_finally":
check(TAB)
TO_PY_CODE += TAB + "finally:\n"
run(node[1], TAB + '\t', 'finally_run')
label = ''
if node[0] == "node_class":
check(TAB)
TO_PY_CODE += TAB + "class " + node[1][1] + "(" \
+ node[2][1] + "):\n"
run(node[3], TAB + '\t', 'class_run')
label = ''
if node[0] == "node_attr":
check(TAB)
TO_PY_CODE += TAB + "def __init__(self, " + node[1][1] + "):\n"
attr_lst = node[1][1].replace(" ", "").split(',')
for i in attr_lst:
TO_PY_CODE += TAB + '\t' + "self." + i + " = " + i + "\n"
if node[0] == "node_method":
check(TAB)
if node[2] == 'None':
TO_PY_CODE += TAB + "def " + node[1][1] + "(self):\n"
else:
TO_PY_CODE += TAB + "def " + node[1][1] + "(self, " + node[2][1] + "):\n"
run(node[3], TAB + '\t', "method_run")
label = ''
if node[0] == "node_stack":
check(TAB)
cantonese_stack_init()
TO_PY_CODE += TAB + node[1][1] + " = stack()\n"
if node[0] == "stack_push":
check(TAB)
TO_PY_CODE += TAB + node[1][1] + ".push(" + node[2][1] +")\n"
if node[0] == "stack_pop":
check(TAB)
TO_PY_CODE += TAB + node[1][1] + ".pop()\n"
if node[0] == "node_model":
check(TAB)
            # build only the new statement (starting from an empty string)
            TO_PY_CODE += cantonese_model_new(node[1][1], node[2][1], TAB, "")
"""
Built-in library for Cantonese
"""
def cantonese_lib_import(name : str) -> str:
if name == "random":
cantonese_random_init()
elif name == "datetime":
cantonese_datetime_init()
elif name == "math":
cantonese_math_init()
elif name == "smtplib":
cantonese_smtplib_init()
elif name == "xml":
cantonese_xml_init()
elif name == "csv":
cantonese_csv_init()
elif name == "os":
pass
elif name == "re":
cantonese_re_init()
elif name == "urllib":
cantonese_urllib_init()
elif name == "requests":
cantonese_requests_init()
elif name == "socket":
cantonese_socket_init()
elif name == "kivy":
cantonese_kivy_init()
elif name == "pygame":
cantonese_pygame_init()
elif name == "json":
cantonese_json_init()
else:
return "Not found"
def cantonese_lib_init() -> None:
def cantonese_open(file, 模式 = 'r', 解码 = None):
return open(file, mode = 模式, encoding = 解码)
def cantonese_close(file) -> None:
file.close()
def out_name(file) -> None:
print(file.name)
def out_ctx(file, size = None) -> None:
if size == None:
print(file.read())
return
print(file.read(size))
def get_name(file) -> str:
return file.name
def cantonese_read(file, size = None) -> str:
if size == None:
return file.read()
return file.read(size)
cantonese_func_def("开份文件", cantonese_open)
cantonese_func_def("关咗佢", cantonese_close)
cantonese_func_def("睇睇文件名", out_name)
cantonese_func_def("睇睇有咩", out_ctx)
cantonese_func_def("文件名", get_name)
cantonese_func_def("读取", cantonese_read)
def get_list_end(lst : list):
return lst[-1]
def get_list_beg(lst : list):
return lst[0]
def where(lst : list, index : int, index2 = None, index3 = None, index4 = None):
if index2 != None and index3 == None and index4 == None:
return lst[index][index2]
if index3 != None and index2 != None and index4 == None:
return lst[index][index2][index3]
if index4 != None and index2 != None and index3 != None:
return lst[index][index2][index3][index4]
return lst[index]
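    # Illustrative example (not part of the original source):
    #   where([[1, 2], [3, 4]], 1, 0)  ->  3   (i.e. lst[1][0])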
def lst_insert(lst : list, index : int, obj) -> None:
lst.insert(index, obj)
def list_get(lst : list, index : int):
return lst[index]
cantonese_func_def("最尾", get_list_end)
cantonese_func_def("身位", where)
cantonese_func_def("挜位", lst_insert)
cantonese_func_def("排头位", get_list_beg)
cantonese_func_def("摞位", list_get)
def cantonese_json_init() -> None:
import json
def json_load(text):
return json.loads(text)
def show_json_load(text):
print(json.loads(text))
cantonese_func_def("睇下json", show_json_load)
cantonese_func_def("读取json", json_load)
def cantonese_csv_init() -> None:
import csv
def out_csv_read(file):
for i in csv.reader(file):
print(i)
    def get_csv(file):
        ret = []
        for i in csv.reader(file):
            ret.append(i)
        return ret
cantonese_func_def("睇睇csv有咩", out_csv_read)
cantonese_func_def("读取csv", get_csv)
def cantonese_random_init() -> None:
import random
cantonese_func_def("求其啦", random.random)
cantonese_func_def("求其int下啦", random.randint)
def cantonese_datetime_init() -> None:
import datetime
cantonese_func_def("宜家几点", datetime.datetime.now)
def cantonese_xml_init() -> None:
from xml.dom.minidom import parse
import xml.dom.minidom
def make_dom(file) -> None:
return xml.dom.minidom.parse(file).documentElement
def has_attr(docelm, attr) -> bool:
return docelm.hasAttribute(attr)
def get_attr(docelm, attr):
print(docelm.getAttribute(attr))
def getElementsByTag(docelm, tag : str, out = None, ctx = None):
if out == 1:
print(docelm.getElementsByTagName(tag))
if ctx != None:
print(ctx + docelm.getElementsByTagName(tag)[0].childNodes[0].data)
return docelm.getElementsByTagName(tag)
cantonese_func_def("整樖Dom树", make_dom)
cantonese_func_def("Dom有嘢", has_attr)
cantonese_func_def("睇Dom有咩", get_attr)
cantonese_func_def("用Tag揾", getElementsByTag)
cantonese_func_def("用Tag揾嘅", getElementsByTag)
def cantonese_turtle_init() -> None:
import turtle
cantonese_func_def("画个圈", turtle.circle)
cantonese_func_def("写隻字", turtle.write)
cantonese_func_def("听我支笛", turtle.exitonclick)
def cantonese_smtplib_init() -> None:
import smtplib
def send(sender : str, receivers : str, message : str,
smtpObj = smtplib.SMTP('localhost')) -> None:
try:
smtpObj.sendmail(sender, receivers, message)
print("Successfully sent email!")
        except smtplib.SMTPException:
            print("Error: unable to send email")
cantonese_func_def("send份邮件", send)
def cantonese_stack_init() -> None:
class _stack(object):
def __init__(self):
self.stack = []
def __str__(self):
return 'Stack: ' + str(self.stack)
def push(self, value):
self.stack.append(value)
def pop(self):
if self.stack:
return self.stack.pop()
else:
raise LookupError('stack 畀你丢空咗!')
cantonese_func_def("stack", _stack)
cantonese_func_def("我丢", _stack.pop)
cantonese_func_def("我顶", _stack.push)
def cantonese_func_def(func_name : str, func) -> None:
variable[func_name] = func
def cantonese_math_init():
import math
class Matrix(object):
def __init__(self, list_a):
assert isinstance(list_a, list)
self.matrix = list_a
self.shape = (len(list_a), len(list_a[0]))
self.row = self.shape[0]
self.column = self.shape[1]
def __str__(self):
return 'Matrix: ' + str(self.matrix)
def build_zero_value_matrix(self, shape):
zero_value_mat = []
for i in range(shape[0]):
zero_value_mat.append([])
for j in range(shape[1]):
zero_value_mat[i].append(0)
zero_value_matrix = Matrix(zero_value_mat)
return zero_value_matrix
def matrix_addition(self, the_second_mat):
assert isinstance(the_second_mat, Matrix)
assert the_second_mat.shape == self.shape
result_mat = self.build_zero_value_matrix(self.shape)
for i in range(self.row):
for j in range(self.column):
result_mat.matrix[i][j] = self.matrix[i][j] + the_second_mat.matrix[i][j]
return result_mat
def matrix_multiplication(self, the_second_mat):
assert isinstance(the_second_mat, Matrix)
assert self.shape[1] == the_second_mat.shape[0]
shape = (self.shape[0], the_second_mat.shape[1])
result_mat = self.build_zero_value_matrix(shape)
for i in range(self.shape[0]):
for j in range(the_second_mat.shape[1]):
number = 0
for k in range(self.shape[1]):
number += self.matrix[i][k] * the_second_mat.matrix[k][j]
result_mat.matrix[i][j] = number
return result_mat
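    # Illustrative example (not part of the original source): a 2x2 times a
    # 2x1 matrix gives a 2x1 result,
    #   Matrix([[1, 2], [3, 4]]).matrix_multiplication(Matrix([[5], [6]]))
    #   -> Matrix: [[17], [39]]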
def corr(a, b):
if len(a) == 0 or len(b) == 0:
return None
a_avg = sum(a) / len(a)
b_avg = sum(b) / len(b)
cov_ab = sum([(x - a_avg) * (y - b_avg) for x, y in zip(a, b)])
sq = math.sqrt(sum([(x - a_avg) ** 2 for x in a]) * sum([(x - b_avg) ** 2 for x in b]))
corr_factor = cov_ab / sq
return corr_factor
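    # Illustrative example: perfectly correlated data gives a Pearson
    # coefficient of 1.0,
    #   corr([1, 2, 3], [2, 4, 6])  ->  1.0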
def KNN(inX, dataSet, labels, k):
m, n = len(dataSet), len(dataSet[0])
distances = []
for i in range(m):
sum = 0
for j in range(n):
sum += (inX[j] - dataSet[i][j]) ** 2
distances.append(sum ** 0.5)
sortDist = sorted(distances)
classCount = {}
for i in range(k):
voteLabel = labels[distances.index(sortDist[i])]
classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
sortedClass = sorted(classCount.items(), key = lambda d : d[1], reverse = True)
return sortedClass[0][0]
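    # Illustrative example (hypothetical data): with two clusters,
    #   KNN([0, 0], [[0, 1], [1, 0], [9, 9], [8, 9]], ['a', 'a', 'b', 'b'], 3)
    # returns 'a', the majority label among the 3 nearest neighbours.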
def l_reg(testX, X, Y):
a = b = mxy = sum_x = sum_y = lxy = xiSubSqr = 0.0
for i in range(len(X)):
sum_x += X[i]
sum_y += Y[i]
x_ave = sum_x / len(X)
y_ave = sum_y / len(X)
for i in range(len(X)):
lxy += (X[i] - x_ave) * (Y[i] - y_ave)
xiSubSqr += (X[i] - x_ave) * (X[i] - x_ave)
b = lxy / xiSubSqr
a = y_ave - b * x_ave
print("Linear function is:")
print("y=" + str(b) + "x+"+ str(a))
return b * testX + a
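    # Illustrative example: fitting y = 2x exactly,
    #   l_reg(4, [1, 2, 3], [2, 4, 6])  ->  8.0   (and prints "y=2.0x+0.0")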
cantonese_func_def("KNN", KNN)
cantonese_func_def("l_reg", l_reg)
cantonese_func_def("corr", corr)
cantonese_func_def("矩阵", Matrix)
cantonese_func_def("点积", Matrix.matrix_multiplication)
def cantonese_model_new(model, datatest, tab, code) -> str:
    # use elif/else so a matched model is not wiped out by the fallback branch
    if model == "KNN":
        code += tab + "print(KNN(" + datatest + ", 数据, 标签, K))"
    elif model == "L_REG":
        code += tab + "print(l_reg(" + datatest + ", X, Y))"
    else:
        print("揾唔到你嘅模型: " + model + "!")
        code = ""
    return code
def cantonese_re_init() -> None:
def can_re_match(pattern : str, string : str, flags = 0):
return re.match(pattern, string, flags)
def can_re_match_out(pattern : str, string : str, flags = 0) -> None:
print(re.match(pattern, string, flags).span())
cantonese_func_def("衬", can_re_match_out)
cantonese_func_def("衬唔衬", can_re_match)
def cantonese_urllib_init() -> None:
import urllib.request
def can_urlopen_out(url : str) -> None:
print(urllib.request.urlopen(url).read())
def can_urlopen(url : str):
return urllib.request.urlopen(url)
cantonese_func_def("睇网页", can_urlopen_out)
cantonese_func_def("揾网页", can_urlopen)
def cantonese_requests_init() -> None:
import requests
def req_get(url : str):
headers = {
'user-agent':
'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Mobile Safari/537.36' \
}
        res = requests.get(url, headers = headers)
res.encoding = 'utf-8'
return res.text
cantonese_func_def("𠯠求", req_get)
def cantonese_socket_init() -> None:
import socket
def s_new():
return socket.socket()
def s_connect(s, port, host = socket.gethostname()):
s.connect((host, port))
return s
def s_recv(s, i : int):
return s.recv(i)
def s_close(s) -> None:
s.close()
cantonese_func_def("倾偈", s_connect)
cantonese_func_def("收风", s_recv)
cantonese_func_def("通电话", s_new)
cantonese_func_def("收线", s_close)
def cantonese_kivy_init() -> None:
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
def app_show(ctx, 宽 = (.5, .5),
高 = {"center_x": .5, "center_y": .5}) -> None:
return Label(text = ctx, size_hint = 宽, pos_hint = 高)
def app_run(app_main, build_func) -> None:
print("The app is running ...")
def build(self):
return build_func()
app_main.build = build
app_main().run()
def app_button(ctx, 宽 = (.5, .5),
高 = {"center_x": .5, "center_y": .5}) -> None:
return Button(text = ctx, size_hint = 宽, pos_hint = 高)
def app_layout(types, 布局 = "", 方向 = 'vertical', 间距 = 15, 内边距 = 10):
if 布局 == "":
if types == "Box":
return BoxLayout(orientation = 方向,
spacing = 间距, padding = 内边距)
else:
for i in types.stack:
布局.add_widget(i)
def button_bind(btn, func) -> None:
btn.bind(on_press = func)
cantonese_func_def("App", App)
cantonese_func_def("Label", Label)
cantonese_func_def("Button", Button)
cantonese_func_def("App运行", app_run)
cantonese_func_def("同我show", app_show)
cantonese_func_def("开掣", app_button)
cantonese_func_def("老作", app_layout)
cantonese_func_def("睇实佢", button_bind)
def cantonese_pygame_init() -> None:
import pygame
pygame.init()
def pygame_setmode(size, caption = ""):
if caption != "":
pygame.display.set_caption(caption)
return pygame.display.set_mode(size)
return pygame.display.set_mode(size)
def pygame_imgload(path):
return pygame.image.load(path)
def pygame_move(object, speed):
return object.move(speed)
def object_rect(object):
return object.get_rect()
def pygame_color(color):
return pygame.Color(color)
def pygame_key(e):
return e.key
def draw(屏幕, obj = "", obj_where = "", event = "", 颜色 = "") -> None:
if event == "":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
else:
event_map = {
"KEYDOWN" : KEYDOWN
}
for events in pygame.event.get():
for my_ev in event.stack:
if events.type == event_map[my_ev[0]]:
my_ev[1](events)
if events.type == pygame.QUIT:
sys.exit()
if 颜色 != "":
屏幕.fill(颜色)
if obj != "" and obj_where != "":
屏幕.blit(obj, obj_where)
pygame.time.delay(2)
pygame.display.flip()
def direction(obj, dir):
if dir == "左边" or dir == "left":
return obj.left
if dir == "右边" or dir == "right":
return obj.right
if dir == "上边" or dir == "top":
return obj.top
if dir == "call_begin边" or dir == "bottom":
return obj.bottom
cantonese_func_def("屏幕老作", pygame_setmode)
cantonese_func_def("图片老作", pygame_imgload)
cantonese_func_def("玩跑步", pygame_move)
cantonese_func_def("in边", object_rect)
cantonese_func_def("上画", draw)
cantonese_func_def("揾位", direction)
cantonese_func_def("画公仔", pygame.sprite.Sprite.__init__)
cantonese_func_def("公仔", pygame.sprite.Sprite)
cantonese_func_def("校色", pygame_color)
cantonese_func_def("摞掣", pygame_key)
def cantonese_lib_run(lib_name : str, path : str) -> None:
pa = os.path.dirname(path) # Return the last file Path
tokens = []
code = ""
found = False
for dirpath,dirnames,files in os.walk(pa):
if lib_name + '.cantonese' in files:
code = open(pa + '/' + lib_name + '.cantonese', encoding = 'utf-8').read()
found = True
    if not found:
raise ImportError(lib_name + '.cantonese not found.')
for token in cantonese_token(code):
tokens.append(token)
cantonese_parser = Parser(tokens, [])
cantonese_parser.parse()
run(cantonese_parser.Node, path = path)
def cantonese_run(code : str, is_to_py : bool, file : str) -> None:
tokens = []
for token in cantonese_token(code):
tokens.append(token)
cantonese_parser = Parser(tokens, [])
cantonese_parser.parse()
run(cantonese_parser.Node, path = file)
cantonese_lib_init()
if is_to_py:
print(TO_PY_CODE)
else:
import traceback
try:
exec(TO_PY_CODE, variable)
except Exception as e:
print("濑嘢: " + repr(e) + "!")
class WebParser(object):
def __init__(self, tokens : list, Node : list) -> None:
self.tokens = tokens
self.pos = 0
self.Node = Node
def get(self, offset : int) -> list:
if self.pos + offset >= len(self.tokens):
return ["", ""]
return self.tokens[self.pos + offset]
def match(self, name : str) -> bool:
if self.get(0)[1] == name:
return True
return False
def match_type(self, name : str) -> bool:
if self.get(0)[0] == name:
return True
return False
def check(self, a, b) -> None:
if a == b:
return
raise LookupError("Error Token:" + str(b))
def skip(self, offset) -> None:
self.pos += offset
def run(self, Nodes : list) -> None:
for node in Nodes:
if node[0] == "node_call":
web_call_new(node[1][0], node[1][1], node[2])
if node[0] == "node_css":
style_def(node[1][0], node[1][1], node[1][2])
def parse(self) -> None:
while True:
if self.match("老作一下"):
self.skip(1)
self.check(self.get(0)[1], "{")
self.skip(1)
stmt = []
node_main = []
while self.tokens[self.pos][1] != "}":
stmt.append(self.tokens[self.pos])
self.pos += 1
self.skip(1)
WebParser(stmt, node_main).parse()
self.Node = node_main
self.run(self.Node)
elif self.match_type("id"):
if self.get(1)[0] == "keywords" and self.get(1)[1] == "要点画":
id = self.get(0)[1]
self.skip(2)
style_stmt = []
node_style = []
while self.tokens[self.pos][1] != "搞掂":
style_stmt.append(self.tokens[self.pos])
self.pos += 1
self.skip(1)
self.cantonese_css_parser(style_stmt, id)
else:
name = self.get(0)[1]
self.skip(1)
self.check(self.get(0)[1], "=>")
self.skip(1)
self.check(self.get(0)[1], "[")
self.skip(1)
args = []
while self.tokens[self.pos][1] != "]":
args.append(self.tokens[self.pos][1])
self.pos += 1
self.skip(1)
with_style = False
if self.match('$'): # case 'style_with'
style_id = self.get(1)[1]
self.skip(2)
args.append(style_id)
with_style = True
web_ast_new(self.Node, "node_call", [name, args], with_style)
else:
break
def cantonese_css_parser(self, stmt : list, id : str) -> None:
cssParser(stmt, []).parse(id)
class cssParser(WebParser):
def parse(self, id : str) -> None:
while True:
if self.match_type("id"):
key = self.get(0)[1]
self.skip(1)
self.check(self.get(0)[1], "系")
self.skip(1)
self.check(self.get(0)[1], "[")
self.skip(1)
value = []
while self.tokens[self.pos][1] != "]":
value.append(self.tokens[self.pos][1])
self.pos += 1
self.skip(1)
web_ast_new(self.Node, "node_css", [id, key, value])
else:
break
self.run(self.Node)
def web_ast_new(Node : list, type : str, ctx : list, with_style = True) -> None:
Node.append([type, ctx, with_style])
def get_str(s : str) -> str:
return eval("str(" + s + ")")
sym = {}
style_attr = {}
style_value_attr = {}
TO_HTML = "<html>\n"
def title(args : list, with_style : bool) -> None:
global TO_HTML
if len(args) == 1:
t_beg, t_end = "<title>", "</title>\n"
TO_HTML += t_beg + get_str(args[0]) + t_end
if len(args) >= 2:
style = args.pop() if with_style else ""
t_beg, t_end = "<title id = \"" + style + "\">", "</title>\n"
TO_HTML += t_beg + get_str(args[0]) + t_end
def h(args : list, with_style : bool) -> None:
global TO_HTML
if len(args) == 1:
h_beg, h_end = "<h1>", "</h1>\n"
TO_HTML += h_beg + get_str(args[0]) + h_end
if len(args) >= 2:
style = args.pop() if with_style else ""
size = "" if len(args) == 1 else args[1]
t_beg, t_end = "<h" + size + " id = \"" + style + "\">", "</h" + size + ">\n"
TO_HTML += t_beg + get_str(args[0]) + t_end
def img(args : list) -> None:
global TO_HTML
i_beg, i_end = "<img src = ", ">\n"
TO_HTML += i_beg + eval("str(" + args[0] + ")") + i_end
def sym_init() -> None:
global sym
global style_attr
sym['写标题'] = title
sym['写隻字'] = h
sym['睇下'] = img
#sym['画表格'] = table
style_attr['颜色'] = "color"
style_attr['背景颜色'] = "background-color"
style_attr['对齐方式'] = "text-align"
style_value_attr['红色'] = "red"
style_value_attr['黄色'] = "yellow"
style_value_attr['白色'] = "white"
style_value_attr['黑色'] = "black"
style_value_attr['绿色'] = "green"
style_value_attr['蓝色'] = "blue"
    style_value_attr['居中'] = "center"
def head_init() -> None:
global TO_HTML
TO_HTML += "<head>\n"
TO_HTML += "<meta charset=\"utf-8\" />\n"
TO_HTML += "</head>\n"
def web_init() -> None:
global TO_HTML
sym_init()
head_init()
def web_end() -> None:
global TO_HTML
TO_HTML += "</html>"
style_sym = {}
def style_def(id : str, key : str, value : list) -> None:
global style_sym
if id not in style_sym:
style_sym[id] = [[key, value]]
return
style_sym[id].append([key, value])
def style_build(value : list) -> str:
    s = ""
    for item in value:
        if get_str(item[1][0]) not in style_value_attr.keys() and item[0] in style_attr.keys():
            s += style_attr[item[0]] + " : " + get_str(item[1][0]) + ";\n"
        elif get_str(item[1][0]) not in style_value_attr.keys() and item[0] not in style_attr.keys():
            s += item[0] + " : " + get_str(item[1][0]) + ";\n"
        elif get_str(item[1][0]) in style_value_attr.keys() and item[0] not in style_attr.keys():
            s += item[0] + " : " + style_value_attr[get_str(item[1][0])] + ";\n"
        else:
            s += style_attr[item[0]] + " : " + style_value_attr[get_str(item[1][0])] + ";\n"
    return s
def style_exec(sym : dict) -> None:
global TO_HTML
gen = ""
s_beg, s_end = "\n<style type=\"text/css\">\n", "</style>\n"
for key, value in sym.items():
gen += "#" + key + "{\n" + style_build(value) + "}\n"
TO_HTML += s_beg + gen + s_end
def web_call_new(func : str, args_list : list, with_style = False) -> None:
    if func in sym:
        sym[func](args_list, with_style)
    else:
        # `func` is a string keyword; an unknown keyword cannot be called
        raise NameError("Unknown web builtin: " + func)
def get_html_file(name : str) -> str:
return name[ : len(name) - len('cantonese')] + 'html'
def cantonese_web_run(code : str, file_name : str, open_serv = True) -> None:
global TO_HTML
keywords = r'(?P<keywords>(老作一下){1}|({){1}|(}){1}|(=>){1}|(\[){1}|(\]){1}|(要点画){1}|(搞掂){1}|' \
r'(系){1}|(用下){1}|(->){1}|(\$){1})'
num = r'(?P<num>\d+)'
string = r'(?P<string>\"([^\\\"]|\\.)*\")'
id = r'(?P<id>([\u4e00-\u9fa5]+){1}|([a-zA-Z_][a-zA-Z_0-9]*){1})'
patterns = re.compile('|'.join([keywords, string, num, id]))
tokens = []
for match in re.finditer(patterns, code):
tokens.append([match.lastgroup, match.group()])
web_init()
WebParser(tokens, []).parse()
web_end()
if style_sym != {}:
style_exec(style_sym)
print(TO_HTML)
if open_serv:
import socket
ip_port = ('127.0.0.1', 80)
back_log = 10
buffer_size = 1024
webserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
webserver.bind(ip_port)
webserver.listen(back_log)
print("Cantonese Web Starting at 127.0.0.1:80 ...")
while True:
conn, addr = webserver.accept()
recvdata = conn.recv(buffer_size)
conn.sendall(bytes("HTTP/1.1 201 OK\r\n\r\n", "utf-8"))
conn.sendall(bytes(TO_HTML, "utf-8"))
conn.close()
if input("input Y to exit:"):
print("Cantonese Web exiting...")
break
else:
f = open(get_html_file(file_name), 'w', encoding = 'utf-8')
f.write(TO_HTML)
exit(0)
def main():
try:
if len(sys.argv) > 1:
"""
install the cantonese library
"""
if sys.argv[1] == '-install':
import urllib.request
print("Installing ... ")
urllib.request.urlretrieve(sys.argv[2], sys.argv[3])
print("Successful installed!")
exit()
with open(sys.argv[1], encoding = "utf-8") as f:
code = f.read()
# Skip the comment
code = re.sub(re.compile(r'/\*.*?\*/', re.S), ' ', code)
is_to_py = False
# TODO: Use argparse library
if len(sys.argv) >= 3:
if sys.argv[2] == "-to_py" or sys.argv[2] == "-讲翻py":
is_to_py = True
if sys.argv[2] == "-to_web" or sys.argv[2] == "-倾偈":
if len(sys.argv) > 3 and (sys.argv[3] == "-compile" or sys.argv[3] == "-讲白啲"):
cantonese_web_run(code, sys.argv[1], False)
else:
cantonese_web_run(code, sys.argv[1])
cantonese_run(code, is_to_py, sys.argv[1])
else:
print("你想点啊? (请输入你嘅文件)")
except FileNotFoundError:
print("揾唔到你嘅文件 :(")
if __name__ == '__main__':
main()
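
# A minimal sketch of the web DSL this parser accepts (reconstructed from the
# grammar above; the exact surface syntax is an assumption):
#   老作一下 {
#       写标题 => ["My Page"]
#       写隻字 => ["Hello"]
#   }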
|
StarcoderdataPython
|
3488457
|
"""
Boredom
v0.1 错误:直接统计了所有可能的数值a和对应的count,忽视了要求中只影响a-1,a+1两个数,而误认为影响了小于a和大于a的两个数
"""
'''
基本思路:
1. 函数找到每个值及其对应的sum
2. 找到最大sum 的所对应的num,并和周围相邻num-1,num+1对应的sum和比较。如果大于则。。,小于就删掉
3. 加入result,并删除对应值
'''
def find_counts(lists):
counts = []
    # sort in descending order so equal elements are adjacent and easy to count
lists.sort(reverse=True)
while len(lists) != 0:
temp = lists[0]
num = lists.count(temp)
counts.append([temp, num])
lists = lists[num:]
return counts
'''
def if_need_eliminate(m, n):
if abs(m - n) == 1:
return True
elif abs(m - n) == 0:
return -1
else:
return False
'''
def recursive_maximization(total_num, counts):
if total_num == 0:
return 0
elif total_num == 1:
return counts[0][0] * counts[0][1]
else:
#total_num >= 2:
largest_sum = counts[0][0] * counts[0][1]
if abs(counts[0][0]-counts[1][0]) == 1:
# need elimination
a = recursive_maximization(total_num-1, counts[1:])
b = recursive_maximization(total_num-2, counts[2:]) + largest_sum
return max(a, b)
else:
return recursive_maximization(total_num-1, counts[1:]) + largest_sum
# read the inputs
n = int(input())
integers = list(map(int, input().split(' ')))
counts = find_counts(integers)
result = recursive_maximization(len(counts), counts)
print(result)
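
# One of the problem's sample tests (Codeforces 455A "Boredom"), for a quick check:
#   input:  9
#           1 2 1 3 2 2 2 2 3
#   output: 10   (take all the 2s; choosing 2 deletes the 1s and 3s)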
|
StarcoderdataPython
|
110209
|
<gh_stars>0
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import mock
import st2tests.config as tests_config
tests_config.parse_args()
from unittest2 import TestCase
from st2actions.container.service import RunnerContainerService
from st2common.constants import action as action_constants
from st2tests.fixturesloader import FixturesLoader
from st2tests.fixturesloader import get_fixtures_base_path
from st2common.util.api import get_full_public_api_url
from st2common.util.green import shell
from st2common.constants.runners import LOCAL_RUNNER_DEFAULT_ACTION_TIMEOUT
import local_runner
class LocalShellCommandRunnerTestCase(TestCase):
fixtures_loader = FixturesLoader()
def test_shell_command_action_basic(self):
models = self.fixtures_loader.load_models(
fixtures_pack='generic', fixtures_dict={'actions': ['local.yaml']})
action_db = models['actions']['local.yaml']
runner = self._get_runner(action_db, cmd='echo 10')
runner.pre_run()
status, result, _ = runner.run({})
runner.post_run(status, result)
self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertEquals(result['stdout'], 10)
def test_shell_script_action(self):
models = self.fixtures_loader.load_models(
fixtures_pack='localrunner_pack', fixtures_dict={'actions': ['text_gen.yml']})
action_db = models['actions']['text_gen.yml']
entry_point = self.fixtures_loader.get_fixture_file_path_abs(
'localrunner_pack', 'actions', 'text_gen.py')
runner = self._get_runner(action_db, entry_point=entry_point)
runner.pre_run()
status, result, _ = runner.run({'chars': 1000})
runner.post_run(status, result)
self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertEquals(len(result['stdout']), 1000)
def test_timeout(self):
models = self.fixtures_loader.load_models(
fixtures_pack='generic', fixtures_dict={'actions': ['local.yaml']})
action_db = models['actions']['local.yaml']
# smaller timeout == faster tests.
runner = self._get_runner(action_db, cmd='sleep 10', timeout=0.01)
runner.pre_run()
status, result, _ = runner.run({})
runner.post_run(status, result)
self.assertEquals(status, action_constants.LIVEACTION_STATUS_TIMED_OUT)
@mock.patch.object(
shell, 'run_command',
mock.MagicMock(return_value=(-15, '', '', False)))
def test_shutdown(self):
models = self.fixtures_loader.load_models(
fixtures_pack='generic', fixtures_dict={'actions': ['local.yaml']})
action_db = models['actions']['local.yaml']
runner = self._get_runner(action_db, cmd='sleep 0.1')
runner.pre_run()
status, result, _ = runner.run({})
self.assertEquals(status, action_constants.LIVEACTION_STATUS_ABANDONED)
def test_large_stdout(self):
models = self.fixtures_loader.load_models(
fixtures_pack='localrunner_pack', fixtures_dict={'actions': ['text_gen.yml']})
action_db = models['actions']['text_gen.yml']
entry_point = self.fixtures_loader.get_fixture_file_path_abs(
'localrunner_pack', 'actions', 'text_gen.py')
runner = self._get_runner(action_db, entry_point=entry_point)
runner.pre_run()
char_count = 10 ** 6 # Note 10^7 succeeds but ends up being slow.
status, result, _ = runner.run({'chars': char_count})
runner.post_run(status, result)
self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertEquals(len(result['stdout']), char_count)
def test_common_st2_env_vars_are_available_to_the_action(self):
models = self.fixtures_loader.load_models(
fixtures_pack='generic', fixtures_dict={'actions': ['local.yaml']})
action_db = models['actions']['local.yaml']
runner = self._get_runner(action_db, cmd='echo $ST2_ACTION_API_URL')
runner.pre_run()
status, result, _ = runner.run({})
runner.post_run(status, result)
self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertEqual(result['stdout'].strip(), get_full_public_api_url())
runner = self._get_runner(action_db, cmd='echo $ST2_ACTION_AUTH_TOKEN')
runner.pre_run()
status, result, _ = runner.run({})
runner.post_run(status, result)
self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertEqual(result['stdout'].strip(), 'mock-token')
def test_sudo_and_env_variable_preservation(self):
        # Verify that the environment variables are correctly preserved when running as a
        # root / non-system user
# Note: This test will fail if SETENV option is not present in the sudoers file
models = self.fixtures_loader.load_models(
fixtures_pack='generic', fixtures_dict={'actions': ['local.yaml']})
action_db = models['actions']['local.yaml']
cmd = 'echo `whoami` ; echo ${VAR1}'
env = {'VAR1': 'poniesponies'}
runner = self._get_runner(action_db, cmd=cmd, sudo=True, env=env)
runner.pre_run()
status, result, _ = runner.run({})
runner.post_run(status, result)
self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertEqual(result['stdout'].strip(), 'root\nponiesponies')
@staticmethod
def _get_runner(action_db,
entry_point=None,
cmd=None,
on_behalf_user=None,
user=None,
kwarg_op=local_runner.DEFAULT_KWARG_OP,
timeout=LOCAL_RUNNER_DEFAULT_ACTION_TIMEOUT,
sudo=False,
env=None):
runner = local_runner.LocalShellRunner(uuid.uuid4().hex)
runner.container_service = RunnerContainerService()
runner.action = action_db
runner.action_name = action_db.name
runner.liveaction_id = uuid.uuid4().hex
runner.entry_point = entry_point
runner.runner_parameters = {local_runner.RUNNER_COMMAND: cmd,
local_runner.RUNNER_SUDO: sudo,
local_runner.RUNNER_ENV: env,
local_runner.RUNNER_ON_BEHALF_USER: user,
local_runner.RUNNER_KWARG_OP: kwarg_op,
local_runner.RUNNER_TIMEOUT: timeout}
runner.context = dict()
runner.callback = dict()
runner.libs_dir_path = None
runner.auth_token = mock.Mock()
runner.auth_token.token = 'mock-token'
return runner
class LocalShellScriptRunner(TestCase):
fixtures_loader = FixturesLoader()
    def test_script_with_parameters_parameter_serialization(self):
models = self.fixtures_loader.load_models(
fixtures_pack='generic', fixtures_dict={'actions': ['local_script_with_params.yaml']})
action_db = models['actions']['local_script_with_params.yaml']
entry_point = os.path.join(get_fixtures_base_path(),
'generic/actions/local_script_with_params.sh')
action_parameters = {
'param_string': 'test string',
'param_integer': 1,
'param_float': 2.55,
'param_boolean': True,
'param_list': ['a', 'b', 'c'],
'param_object': {'foo': 'bar'}
}
runner = self._get_runner(action_db=action_db, entry_point=entry_point)
runner.pre_run()
status, result, _ = runner.run(action_parameters=action_parameters)
runner.post_run(status, result)
self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertTrue('PARAM_STRING=test string' in result['stdout'])
self.assertTrue('PARAM_INTEGER=1' in result['stdout'])
self.assertTrue('PARAM_FLOAT=2.55' in result['stdout'])
self.assertTrue('PARAM_BOOLEAN=1' in result['stdout'])
self.assertTrue('PARAM_LIST=a,b,c' in result['stdout'])
self.assertTrue('PARAM_OBJECT={"foo": "bar"}' in result['stdout'])
action_parameters = {
'param_string': 'test string',
'param_integer': 1,
'param_float': 2.55,
'param_boolean': False,
'param_list': ['a', 'b', 'c'],
'param_object': {'foo': 'bar'}
}
runner = self._get_runner(action_db=action_db, entry_point=entry_point)
runner.pre_run()
status, result, _ = runner.run(action_parameters=action_parameters)
runner.post_run(status, result)
self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertTrue('PARAM_BOOLEAN=0' in result['stdout'])
action_parameters = {
'param_string': '',
'param_integer': None,
'param_float': None,
}
runner = self._get_runner(action_db=action_db, entry_point=entry_point)
runner.pre_run()
status, result, _ = runner.run(action_parameters=action_parameters)
runner.post_run(status, result)
self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertTrue('PARAM_STRING=\n' in result['stdout'])
self.assertTrue('PARAM_INTEGER=\n' in result['stdout'])
self.assertTrue('PARAM_FLOAT=\n' in result['stdout'])
def _get_runner(self, action_db, entry_point):
runner = local_runner.LocalShellRunner(uuid.uuid4().hex)
runner.container_service = RunnerContainerService()
runner.action = action_db
runner.action_name = action_db.name
runner.liveaction_id = uuid.uuid4().hex
runner.entry_point = entry_point
runner.runner_parameters = {}
runner.context = dict()
runner.callback = dict()
runner.libs_dir_path = None
runner.auth_token = mock.Mock()
runner.auth_token.token = '<PASSWORD>'
return runner
|
StarcoderdataPython
|
3225359
|
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) <= 1:
return len(s)
longest = 0
left = 0
seen = {}
for right in range(len(s)):
if s[right] in seen:
left = max(seen[s[right]], left)
longest = max(longest, right - left + 1)
seen[s[right]] = right + 1 #+1 gives the window
return longest
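
# Example: "abcabcbb" -> 3, since the longest substring without repeating
# characters is "abc".
# print(Solution().lengthOfLongestSubstring("abcabcbb"))  # 3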
|
StarcoderdataPython
|
9729517
|
<gh_stars>1-10
import numpy
from .utils import mask, condense, pauli_diagonalize1
from .paulialg import Pauli, PauliMonomial, pauli_zero
from .stabilizer import (StabilizerState,
zero_state, identity_map, clifford_rotation_map, random_clifford_map)
class CliffordGate(object):
'''Represents a Clifford gate.
Parameters:
*qubits: int - the qubits that this gate acts on.
Data:
generator: Pauli - if the clifford gate is a rotation generated by a single
Pauli generator (which is generally not the case), then this records
its generator. It is more efficient to implement Clifford rotation than
generic Clifford transform.
forward_map / backward_map: CliffordMap - a generic Clifford gate will be
described by the Clifford map, which is a table specifying how each
single Pauli operator gets mapped to. (forward and backward maps must
be inverse to each other).
    Note: if either the generator or Clifford maps are specified, the gate will
represent the specific unitary transformation; otherwise, the gate
is treated as a random Clifford gate that resamples at every call.'''
def __init__(self, *qubits):
self.qubits = qubits # the qubits this gate acts on
self.n = len(self.qubits) # number of qubits it acts on
self.generator = None
self.forward_map = None
self.backward_map = None
def __repr__(self):
return '[{}]'.format(','.join(str(qubit) for qubit in self.qubits))
def copy(self):
gate = CliffordGate(*self.qubits)
if self.generator is not None:
gate.generator = self.generator.copy()
if self.forward_map is not None:
gate.forward_map = self.forward_map.copy()
if self.backward_map is not None:
gate.backward_map = self.backward_map.copy()
return gate
def commute_with(self, other_gate):
return len(set(self.qubits) & set(other_gate.qubits))==0
def forward(self, obj):
if self.generator is not None: # if generator is given, use generator
if self.n == obj.N: # global gate
obj.rotate_by(self.generator)
else: # local gate
obj.rotate_by(self.generator, mask(self.qubits, obj.N))
else: # if generator not given, check maps
if self.forward_map is None:
if self.backward_map is None:
# if both maps not given, treated as random gate
clifford_map = random_clifford_map(self.n)
else:
self.forward_map = self.backward_map.inverse()
clifford_map = self.forward_map
else:
clifford_map = self.forward_map
if self.n == obj.N: # global gate
obj.transform_by(clifford_map)
else: # local gate
obj.transform_by(clifford_map, mask(self.qubits, obj.N))
return obj
def backward(self, obj):
if self.generator is not None: # if generator is given, use generator
if self.n == obj.N: # global gate
obj.rotate_by(-self.generator)
else: # local gate
obj.rotate_by(-self.generator, mask(self.qubits, obj.N))
else: # if generator not given, check maps
if self.backward_map is None:
if self.forward_map is None:
# if both maps not given, treated as random gate
clifford_map = random_clifford_map(self.n)
else:
self.backward_map = self.forward_map.inverse()
clifford_map = self.backward_map
else:
clifford_map = self.backward_map
            if self.n == obj.N: # global gate
obj.transform_by(clifford_map)
else: # local gate
obj.transform_by(clifford_map, mask(self.qubits, obj.N))
return obj
def compile(self):
'''construct forward and backward Clifford maps for this gate'''
if self.generator is not None:
self.forward_map = clifford_rotation_map(self.generator)
self.backward_map = clifford_rotation_map(-self.generator)
else:
if self.forward_map is None:
if self.backward_map is None:
raise Exception('random Clifford gate can not be compiled.')
else:
self.forward_map = self.backward_map.inverse()
else:
if self.backward_map is None:
self.backward_map = self.forward_map.inverse()
return self
class CliffordLayer(object):
    '''Represents a layer of Clifford gates.
Parameters:
*gate: CliffordGate - the gates that this layer contains'''
def __init__(self, *gates):
self.gates = list(gates) # the gates this layer have
self.prev_layer = None # the previous layer
self.next_layer = None # the next layer
self.forward_map = None
self.backward_map = None
def __repr__(self):
return '|{}|'.format(''.join(repr(gate) for gate in self.gates))
def copy(self):
layer = CliffordLayer(*[gate.copy() for gate in self.gates])
if self.forward_map is not None:
layer.forward_map = self.forward_map.copy()
if self.backward_map is not None:
layer.backward_map = self.backward_map.copy()
return layer
def commute_with(self, other_gate):
return all(gate.commute_with(other_gate) for gate in self.gates)
def take(self, gate):
if self.prev_layer is None: # if I have no previous layer
self.gates.append(gate) # I will take the gate
else: # if I have a previous layer, check it
if self.prev_layer.commute_with(gate): # if commute
self.prev_layer.take(gate) # previous layer take the gate
else: # if not commute
self.gates.append(gate) # I will have to keep the gate
def forward(self, obj):
if self.forward_map is None:
for gate in self.gates:
gate.forward(obj)
else:
obj.transform_by(self.forward_map)
return obj
def backward(self, obj):
if self.backward_map is None:
for gate in self.gates:
gate.backward(obj)
else:
obj.transform_by(self.backward_map)
return obj
def compile(self, N):
'''construct forward and backward Clifford maps for this layer'''
self.forward_map = identity_map(N)
self.backward_map = identity_map(N)
for gate in self.gates:
gate.compile()
self.forward_map.embed(gate.forward_map, mask(gate.qubits, N))
self.backward_map.embed(gate.backward_map, mask(gate.qubits, N))
return self
class CliffordCircuit(object):
'''Represents a circuit of Clifford gates.
Examples:
# create a circuit
circ = CliffordCircuit()
# add a gate between qubits 0 and 1
circ.gate(0,1)
# or take in a specific gate
g = pauli('-XX')
circ.take(clifford_rotation_gate(g))'''
def __init__(self):
self.first_layer = CliffordLayer()
self.last_layer = self.first_layer
self.forward_map = None
self.backward_map = None
def __repr__(self):
layout = '\n'.join(repr(layer) for layer in self.layers_backward())
return 'QuantumCircuit(\n{})'.format(layout).replace('\n','\n ')
def __getattr__(self, item):
        if item == 'N': # if self.N not defined
# infer from gates (assuming last qubit is covered)
N = 0
for layer in self.layers_forward():
for gate in layer.gates:
N = max(N, max(gate.qubits)+1)
return N
else:
return super().__getattribute__(item)
def copy(self):
circ = CliffordCircuit()
for i, layer in enumerate(self.layers_forward()):
new_layer = layer.copy()
if i == 0:
circ.first_layer = new_layer
circ.last_layer = new_layer
else:
circ.last_layer.next_layer = new_layer
new_layer.prev_layer = circ.last_layer
circ.last_layer = new_layer
if self.forward_map is not None:
circ.forward_map = self.forward_map.copy()
if self.backward_map is not None:
circ.backward_map = self.backward_map.copy()
return circ
def layers_backward(self):
# yield from last to first layers
layer = self.last_layer
while layer is not None:
yield layer
layer = layer.prev_layer
def layers_forward(self):
# yield from first to last layers
layer = self.first_layer
while layer is not None:
yield layer
layer = layer.next_layer
def take(self, gate):
if self.last_layer.commute_with(gate): # if last layer commute with the new gate
self.last_layer.take(gate) # the last layer takes the gate
else: # otherwise create a new layer to handle this
new_layer = CliffordLayer(gate) # a new layer with the new gate
# link to the layer structure
self.last_layer.next_layer = new_layer
new_layer.prev_layer = self.last_layer
self.last_layer = new_layer # new layer becomes the last
return self
def gate(self, *qubits):
return self.take(CliffordGate(*qubits)) # create a new gate
def compose(self, other):
'''Compose the circuit with another circuit.
U = U_other U_self
Parameters:
other: CliffordCircuit - another circuit to be combined.
Note: composition will not update the compiled information. Need
compilation after circuit composition.'''
for layer in other.layers_forward():
for gate in layer.gates:
self.take(gate)
return self
def forward(self, obj):
if self.forward_map is None:
for layer in self.layers_forward():
layer.forward(obj)
else:
obj.transform_by(self.forward_map)
return obj
def backward(self, obj):
if self.backward_map is None:
for layer in self.layers_backward():
layer.backward(obj)
else:
obj.transform_by(self.backward_map)
return obj
def compile(self, N=None):
'''Construct forward and backward Clifford maps for this circuit
Note: The compilation creates a single Clifford map representing the
entire circuit, which allows it to run faster for deep circuits.'''
N = self.N if N is None else N
self.forward_map = identity_map(N)
self.backward_map = identity_map(N)
for layer in self.layers_forward():
layer.compile(N)
self.forward_map = self.forward_map.compose(layer.forward_map)
self.backward_map = self.backward_map.compose(layer.backward_map)
return self
def povm(self, nsample):
'''Assuming computational basis measurement follows the circuit, this
will back evolve the computational basis state to generate prior POVM.
This returns a generator.'''
zero = zero_state(self.N)
for _ in range(nsample):
yield self.backward(zero)
# ---- gate constructors ----
def clifford_rotation_gate(generator, qubits=None):
    '''Construct a Clifford rotation gate generated by a generator.
Parameters:
generator: Pauli - Pauli operator that generates the rotation.
U = exp( i pi/4 g) = (1 + i g)/sqrt(2)'''
g_cond, qubits_cond = condense(generator.g) # extract generator support
if qubits is None:
qubits = qubits_cond
else:
qubits = qubits[qubits_cond]
gate = CliffordGate(*qubits)
gate.generator = Pauli(g_cond, generator.p) # condensed generator
return gate
# ---- circuit constructors ----
def identity_circuit(N):
    '''Construct an identity Clifford circuit containing no gates.
Parameters:
N: int - number of qubits.'''
circ = CliffordCircuit()
circ.N = N # fix number of qubits explicitly
return circ
def brickwall_rcc(N, depth):
'''Construct random Clifford circuit with brick wall circuit structure.
Parameters:
N: int - number of qubits.
depth: int - circuit depth.'''
assert(N % 2 == 0) # N should be even
circ = identity_circuit(N)
for l in range(depth):
for i in range(l % 2, N, 2):
circ.gate(i, (i+1) % N)
return circ
def onsite_rcc(N):
'''Construct random Clifford circuit of a layer of single-site gates.
(useful for implementing random Pauli measurements)
Parameters:
N: int - number of qubits.'''
circ = identity_circuit(N)
for i in range(N):
circ.gate(i)
return circ
def global_rcc(N):
'''Construct random Clifford circuit of a global Clifford gate.
(useful for implementing random Clifford measurements)
Parameters:
N: int - number of qubits.'''
circ = identity_circuit(N)
circ.gate(*range(N))
return circ
# ---- diagonalization ----
def diagonalize(obj, i0 = 0, afterward=False):
'''Diagonalize a Pauli operator or a stabilizer state (density matrix).
Parameters:
obj: Pauli - the operator to be diagonalized, or
StabilizerState - the state to be diagonalized.
i0: int - index of the qubit to diagonalize to.
        afterward: bool - whether to act only on the qubits from i0 onward.
Returns:
circ: CliffordCircuit - circuit that diagonalizes obj.'''
circ = identity_circuit(obj.N)
if isinstance(obj, (Pauli, PauliMonomial)):
if afterward:
for g in pauli_diagonalize1(obj.g[2*i0:]):
circ.take(clifford_rotation_gate(Pauli(g), numpy.arange(i0,obj.N)))
else:
for g in pauli_diagonalize1(obj.g, i0):
circ.take(clifford_rotation_gate(Pauli(g)))
elif isinstance(obj, StabilizerState):
gate = CliffordGate(*numpy.arange(obj.N))
gate.backward_map = obj.to_map() # set backward map to encoding map
# then the forward map automatically decodes (= diagonalize) the state
circ.take(gate)
else:
raise NotImplementedError('diagonalization is not implemented for {}.'.format(type(obj).__name__))
return circ
def SBRG(hmdl, max_rate=2., tol=1.e-8):
'''Approximately diagonalize a Hamiltonian by SBRG.
Parameters:
hmdl: PauliPolynomial - model Hamiltonian.
Returns:
heff: PauliPolynomial - MBL effective Hamiltonian.
circ: CliffordCircuit - Clifford circuit to diagonalize the Hamiltonian.'''
htmp = hmdl.copy() # copy of model Hamiltonian to workspace
N = htmp.N # system size
circ = identity_circuit(N) # create circuit
heff = pauli_zero(N) # create effective Hamiltonian
# SBRG iteration
for i0 in range(N): # pivot through every qubit
leading = numpy.argmax(numpy.abs(htmp.cs)) # get leading term index
# find circuit to diagonalize leading term to i0
circ_i0 = diagonalize(htmp[leading], i0, afterward=True)
circ.compose(circ_i0) # append it to total circuit
circ_i0.forward(htmp) # apply it to Hamiltonian
mask_commute = htmp.gs[:,2*i0] == 0 # mask diagonal terms
len_anti = sum(~mask_commute) # count number of offdiagonal terms
if len_anti != 0: # if offdiagonal terms exist
# split diagonal from offdiagonal terms
diag = htmp[mask_commute]
offdiag = htmp[~mask_commute]
            # eliminate off-diagonal terms by perturbation theory
len_max = int(round(max_rate * len_anti)) # max perturbation terms to keep
prod = (offdiag @ offdiag).reduce(tol)[:len_max]
htmp = diag + 0.5 * (htmp[leading].inverse() @ prod)
        # mask terms that have become trivial on the remaining qubits
mask_trivial = numpy.all(htmp.gs[:,(2*i0+2):] == 0, -1)
heff += htmp[mask_trivial] # collect trivial terms to effective Hamiltonian
htmp = htmp[~mask_trivial] # retain with non-trivial terms
return heff, circ
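
# Minimal usage sketch (all names are defined in this module; illustrative only):
# circ = brickwall_rcc(4, depth=2)   # random brick-wall Clifford circuit
# circ.compile()                     # fuse layers into single Clifford maps
# state = zero_state(4)              # the |0...0> stabilizer state
# circ.forward(state)                # evolve the state through the circuit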
|
StarcoderdataPython
|
3562871
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from openslides.utils.models import AbsoluteUrlMixin
from openslides.utils.test import TestCase
class MyModel(AbsoluteUrlMixin):
""""
Model for testing
"""
def get_absolute_url(self, link='default'):
if link == 'default' or link == 'known':
url = 'my url'
else:
url = super(MyModel, self).get_absolute_url(link)
return url
class TestAbsoluteUrlMixin(TestCase):
def test_get_absolute_url(self):
my_object = MyModel()
self.assertEqual(my_object.get_absolute_url(), 'my url')
self.assertEqual(my_object.get_absolute_url('known'), 'my url')
self.assertRaisesMessage(
ValueError,
'Unknown Link "unknown" for model "<class \'tests.utils.test_models.MyModel\'>"',
my_object.get_absolute_url, 'unknown')
|
StarcoderdataPython
|
4942745
|
# ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Transition
- This module is responsible for defining the transition classes
"""
import collections
class Transition(
collections.namedtuple('Transition', ('phase_ix', # The phase ix
'state', # The proto representation of the state
'phase', # The proto representation of the phase
'policy', # The details of the behaviour policy
'rewards', # The rewards received by each power
'orders', # {power => orders during the phase}
'phase_history', # The phase_history_proto
'possible_orders'))): # The possible_orders_proto
""" Represents a transition (state, action, reward) for all powers """
class TransitionDetails(
collections.namedtuple('TransitionDetails', ('transition', # The transition named tuple
'power_name', # The power that issued orders
'draw_action', # Whether the power wanted a draw or not
'reward_target', # The reward target for the policy update
'value_target', # The value target for the baseline update
                                                  'log_p_t',           # None or the log of the importance sampling ratio
'log_probs'))): # Log probs of each token under the model
""" Contains the details of a transition (as computed by the advantage function) for a specific power """
class ReplaySample(
collections.namedtuple('ReplaySample', ('saved_game_proto', # A SavedGameProto instance
'power_phases_ix'))): # {power_name: [phases_ix]}
""" Contains a saved game proto with a list of prioritized transition ix """
class ReplayPriority(
collections.namedtuple('ReplayPriority', ('game_id', # The game id containing a transition priority
'power_name', # The power issuing orders
'phase_ix', # The phase ix
'priority'))): # The priority assigned to the transition/phase
""" Contains the priority for a given transition ix """
|
StarcoderdataPython
|
3525377
|
<gh_stars>0
import os
import shutil

from pydub import AudioSegment
from pydub.silence import split_on_silence
def split(filepath, save_path, time_length):
sound = AudioSegment.from_wav(filepath)
dBFS = sound.dBFS
chunks = split_on_silence(sound,
min_silence_len=500,
silence_thresh=dBFS-16,
keep_silence=250 # optional
)
    # setting the minimum length of each chunk to time_length seconds
target_length = time_length * 1000
output_chunks = [chunks[0]]
for chunk in chunks[1:]:
if len(output_chunks[-1]) < target_length:
output_chunks[-1] += chunk
else:
# if the last output chunk is longer than the target length,
# we can start a new one
output_chunks.append(chunk)
    # Attention! this wipes any existing save_path directory before exporting
if os.path.exists(save_path):
shutil.rmtree(save_path)
os.mkdir(save_path)
for i, chunk in enumerate(output_chunks):
chunk.export(os.path.join(
save_path, "{0}.wav".format(i)), format="wav")
return len(output_chunks)
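
# Example (hypothetical paths): split long.wav into ~10-second chunks:
# n_chunks = split('long.wav', 'chunks', 10)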
|
StarcoderdataPython
|
1673449
|
<reponame>gdlg/pytorch_nms<gh_stars>10-100
from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
setup(name='nms', packages=['nms'],
package_dir={'':'src'},
ext_modules=[CUDAExtension('nms.details', ['src/nms.cpp', 'src/nms_kernel.cu'])],
cmdclass={'build_ext': BuildExtension})
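
# Typical build/install from the repository root (standard setuptools usage):
#   python setup.py install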
|
StarcoderdataPython
|
3353678
|
#!/usr/bin/python
'''
mjtsai1974@20180603, v1.0, Simple plot
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as stats
import seaborn as sns
#Generate a randomized data set
#x = np.random.randn(500)
#Plot command::Begin
#plot(x, y), y v.s. x::Begin
#plt.plot(x) #Take the x dataset on the Y-axis, interconnect x[i]<->x[i+1]
#plt.plot(x, '.') #Take the x dataset on the Y-axis, draw each x[i] as '.', for '.' is a symbol
#plt.plot(x, np.arange(len(x))) #Take the x dataset on the X-axis, and np.arange(len(x))=[0..500] on the Y-axis
#plt.plot(np.arange(len(x)), x) #Take the np.arange(len(x))=[0..500] as X-axis, and x dataset on the Y-axis, interconnect x[i]<->x[i+1]
#plot(x, y), y v.s. x::End
#scatter(x, y), y v.s. x::Begin
##plt.scatter(np.arange(len(x)),x)
#scatter(x, y), y v.s. x::End
#histogram::Begin
#plt.hist(x, bins=25) #bin=25 specifies 25 bins in graph
#histogram::End
#Kernel density estimation::Begin
#sns.kdeplot(x)
#sns.kdeplot(x, bw=1.0) #bw is the bandwitdh, the determination of the smoothness of the estimation
#Kernel density estimation::End
#Cumulative frequency::Begin
#plt.plot(stats.cumfreq(x, 40)[0]) #stats.cumfreq(x, 40)[0], x->data set, 40->number of bins, [0]->the cumulative frequency counts (first field of the result)
#Cumulative frequency::End
#Errorbar::Begin
'''
index = np.arange(5)
y = index**2
ErrorBar = y/2
plt.errorbar(index, y, yerr=ErrorBar, fmt='o')
'''
#Errorbar::End
#Box plot::Begin
#plt.boxplot(x, sym='*')
#Box plot::End
#Violinplot::Begin
#to be conti...
#Violinplot::End
#Group bar charts::Begin
'''
x = np.random.rand(10, 4)
df = pd.DataFrame(x, columns=['x1', 'x2', 'x3', 'x4'])
df.plot(kind='bar')
'''
#Group bar charts::End
#Pie charts::Begin
'''
txtLabels = 'Cat', 'Dogs', 'Frogs', 'Others'
fractions = [45, 30, 15, 10]
offsets = (0, 0.05, 0, 0)
#plt.pie(fractions, explode=offsets, labels=txtLabels) #Pie chart basic prototype
#plt.pie(fractions, explode=offsets, labels=txtLabels, autopct='%1.1f%%') #Pie chart basic prototype + proportion%
#plt.pie(fractions, explode=offsets, labels=txtLabels, autopct='%1.1f%%', shadow=True) #Pie chart basic prototype + proportion% + shadow
#plt.pie(fractions, explode=offsets, labels=txtLabels, autopct='%1.1f%%', shadow=True, startangle=90) #Pie chart basic prototype + proportion% + shadow + start with 90 degree consisting of such parts if it is
plt.pie(fractions, explode=offsets, labels=txtLabels, autopct='%1.1f%%', shadow=True, startangle=90, colors=sns.color_palette('muted'))
plt.axis('equal')
'''
#Pie charts::End
#Scatter plot::Begin
df = pd.DataFrame(np.random.rand(50, 4), columns=['a','b','c','d'])
#df.plot(kind='scatter', x='a', y='b') #scatter plot matrix of y v.s. x
df.plot(kind='scatter', x='a', y='b', s=df['c']*10) #s=scale
#df.plot.line()
#df.plot.scatter(x='a', y='b')
#df.plot.hist()
#Scatter plot::End
#Plot command::End
plt.show()
|
StarcoderdataPython
|
5124261
|
<filename>tests/pyflakes_generic_plugins/NoFutureImportTest.py
import ast
import unittest
from pyflakes.checker import Checker
from pyflakes_generic_plugins.NoFutureImport import NoFutureImport
class NoFutureImportTest(unittest.TestCase):
def setUp(self):
self.filename = 'NoFutureImport'
def check_validity(self, file):
tree = ast.parse(''.join(file))
results = NoFutureImport(tree, self.filename).run()
self.assertFalse(len(list(results)))
def check_invalidity(self, file):
tree = ast.parse(''.join(file))
results = NoFutureImport(tree, self.filename).run()
self.assertTrue(len(list(results)))
def test_external_initialization(self):
"""
Assert if provision for external initialization is present.
Ensures that flake8 can initialize plugin with pyflakes
instance.
"""
file = ['import sys']
tree = ast.parse(''.join(file))
checker = Checker(tree, self.filename)
no_future_instance = NoFutureImport(tree, self.filename)
no_future_instance.checker = checker
self.assertTrue(no_future_instance.module_scope)
self.assertFalse(len(list(no_future_instance.run())))
def test_valid(self):
self.check_validity(['import sys'])
self.check_validity(['from os import path'])
self.check_validity(['from ..custom import something'])
def test_invalid(self):
self.check_invalidity(['from __future__ import division'])
self.check_invalidity(['from __future__ import print_function, \\\n',
' division\n'])
self.check_invalidity(['from __future__ import division, '
'print_function'])
self.check_invalidity(['from __future__ import division;'])
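
# Allow running this test module directly:
if __name__ == '__main__':
    unittest.main()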
|
StarcoderdataPython
|
74696
|
from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object
from topaz.modules.ffi.function import W_FFIFunctionObject
from rpython.rlib import jit
class W_VariadicInvokerObject(W_Object):
classdef = ClassDef('VariadicInvoker', W_Object.classdef)
def __init__(self, space):
W_Object.__init__(self, space)
self.w_info = None
self.w_handle = None
@classdef.singleton_method('allocate')
def singleton_method_allocate(self, space, args_w):
return W_VariadicInvokerObject(space)
@classdef.method('initialize')
def method_initialize(self, space, w_handle, w_arg_types,
w_ret_type, w_options=None):
self.w_ret_type = w_ret_type
self.w_options = w_options
self.w_handle = w_handle
if w_options is None:
w_type_map = space.newhash()
else:
w_key = space.newsymbol('type_map')
w_type_map = space.send(w_options, '[]', [w_key])
space.send(self, 'init', [w_arg_types, w_type_map])
@classdef.method('invoke', arg_values_w='array')
def method_invoke(self, space, w_arg_types, arg_values_w):
w_func_cls = space.getclassfor(W_FFIFunctionObject)
w_func = space.send(
w_func_cls, 'new',
[self.w_ret_type, w_arg_types, self.w_handle, self.w_options])
return self._dli_call(space, w_func, arg_values_w)
@jit.dont_look_inside
def _dli_call(self, space, w_func, arg_values_w):
# XXX we are missing argument promotion for the variadic arguments here
# see
# http://stackoverflow.com/questions/1255775/default-argument-promotions-in-c-function-calls
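        # (in C, variadic calls promote float -> double and char/short -> int;
        # this binding does not yet emulate that)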
return space.send(w_func, 'call', arg_values_w)
|
StarcoderdataPython
|
4895033
|
from math import *
# a console is a place where python outputs information, which is where the print statement sends our output.
'''
print(" /|")
print(" / |")
print(" / |")
print("/___|")
'''
# a variable is like a container for storing data values,
# which makes it a lot easier for us to manage and work with all the different types of data inside our program
# data types => Strings, numbers and boolean
# name = "Rooney"
# job_description = "Data analyst"
# is_male = False
# print("Hello Guys")
# # print("My name is", name)
# print("My name is" + name)
# print("and I am a", job_description)
# print("yes", job_description)
# print("don't be like " + name)
# print(is_male)
# Strings
# print("Techstudio \nAcademy")
# Concatenation=> The process of taking a string and appending another string onto it
# boot_camp = "Techstudio Academy"
# print(boot_camp + " offers a fullstack web dev program")
# String functions
# a function is basically a block of code that we can run and it'll perform a specific operation for us.
# we can use functions to modify our strings and also get information about our strings.
# Return a copy of the string converted to lowercase.
# print(boot_camp.lower())
# print(boot_camp.upper())
# Return True if the string is an uppercase string, False otherwise.
# A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.
# boot_camp2 = boot_camp.upper()
# print(boot_camp2.isupper())
# print(boot_camp.upper().isupper())
# Return the number of items in a container.
# print(len(boot_camp))
# you can also get index of characters.
# print(boot_camp[0])
# the index fn will tell us where a specific character is located inside of our string.
# print(boot_camp.index("d"))
# Return a copy with all occurences of substring old replaced by new.
# print(boot_camp.replace("Techstudio", "Lambda"))
# Working with numbers in python
# Any python you write would most likely be dealing with numbers at some point.
# print(3 + 4)
# print(4 * (5-2) + (2/50))
# modulus
# print(10 % 8)
# my_num = -69
# print(str(my_num) + " is my favorite number")
# print(floor(3.94))
# print(ceil(3.94))
# print(max(4,5))
# print(sqrt(16))
# name = input("Enter your name? ")
# age = input("How old are you? ")
# print("Hello, your name is", name, "and you are", age, "years old")
# Basic calculator
# num1 = input("Enter a number: ")
# num2 = input("Enter another number: ")
# result = int(num1) + int(num2)
# print(result)
# color = input("Enter a color; ")
# plural_noun = input("Enter a plural noun; ")
# person = input("Insert that guy/babe name; ")
# print("Roses are " + color)
# print(plural_noun + " are blue")
# print("No go dey reason " + person + " wey no dey reason you")
# Practise
# weight_kg=55
# print("weight in pounds", int(2.2 * weight_kg))
# weight_lb = 2.2 * weight_kg
# # print(weight_lb)
# print("weight_kg initial value is: ", weight_kg)
# weight_kg =10000
# print("After updating, weight_kg final value is: ", weight_kg)
# print("The final value of weight_lb is: ", weight_lb)
# weight_lb = 2.2 * weight_kg
# print("The new value of weight_lb is: ", weight_lb)
# Datatypes
# print("Hello " * 3)
# x= "datatype"
# print(type("datatype"))
# multiline = '''
# This string
# spans multiple
# lines
# !!!
# '''
# print(multiline)
# print(type(multiline))
# print(type(5))
# print(type(-1000))
# print(type(6 - 33))
# print(type(10 / 3))
# print(type(1.3))
# print(type(22.))
# print(type(True))
# print(type(False))
# print(type(None))
# 4 / 1.3 == 4
# print(str(1) + '1')
# print(int('6') - 7)
# print(int(7.23))
# print(bool('5'))
# print(floor(4 / 1.3))
# Lists
# a list is a structure that we can use inside our python program to
# store lists of information/data. you can take a bunch of related data values
# and organize them inside of a list and we can use it throughout our
# program. this makes keeping track of our data a lot easier
# we create lists like we create variables in py. although with lists, the
# list name has to be as descriptive as it could be.
# whenever you write this => [], py knows you want to create a list
# marvel_avengers = ['Black Panther', 'Captain America', 'Thor', 'Iron Man']
# print(marvel_avengers)
# print(marvel_avengers[-2])
# marvel_avengers[2] = 'Dr. Strange'
# print(marvel_avengers)
# PRACTISE
# example = ['1', '2', '3', '4', '5']
# print(example)
# example = [1, True, None, ['word', 123], 'test'] + ['1', '2', '3', '4', '5']
# print(example)
# print(example[0])
# print(example[1])
# print(example[2])
# print(example[:2])
# print(example[0:2])
# print(example[:-2])
# print(example[3][1])
# print(example1 + example)
# List1 = [1, 2, 3, 4]
# List2 = List1
# List2 += [5, 6, 7]
# print("List 2 is: ", List2)
# print("List 1 is: ", List1)
# print(id(List1))
# print(id(List2))
# .copy() is a method. Methods are special functions associated with an object and define what it can do.
# list1 = [1, 2, 3, 4]
# list2 = list1.copy()
# list2 += [5, 6, 7]
# print("List 2 is: ", list2)
# print("List 1 is: ", list1)
# Other frequently used methods of lists include .append(): this appends a single element to the list
# list1.append(77)
# print(list1)
# And .extend() (combines two lists, instead of adding the second list as an element):
# list1.extend([99, 88, 101])
# print(list1)
# And of course, .remove() and .clear() (both do exactly what you think they should do):
# list1.remove(88)
# print(list1)
# list1.clear()
# print(list1)
# marvel_avengers = ['Black Panther', 'Captain America', 'Thor', 'Iron Man']
# list1 = [3, 4, 5, 6, 8]
# marvel_avengers.append("Dr Strange")
# print(marvel_avengers)
# marvel_avengers.extend(list1)
# print(marvel_avengers)
# marvel_avengers.insert(2, "Spiderman")
# print(marvel_avengers)
# marvel_avengers.remove("Dr Strange")
# print(marvel_avengers)
# marvel_avengers.pop()
# print(marvel_avengers)
# marvel_avengers.clear()
# print(marvel_avengers)
# marvel_avengers.sort()
# print(marvel_avengers)
# Tuples-Defining and Using Tuples
# A tuple is a collection of objects which are ordered and immutable. Tuples
# are sequences, just like lists. The differences between tuples and lists are,
# the tuples cannot be changed unlike lists and tuples use parentheses,
# whereas lists use square brackets.
# Tuples are identical to lists in all respects, except for the following properties:
# Tuples are defined by enclosing the elements in parentheses (()) instead of square brackets ([]).
# Tuples are immutable.
# t = ('foo', 'bar', 'baz', 'qux', 'quux', 'corge')
# t[2]= 'bark'
# print(t)
# t= (1,)
# print(type(t))
# t = ('foo', 'bar', 'baz', 'qux', 'quux', 'corge')
# print(t[-1])
# (s1, s2, s3, s4, s5, s6, s7)=t
# print(t)
# (s1, s2, s3, s4, s5, s6) = ('foo', 'bar', 'baz', 'qux', 'quux', 'corge')
# print(s4)
# t= 1, 2, 3
# print(t)
# x1, x2, x3 = t
# print(x1, x2, x3)
# x1, x2, x3 = 4, 5, 6
# print(x1, x2, x3 )
# t = 2,
# print(t)
# Functions
# a function is a collection of code which performs a specific task. that is,
# a block of code which only runs when it is called.
# functions allow you to organize your code a lot better. they allow you to
# break up your code into different chunks that are doing different things.
# in python, we use the 'def' keyword when we want to declare a function.
# py will treat everything that comes after the ':' as our function block
# in order to write code that belongs to the block, we need to indent.
# def hello_world(num):
# return num * num * num
# print(hello_world(5))
# the code inside of the function will only get executed when we specify that we want to execute it.
# => invoking the function
# your functions shhould be named in lower case.
# if statements
# 'if' statements are a structure in python that allows us to help our programs make decisions
# by using 'if' statements, we could execute a certain part of our code if certain conditions are True.
# basically 'if' statements allow our programs respond to the input that they are given.
# 'if' statements are something even we humans deal with all the time. as you go through your day,
# you are executing 'if' statements.
# elif for elseif
# not() => negation
# yinka_is_cold = True
# yinka_is_not_cold = False
# yinka_is_shivering = False
# if yinka_is_cold:
# print("The ac should be turned off but the windows open")
# elif yinka_is_not_cold:
# print("The ac should be turned on but the windows closed")
# elif yinka_is_shivering:
# print("The ac should be turned off and the windows shut")
# else:
# print("I'm a hard working youth")
# a = 10
# b = 22
# c = 14
# if (a < b):
# print("The value of a is less than b")
# else:
# print("The value of a is greater than or equal to b")
# if (a < c):
# print("The value of a is less than c")
# else:
# print("The value of a is less than or equal to c")
# if (b < c):
# print("The value of b is less than c")
# elif(b == c):
# print("The value of b is equal to c")
# else:
#     print("the value of b is greater than c")
# collect input from users.. 1st and 2nd number
# user would input operator and py performs the calculation
# ops = input("Enter the operator: ")
# x = float(input("Enter the first number: "))
# y = float(input("Enter the second number: "))
# if(ops == "/"):
# print(x / y)
# elif(ops == "*"):
# print(x * y)
# elif(ops == "+"):
# print(x + y)
# elif(ops == "-"):
# print(x - y)
# else:
# print("Get a calculator!!!")
# Dictionaries
# a dictionary is a special structure in python which allows us to store information in what is
# called Key:value pairs
#key:value
# month_conversions = {
# "Jan": "January",
# "Feb": "February",
# "Mar": "March"
# }
# print(month_conversions.get("Feb"))
# print(month_conversions.get("Dec", "Not a valid key"))
# print(month_conversions)
# # Creating a Dictionary with Integer keys
# Dict = {1: "Geeks", 2: "For", 3: "Geeks"}
# print("\nDictionary with the use of integer keys: ")
# print(Dict)
# Creating a Dictionary with Mixed keys
# Dict = {"Name": "Geeks", 1: [1, 2, 3, 4, 5]}
# print("\nDictionary with the use of Mixed keys: ")
# print(Dict)
# creating an empty dictionary
# Dict = {}
# print("Empty Dictionary: ", Dict)
# Creating a Dictionary with a dict() method
# Dict = dict({1: "Geeks", 2: "For", 3: "Geeks"})
# print("\nDictionary with a dict() method")
# print(Dict)
# Creating a dictionary with each item as a pair
# Dict = dict([(1, "Geeks"), (2, "For"), (3, "Geeks")])
# print(Dict.get(2))
# Creating a nested dictionary
# Dict = {1: "Geeks", 2: "For", 3:{"A": "Welcome", "B": "to", "C": "Geeks"}}
# print(Dict[3]["A"])
# Creating an empty dictionary
# Dict = {}
# print("Empty Dictionary: ", Dict)
# Adding elements one at a time
# Dict[0] = "Geeks"
# Dict [2] = "For"
# Dict [3] = 1
# print("\nDictionary after adding three elements: ", Dict)
# Adding sets of values to a single key
# Dict["Value_set"] = 2, 3, 4
# print("\nUpdated key value: ", Dict)
# Adding Nested Key value to Dictionary
# Dict[5] = {"Nested" :{"1" : "Life", "2": "Geeks"}}
# print("\nAdding Nested Key: ", Dict)
# While loop
# a while loop is a structure in python that allows us to loop through and execute a block of code
# repeatedly until a certain condition is false.
# as long as the condition provided to the loop remains true, the loop continues to run in circles.
# It goes back to check the condition after each iteration.
# i = 1
# while i <= 14:
# print (i)
# i +=1
# print ("done looping")
# n = 5
# while (n > 0):
# print(n)
# n -= 1
# a = ["foo", "bar", "baz"]
# while a:
# print(a.pop(-1))
# a = [1, 2, 3, 4, 5]
# while a:
# print(a.pop())
# For loop
# a for loop is a special type of loop which allows us to loop over different collections of items.
# in for loops we specify a variable. this variable will represent a different value on each iteration
# each time, this variable will most likely have a different value
# for every letter in tsa, do something
# for letter in "TechStudio Academy":
# print(letter)
# airlines = ["Air Peace", "Air France", "Emirates Airlines"]
# for airline in airlines:
# print(airline)
# for index in range(12):
# if index == 0:
# print("First iteration")
# else:
# print("Not First")
# for x in range(3, 6):
# print(x)
# for x in range(3, 8, 2):
# print(x)
# for i in range(1, 4):
# print(i)
# else:
# print("No break\n")
# count = 0
# while True:
# print(count)
# count += 1
# if count >= 5:
# break
# for x in range(1, 10, 2):
# print(x)
# for x in range(10):
# if x % 2 == 0:
# continue
# print(x)
# count = 0
# while (count < 5):
# print(count)
# count += 1
# else:
# print("count value reached %d" %(count))
# for i in range(1,10):
# if(i%5 == 0):
# continue
# print(i)
# print("List Iteration")
# l = ['geeks', 'for', 'geeks']
# for i in l:
# print(i)
# print("Tuple \nIteration")
# t = ('geeks', 'for', 'geeks')
# for i in t:
# print(i)
# print("\nString Iteration")
# s = 'Geeks'
# for i in s:
# print(i)
# print("\nDictionary Iteration")
# d = dict()
# d['xyz'] = 123
# d['abc'] = 345
# for i in d:
# print("% s % d" %(i, d[i]))
# for letter in 'geeksforgeeks':
# if letter == "e" or letter == "s":
# continue
# print("Current Letter: ", letter)
# for letter in 'geeksforgeeks':
# if letter == 'e' or letter == 's':
# break
# print("Current Letter", letter)
# for letter in 'geeksforgeeks':
# pass
# print("Last Letter: ", letter)
# for i in range(10):
# print(i, end = " ")
# l = [10, 20, 30, 40]
# for i in range(len(l)):
# print(l[i], end = " ")
# sum = 0
# for i in range(1, 10):
# sum = sum + i
# print(sum, end = " ")
# for i in range(1, 10):
# print(i)
# else:
# print("No Break\n")
# 2D Lists and Nested loops
# T = [[11, 12, 5, 2], [15, 6,10], [10, 8, 12, 5], [12,15,8,6]]
# for r in T:
# for c in r:
# print(c, end = " ")
# print()
# T.remove([2][2])
# for r in T:
# for c in r:
# print(c, end =" ")
# print()
# N = 5
# arr = [0] * N
# print(arr)
# N = 5
# arr = [0 for i in range(N)]
# print(arr)
# rows, cols = (5, 4)
# arr = [[0]*cols]*rows
# print(arr)
# N = 5
# arr = [[0 for i in range(cols)] for j in range(rows)]
# def translate(text):
# translation = ""
# for letter in text:
# if letter in "AEIOUaeiou":
# translation = translation + "m"
# else:
# translation = translation + letter
# return translation
# print(translate(input("ENter any text: ")))
# there might be runtime errors we want to catch with try/except
# def divide(x, y):
# try:
# floor division: gives only the integer part of the quotient
# result = x // y
# print("Yeah ! Your answer is :", result)
# except ZeroDivisionError:
# print("Sorry ! You are dividing by zero ")
# Look at parameters and note the working of program
# divide(3, 0)
# Reading from an external file
# syllabus = open("syl.md", "r")
# syllabus = open("./TechStudio/syl.md", "a")
# syllabus.write("\n34=> Some Other stuffs")
# print(syllabus.read())
# print(syllabus.readlines())
# syllabus.close()
# pip install python-docx
# file = open("file.txt", "r")
# for each in file:
# print(each)
# syllabus = open("file.txt", "r")
# print(syllabus.read(5))
# file = open("Geek.txt", "w")
# file.write("This is the write command in Python. ")
# file.write("It allows to write in a particular file")
# file.close()
# file = open("sample.txt", "r")
# print(file.read())
# file.close()
# with open("Geek.txt") as file:
# data = print(file.read())
# with open("Geek.txt", "r") as file:
# data = file.readlines()
# for line in data:
# word = line.split()
# print(word)
# with open("syl.md", "w") as file:
# file.write("Hello World!!!")
# import num
# print(num.square(3))
# print(num.cube(3))
# import calc
# print(calc.add(10, 2))
# from math import sqrt, factorial
# print(sqrt(25))
# print(factorial(6))
# import random
# print(dir(random))
# import math
# print(sqrt(25))
# print(pi)
# print(math.degrees(4))
# print(radians(60))
# import random
# print(random.randint(0, 5))
# print(random.random() * 100)
# List = [1, 4, True, 800, "python", 27, "hello"]
# print(random.choice(List))
# import datetime
# from datetime import date
# import time
# print(time.time())
# print(date.fromtimestamp)
# import support
# support.print_func("Zara")
# Money = 2000
# def AddMoney():
# Uncomment the following line to fix the code:
# global Money
# Money = Money + 1
# print(AddMoney)
# AddMoney()
# print(Money)
# fo = open("foo.txt", "wb")
# print("Name of the file: ", fo.name)
# print("Closed or not: ", fo.closed)
# print("Opening mode: ", fo.mode)
# print("Softspace flag: ", fo.softspace)
|
StarcoderdataPython
|
5167142
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.DlsiteScraper import DlsiteScraper
from lib.JsonFile import JsonFile
import lib.setting as setting
# main
import traceback
# extract_genre_count
import collections
import itertools
# calculate_inclination
# sanpu
import matplotlib.pyplot as plt
import numpy as np
def merge_works_detail(purchase_history, works_detail):
result = []
for (purchase, detail) in zip(purchase_history, works_detail):
purchase.update({'genre': detail['genre']})
result.append(purchase)
return result
def extract_genre_count(purchase_history):
items = list(itertools.chain.from_iterable(
[item.get('genre', []) for item in purchase_history]
))
genre_count = collections.OrderedDict(
collections.Counter(items).most_common()
)
return genre_count
def count_genre(genres_count, workcount):
result = []
for genre in workcount:
purchase_count = (
genres_count[genre['text']]
if genre['text'] in genres_count
else 0
)
genre['purchase_count'] = purchase_count
result.append(genre)
return result
def calculate_inclination(items):
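    # Fit a cubic trend of purchase counts against overall genre counts, then
    # rank genres by residual: large positive residuals ('性癖', roughly
    # "strong preferences") are genres bought far more often than the trend
    # predicts; large negative ones ('地雷', "landmines", i.e. dislikes) far less.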
text = np.array([])
all_count = np.array([])
purchase_count = np.array([])
for item in items:
text = np.append(text, [item['text']])
all_count = np.append(all_count, [item['all_count']])
purchase_count = np.append(purchase_count, [item['purchase_count']])
ab1 = np.polyfit(all_count, purchase_count, 3)
all_C = np.poly1d(ab1)(all_count)
plt.plot(all_count, all_C)
aaa = []
for (j, t) in zip(purchase_count, all_C):
aaa.append(j - t)
max_list = sorted(aaa, reverse=True)
min_list = sorted(aaa)
result = ''
hr = '======================'
lf = "\n"
result = result + hr + lf
for index in range(10):
result = result + (
'性癖:' +
text[aaa.index(max_list[index])] +
'、' +
str(np.round(max_list[index]))
) + lf
result = result + hr + lf
for index in range(10):
result = result + (
'地雷:' +
text[aaa.index(min_list[index])] +
'、' +
str(np.round(min_list[index]))
) + lf
result = result + hr + lf
return result
def sanpu(items, label=True):
text = np.array([])
all_count = np.array([])
purchase_count = np.array([])
for item in items:
text = np.append(text, [item['text']])
all_count = np.append(all_count, [item['all_count']])
purchase_count = np.append(purchase_count, [item['purchase_count']])
# print(np.corrcoef(np.array([all_count, purchase_count])))
ab1 = np.polyfit(all_count, purchase_count, 3)
all_C = np.poly1d(ab1)(all_count)
plt.plot(all_count, all_C)
for (i, j, k) in zip(all_count, purchase_count, text):
plt.plot(i, j, 'o')
if label:
plt.annotate(k, xy=(i, j))
    plt.xlabel('総販売作品のジャンル登録数')  # "genre tag count across all works on sale"
    plt.ylabel('購入済作品のジャンル登録数')  # "genre tag count across purchased works"
# plt.show()
plt.savefig('_figure.png')
def main():
# input user_code and password
login_id = setting.USER
    login_pass = <PASSWORD>  # redacted in the source; presumably read from lib.setting like login_id above
purchase_works_json_path = '_purchase_works.json'
count_genre_json_path = '_count_genre_dict.json'
result_path = '_result.txt'
site = DlsiteScraper()
purchase_history = []
try:
# login dlsite
if site.login(login_id, login_pass):
print("login success")
# scrape purchase_history
purchase_history = site.scrape_purchase_history()
print("scrape purchase_history success")
else:
print("login failed")
except BaseException:
traceback.print_exc()
finally:
del site
if purchase_history == []:
print("scrape purchase_history failed")
exit()
# scrape works_detail and workcount
print("scrape works_detail and workcount")
work_url_list = [x['work_name']['url'] for x in purchase_history]
works = []
for work_url in work_url_list:
print(work_url)
works.append(DlsiteScraper.scrape_work_detail(work_url))
workcount = DlsiteScraper.fetch_workcount()
print("scrape works_detail and workcount success")
# merge purchase_history and works
print("merge purchase_history and works")
purchase_works = merge_works_detail(purchase_history, works,)
total_amount = 0
for item in purchase_works:
total_amount += int(''.join(item['work_price'][:-1].split(',')))
print("total_amount:" + str(total_amount))
JsonFile.save(purchase_works_json_path, purchase_works)
print("merge purchase_history and works success")
# count genre
print("count genre")
genres_count = extract_genre_count(purchase_works)
count_genre_dict = count_genre(genres_count, workcount)
JsonFile.save(count_genre_json_path, count_genre_dict)
print("count genre success")
# calculate inclination
print("calculate inclination")
result = calculate_inclination(count_genre_dict)
with open(result_path, 'w', encoding='utf_8') as f:
f.write(result)
print("calculate inclination success")
# drawing graph
print("drawing graph")
sanpu(count_genre_dict, label=True)
print("drawing graph success")
if __name__ == "__main__":
main()
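# Worked example (added; not part of the original script) of what
# extract_genre_count() returns for hypothetical purchase data:
#
#   history = [{'genre': ['fantasy', 'voice']}, {'genre': ['fantasy']}]
#   extract_genre_count(history)
#   # -> OrderedDict([('fantasy', 2), ('voice', 1)])
#
# Genres are flattened with itertools.chain and tallied with
# collections.Counter, most common first.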
|
StarcoderdataPython
|
12857558
|
<reponame>ThaDeveloper/grind<filename>src/api/models/user.py
"""User model module"""
import jwt
from datetime import datetime, timedelta
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.conf import settings
from django.core.validators import RegexValidator
from .base_model import CommonFields
from api.models.user_manager import UserManager
class User(AbstractBaseUser, PermissionsMixin, CommonFields):
"""User model """
USER_TYPE = [
('professional', 'professional'),
('client', 'client')
]
USERNAME_REGEX = '^[a-zA-Z]{5,}$'
first_name = models.CharField(max_length=30, null=False)
last_name = models.CharField(max_length=30, null=False)
email = models.EmailField(unique=True, null=False)
username = models.CharField(
max_length=30,
validators=[
RegexValidator(
regex=USERNAME_REGEX,
message='Username must be 5 or more alphabetic characters',
code='invalid_username')],
unique=True, null=False)
password = models.CharField(max_length=128, null=False)
active = models.BooleanField(default=True)
admin = models.BooleanField(default=False)
staff = models.BooleanField(default=False)
user_type = models.CharField(max_length=20, choices=USER_TYPE, null=False)
date_joined = models.DateTimeField(auto_now_add=True)
last_login = models.DateTimeField(auto_now=True)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', ]
class Meta:
"""metadata options."""
ordering = ('pk',)
verbose_name = 'User'
def __str__(self):
"""Return object's string representation."""
return f'{self.first_name} {self.last_name}'
@property
def is_active(self):
"""Check if user is active."""
return self.active
@property
def is_staff(self):
"""Check whether user is a staff."""
return self.staff
@property
def is_superuser(self):
"""Check whether user is a super user."""
return self.admin
@property
def token(self):
"""
Get a user's token by calling `user.token`.
"""
return self._generate_jwt_token()
def _generate_jwt_token(self):
"""
Generates a JSON Web Token for access to auth endpoints
"""
dt = datetime.now() + timedelta(days=1)
token = jwt.encode({
'id': self.pk,
'username': self.username,
'email': self.email,
'exp': int(dt.strftime('%s'))
}, settings.SECRET_KEY, algorithm='HS256')
return token.decode('utf-8')
    def get_full_name(self):
        return f'{self.first_name} {self.last_name}'
def get_short_name(self):
return self.username
def has_perm(self, perm, obj=None):
return self.admin
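# Illustrative sketch (added; not part of the model). A token produced by
# User.token can be verified with the same secret. This assumes PyJWT 1.x,
# where jwt.encode() returns bytes (hence the .decode('utf-8') above);
# PyJWT >= 2.0 returns str directly.
#
#   import jwt
#   from django.conf import settings
#   payload = jwt.decode(user.token, settings.SECRET_KEY, algorithms=['HS256'])
#   payload['username']   # -> user.username; expires one day after issue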
|
StarcoderdataPython
|
1773286
|
from util.Docker import Docker
class Dredd:
image = 'weaveworksdemos/openapi'
container_name = ''
def test_against_endpoint(self, json_spec, endpoint_container_name, api_endpoint, mongo_endpoint_url,
mongo_container_name):
self.container_name = Docker().random_container_name('openapi')
command = ['docker', 'run',
'-h', 'openapi',
'--name', self.container_name,
'--link', mongo_container_name,
'--link', endpoint_container_name,
'--env', "MONGO_ENDPOINT={0}".format(mongo_endpoint_url),
Dredd.image,
"/usr/src/app/{0}".format(json_spec),
api_endpoint,
"-f",
"/usr/src/app/hooks.js"
]
out = Docker().execute(command)
Docker().kill_and_remove(self.container_name)
return out
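# Hedged usage sketch (added; the spec and container names below are
# hypothetical, not taken from the repo): run the Dredd contract test for a
# service spec against a running endpoint container linked to a Mongo
# container.
#
#   out = Dredd().test_against_endpoint(
#       'catalogue.json',                 # spec path inside the image
#       'catalogue',                      # endpoint container name
#       'http://catalogue:80',            # API endpoint URL
#       'mongodb://catalogue-db:27017',   # Mongo endpoint URL
#       'catalogue-db')                   # Mongo container name
#   print(out)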
|
StarcoderdataPython
|
3246552
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from Commands.Keys import Button, Direction, Hat
from Commands.PythonCommandBase import PythonCommand
# import numpy as np
from scipy.sparse.csgraph import shortest_path # , floyd_warshall, dijkstra, bellman_ford, johnson
from scipy.sparse import csr_matrix
serial = {0: '-1',
1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: '0', 11: '@', 12: 'BS',
13: 'Q', 14: 'W', 15: 'E', 16: 'R', 17: 'T', 18: 'Y', 19: 'U', 20: 'I', 21: 'O', 22: 'P', 23: '=',
24: 'A', 25: 'S', 26: 'D', 27: 'F', 28: 'G', 29: 'H', 30: 'J', 31: 'K', 32: 'L', 33: '&', 34: ';',
35: 'Z', 36: 'X', 37: 'C', 38: 'V', 39: 'B', 40: 'N', 41: 'M', 42: '*', 43: '#', 44: '!', 45: '?',
46: 'SelectKeyboard', 47: 'Shift', 48: '#+=', 49: 'nl_1', 50: 'nl_2', 51: 'nl_3',
52: 'ok_1', 53: 'ok_2', 54: 'ok_3', 55: 'blank_1', 56: 'blank_2', 57: 'blank_3', 58: 'blank_4',
59: 'blank_5', 60: 'blank_6', 61: 'blank_7', 62: 'blank_8', }
serial_inv = {v: k for k, v in serial.items()}
serial_graph_list = [[],
# 1-5
[2, 13, 12], [1, 3, 14], [2, 4, 15], [3, 5, 16], [4, 6, 17],
# 6-10
[5, 7, 18], [6, 8, 19], [7, 9, 20], [8, 10, 21], [9, 11, 22],
# 11-15 @ ~ E
[10, 12, 23], [11, 49, 1], [1, 24, 14, 49], [2, 13, 15, 25], [3, 14, 16, 26],
# 16-20 R ~ I
[4, 15, 17, 27], [5, 16, 18, 28], [6, 17, 19, 29], [7, 18, 20, 30], [8, 19, 21, 31],
# 21-25 O ~ S
[9, 20, 22, 32], [10, 21, 23, 33], [11, 22, 34, 49], [13, 25, 35, 50], [14, 24, 26, 36],
# 26-30 D ~ J
[15, 25, 27, 37], [16, 26, 28, 38], [17, 27, 29, 39], [18, 28, 30, 40], [19, 29, 31, 41],
# 31-35 J ~ Z
[20, 30, 32, 42], [21, 31, 33, 43], [22, 32, 34, 44], [23, 33, 45, 50], [24, 46, 36, 53],
# 36-40 X ~ N
[25, 35, 37, 47], [26, 36, 38, 48], [27, 37, 39, 55], [28, 38, 40, 56], [29, 39, 41, 57],
# 41-45 M ~ ?
[30, 40, 42, 58], [31, 41, 43, 59], [32, 42, 44, 60], [33, 43, 45, 61], [34, 44, 62, 53],
# 46-50
[35, 47, 54], [36, 46, 48], [37, 47, 55], [12, 23, 13, 52], [12, 34, 24, 53],
# 51-56
[12, 34, 24, 54], [49, 45, 35], [45, 35, 50], [55, 46, 51], [38, 48, 54], [39, 48, 54],
# 57-62
[40, 48, 54], [41, 48, 54], [42, 48, 54], [43, 48, 54], [44, 48, 54], [45, 48, 54]]
class InputKeyboard(PythonCommand):
    NAME = 'シリアル入力'  # "Serial input"
def __init__(self):
super().__init__()
        self.s = 'F105LP98GMFCB3RA'  # the string to type in
self.now_dict = serial_graph_list
self.now_dict_ = serial
self.now_dict_inv = serial_inv
self.graph = None
self.d = None
self.p = None
self.n = None
self.MakeGraph()
        self.pos = 1  # initial cursor position
def MakeGraph(self):
self.n = len(self.now_dict)
        self.graph = [[0] * self.n for _ in range(self.n)]  # adjacency matrix
for i, g_i in enumerate(self.now_dict):
for j in g_i:
self.graph[i][j] = 1
# for i in self.graph:
# print(" ".join(list(map(str, i))))
a = csr_matrix(self.graph)
self.d, self.p = shortest_path(a, return_predecessors=True)
def do(self):
input_char = 0
for i in self.s:
print(self.now_dict_[self.now_dict_inv[i]])
t = GetPath(self.pos, self.now_dict_inv[i], self.p)
print(t)
stick = False
stick = self.Move(t, stick)
if not stick:
self.press(Button.A, wait=0.03, duration=0.05)
input_char += 1
    def Move(self, t, stick):  # performs the cursor movement along path t
for j in range(len(t) - 1):
if t[j + 1] == 1 and t[j] == 12:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
if t[j + 1] == 12:
if t[j] in [49, 50, 51]:
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j] == 1:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 11:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j + 1] == 13: # Q
if t[j] == 49:
self.press(Direction.RIGHT, wait=0.1, duration=0.05)
elif t[j] == 1:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j] == 14:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 24:
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j + 1] == 23: # =
if t[j] == 22:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j] == 11:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j] == 49:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 34:
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j + 1] == 24: # A
if t[j] in [50, 51]:
self.press(Direction.RIGHT, wait=0.1, duration=0.05)
elif t[j] == 13:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j] == 25:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 35:
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j + 1] == 34: # ;
if t[j] == 33:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j] == 23:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j] in [50, 51]:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 45:
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j + 1] == 35: # Z
if t[j] in [52, 53]:
self.press(Direction.RIGHT, wait=0.1, duration=0.05)
elif t[j] == 24:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j] == 36:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 46:
self.press(Direction.UP, wait=0.03, duration=0.05)
            elif t[j + 1] in [38, 39, 40, 41, 42, 43, 44] and t[j + 1] - t[j] == -17:  # V ~ ! row, moving up from the blank row
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j + 1] == 45: # ?
if t[j] == 44:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j] == 34:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j] in [52, 53]:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 62:
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j + 1] == 48 and t[j] in [55, 56, 57, 58, 59, 60, 61, 62]:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j + 1] == 49:
if t[j] == 12:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j] == 23:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j] == 13:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 52:
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j + 1] == 50:
if t[j] == 34:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j] == 24:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 53:
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j + 1] == 51:
if t[j] == 54:
self.press(Direction.UP, wait=0.03, duration=0.05)
elif t[j + 1] == 52:
if t[j] == 49:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j + 1] == 53:
if t[j] == 45:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j] == 35:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 50:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j + 1] == 54:
if t[j] in [55, 56, 57, 58, 59, 60, 61, 62]:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j] == 46:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 51:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j + 1] == 55:
if t[j] == 48:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j] == 54:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j] == 38:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j + 1] in [56, 57, 58, 59, 60, 61, 62] and t[j + 1] - t[j] == 17:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j + 1] - t[j] == 1:
self.press(Direction.RIGHT, wait=0.03, duration=0.05)
elif t[j + 1] - t[j] == -1:
self.press(Direction.LEFT, wait=0.03, duration=0.05)
elif t[j + 1] - t[j] in [11, 12]:
self.press(Direction.DOWN, wait=0.03, duration=0.05)
elif t[j + 1] - t[j] in [-11, -12]:
self.press(Direction.UP, wait=0.03, duration=0.05)
if t[j + 1] not in list(range(67, self.n)):
self.pos = self.now_dict_inv[self.now_dict_[t[j + 1]]]
return stick
def GetPath(start, goal, pred):
return GetPathRow(start, goal, pred[start])
def GetPathRow(start, goal, pred_row):
path = []
i = goal
while i != start and i >= 0:
path.append(i)
i = pred_row[i]
if i < 0:
return []
path.append(i)
return path[::-1]
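# Worked example (added, illustrative): reconstruct a shortest path from the
# scipy predecessor matrix the same way InputKeyboard does. Run inside the
# Poke-Controller environment, since the module-level imports require it.
if __name__ == '__main__':
    demo_graph = csr_matrix([[0, 1, 0, 0],
                             [1, 0, 1, 0],
                             [0, 1, 0, 1],
                             [0, 0, 1, 0]])
    _, demo_pred = shortest_path(demo_graph, return_predecessors=True)
    print(GetPath(0, 3, demo_pred))  # -> [0, 1, 2, 3]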
|
StarcoderdataPython
|
1864620
|
<reponame>jmnguye/awx_work
# print each element of the list only once (keep the first occurrence)
groups = ['awx_my_admins','awx_my_users','awx_my_admins']
printed_groups = []
for group in groups:
found = False
for printed_group in printed_groups:
if group == printed_group:
found = True
    if not found:
printed_groups.append(group)
for printed_group in printed_groups:
print(printed_group)
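# Equivalent idiomatic one-liner (added as a suggestion, not in the
# original): dict.fromkeys preserves insertion order in Python 3.7+, so it
# deduplicates while keeping the first occurrence of each group.
#
#   for group in dict.fromkeys(groups):
#       print(group)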
|
StarcoderdataPython
|
9673660
|
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
from tensorflow.keras.layers import Input, Dense, SimpleRNN, GRU, LSTM, Bidirectional
from tensorflow.keras.models import Model
USE_TOY_WEIGHTS = True
REC_LAYER = GRU
sequence_length = 3
feature_dim = 1
features_in = Input(batch_shape=(1, sequence_length, feature_dim))
state_h_in = Input(batch_shape=(1, 1))
rnn_out = REC_LAYER(1, activation=None, use_bias=False, return_sequences=True, return_state=False, stateful=False)(features_in, initial_state=state_h_in)
stateless_model = Model(inputs=[features_in, state_h_in], outputs=rnn_out)
stateful_rnn_out = REC_LAYER(1, activation=None, use_bias=False, return_sequences=True, return_state=False, stateful=True)(features_in, initial_state=state_h_in)
stateful_model = Model(inputs=[features_in, state_h_in], outputs=stateful_rnn_out)
if USE_TOY_WEIGHTS:
if REC_LAYER == SimpleRNN:
toy_weights = [ np.asarray([[1.0]], dtype=np.float32), np.asarray([[-0.5]], dtype=np.float32)]
elif REC_LAYER == GRU:
# for a GRU, the first are the non-recurrent kernels W, and the second are the recurrent kernels U (V)
toy_weights = [np.asarray([[ 1.0, -2.0, 3.0 ]], dtype=np.float32), np.asarray([[ -0.5 , 2.0, -1.1 ]], dtype=np.float32)]
stateless_model.set_weights(toy_weights)
stateful_model.set_weights(toy_weights)
# w = stateless_model.get_weights()
# print(w)
stateless_model.save('temp_stateless.h5', include_optimizer=False)
stateful_model.save('temp_stateful.h5', include_optimizer=False)
x_in = np.zeros(sequence_length)
x_in[0] = 1
x_in = x_in.reshape( (1, sequence_length, feature_dim) )
initial_state = np.asarray( [10])
initial_state = initial_state.reshape((1,1))
def print_rnn_out(non_stateful_out, stateful_out):
print(f'non_stateful: {non_stateful_out}')
print(f'stateful: {stateful_out}')
print(f'delta: {stateful_out-non_stateful_out}')
non_stateful_out = stateless_model.predict([x_in, initial_state]).reshape((sequence_length))
stateful_out = stateful_model.predict([x_in, initial_state]).reshape((sequence_length))
print_rnn_out(non_stateful_out, stateful_out)
non_stateful_out = stateless_model.predict([x_in, initial_state]).reshape((sequence_length))
stateful_out = stateful_model.predict([x_in, initial_state]).reshape((sequence_length))
print_rnn_out(non_stateful_out, stateful_out)
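# Note (added): a stateful layer carries its hidden state across predict()
# calls, while the stateless model re-reads initial_state every time, so the
# second pair of predictions above can diverge until reset_states() is called.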
print('\n** RESETTING STATES in STATEFUL MODEL **\n')
stateful_model.reset_states()
non_stateful_out = stateless_model.predict([x_in, initial_state]).reshape((sequence_length))
stateful_out = stateful_model.predict([x_in, initial_state]).reshape((sequence_length))
print_rnn_out(non_stateful_out, stateful_out)
non_stateful_out = stateless_model.predict([x_in, initial_state]).reshape((sequence_length))
stateful_out = stateful_model.predict([x_in, initial_state]).reshape((sequence_length))
print_rnn_out(non_stateful_out, stateful_out)
|
StarcoderdataPython
|
8147409
|
<reponame>svilen-ivanov/sgp<filename>ep2/brute_force.py<gh_stars>1-10
def subset_sum(numbers, target, partial=[], partial_sum=0):
if partial_sum == target:
yield partial
if partial_sum >= target:
return
    for i, n in enumerate(numbers):
        # slice from i so coins may repeat without enumerating every
        # permutation of the same combination twice
        remaining = numbers[i:]
        yield from subset_sum(remaining, target, partial + [n], partial_sum + n)
if __name__ == '__main__':
for x in subset_sum(list(reversed([1, 2, 5, 10, 20, 50])), 142):
if (len(x)) < 10:
x.sort()
print(x)
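# For contrast (added, illustrative): counting the ways to reach the target
# with unlimited coins is the classic dynamic-programming coin-change
# formulation, which avoids enumerating permutations entirely.
#
#   def count_ways(coins, target):
#       ways = [1] + [0] * target
#       for c in coins:
#           for amount in range(c, target + 1):
#               ways[amount] += ways[amount - c]
#       return ways[target]
#
#   count_ways([1, 2, 5, 10, 20, 50], 142)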
|
StarcoderdataPython
|
1969095
|
from .common import ViewBase
from pyramid.view import view_config
from datetime import datetime, timedelta
from ..schema import *
class AdminViews(ViewBase):
@view_config(route_name='maintenance', renderer='admin/maintenance.mak', permission='admin', request_method='GET')
def maintenance(self):
return self.return_dict(title='Maintenance')
@view_config(route_name='maintenance', renderer='admin/maintenance.mak', permission='admin', request_method='POST')
def maintenance_post(self):
if 'remove_old_users' in self.request.params:
clean_users()
self.request.flash('Old users removed.')
elif 'remove_old_versions' in self.request.params:
clean_versions()
self.request.flash('Old versions removed.')
return self.return_dict(title='Maintenance')
def clean_users():
too_old = datetime.now() - timedelta(days=45)
for user in User.objects:
# The user has never logged in or logged in more than 45 days ago
ltime = not user.last_login or user.last_login < too_old
# If the user is in the user group and has no mods, along with
# not having logged in recently, delete 'em
if not Mod.objects(owner=user) and user.group == 'user' and ltime:
user.delete()
def clean_versions():
for mod in Mod.objects:
occupied = {'stable': [], 'devel': []}
for version in mod.versions[::-1]:
occ_list = occupied['devel' if version.devel else 'stable']
if version.mc_version in occ_list:
print('Will delete {0} {1}'.format(mod.name, version.version))
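                # NOTE: this branch only logs the candidate; the version is
                # not actually removed from mod.versions here.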
else:
occ_list.append(version.mc_version)
|
StarcoderdataPython
|
3380753
|
import bs4
import base64
previous_scripts = set()
class Item:
def __init__(self, item):
self._item = item
def get_req_resp(self, reqresp):
try:
return base64.b64decode(reqresp.string).decode()
except UnicodeDecodeError:
print(self._item.host)
print(self._item.path)
return "<html></html>"
@property
def request(self):
return self.get_req_resp(self._item.request)
@property
def response(self):
return self.get_req_resp(self._item.response)
@property
def html(self):
index = self.response.find('<html')
html = self.response[index:]
return bs4.BeautifulSoup(html, 'html.parser')
@property
def scripts(self):
return self.html.find_all('script')
def __getattr__(self, name):
return getattr(self._item, name)
def scripts_item(item):
scripts = item.scripts
if not scripts:
return None
scripts_text = []
for script in scripts:
script_text = str(script)
if script_text not in previous_scripts:
previous_scripts.add(script_text)
scripts_text.append(script_text)
if not scripts_text:
return None
new_item = "<item>\n"
for child in item.children:
if child.name != "request" and child.name != "response":
new_item += str(child)
new_item += "<scripts>\n" + "\n".join(scripts_text) + "</scripts>\n</item>"
return new_item
def extract_js(input_path, output_path):
    with open(input_path) as f:
        contents = f.read()
    soup = bs4.BeautifulSoup(contents, "xml")
    items = soup.items.find_all('item')
    new_items = []
    for item in items:
        new_item = scripts_item(Item(item))
        if new_item:
            new_items.append(new_item)
    with open(output_path, 'w') as f:
        string = "<items>\n" + "\n".join(new_items) + "</items>"
        f.write(string)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Extract JavaScript from burp export")
parser.add_argument("input_file", help="Export from burp items")
parser.add_argument("output_file", help="Output file")
args = parser.parse_args()
extract_js(args.input_file, args.output_file)
|
StarcoderdataPython
|
4859653
|
<reponame>sreenathmmenon/astttproject
from django.views import generic
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon import tables
from astutedashboard.common import get_plan, \
get_user_sub_plans, \
get_user_billing_type
from astutedashboard.dashboards.project.subscribed_plans \
import tables as sub_plan_tables
class IndexView(generic.TemplateView):
#table_class = sub_plan_tables.SubscribedPlansTable
template_name = 'project/subscribed_plans/index.html'
page_title = _("Subscribed Plans")
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
        billing_details = get_user_billing_type(self.request)
        # Every user has at least the default PAYG plan, and a user can
        # have only one active billing plan at a time.
        context['plans'] = get_user_sub_plans(self.request)
return context
class UserSubPlanDetailsView(generic.TemplateView):
template_name = 'project/subscribed_plans/plan.html'
def get_context_data(self, **kwargs):
context = super(UserSubPlanDetailsView, self).get_context_data(**kwargs)
id = self.kwargs['id']
context['plan_details'] = get_plan(self.request, id)
return context
|
StarcoderdataPython
|
1993410
|
"""Wrapper module for converting an OpenGL (vispy) canvas to an image."""
from PIL import Image
def pixels_to_image(pixels, size, path):
"""Reads an array of pixels (RGBA) and outputs a png image.
Arguments:
pixels -- Array of pixel data to read.
size -- width and height of the image in a tuple.
path -- A filepath to save the output image to.
"""
img = Image.new("RGBA", size)
pix = img.load()
for y, row in enumerate(pixels):
for x, col in enumerate(row):
if y < size[1] and x < size[0]:
pix[x, y] = tuple(col)
img.save(path)
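# Hedged usage sketch (added; not part of the module): write a 2x2 red square.
#
#   red = (255, 0, 0, 255)
#   pixels_to_image([[red, red], [red, red]], (2, 2), 'out.png')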
|
StarcoderdataPython
|
3361728
|
<filename>tests/unit/test_tempo_client.py<gh_stars>1-10
def test_tempo_client(tempo_client, tempo_request):
request = tempo_request.get('/foo')
response = tempo_client.get('/foo')
assert response.status_code == 200
assert request.called_once
assert request.last_request.headers['User-Agent'].startswith('tempocli python-requests')
assert request.last_request.headers['Authorization'] == 'Bearer {}'.format(tempo_client.token)
|
StarcoderdataPython
|
8114140
|
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.random.set_seed(2467)
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.shape[0]
pred = tf.math.top_k(output, maxk).indices
pred = tf.transpose(pred, perm=[1, 0])
target_ = tf.broadcast_to(target, pred.shape)
# [10, b]
correct = tf.equal(pred, target_)
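    # `correct` has shape [maxk, batch]; row k is True where the k-th ranked
    # prediction equals the label, so summing the first k rows counts top-k hits.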
res = []
for k in topk:
correct_k = tf.cast(tf.reshape(correct[:k], [-1]), dtype=tf.float32)
correct_k = tf.reduce_sum(correct_k)
acc = float(correct_k * (100.0 / batch_size))
res.append(acc)
return res
output = tf.random.normal([10, 6])
output = tf.math.softmax(output, axis=1)
target = tf.random.uniform([10], maxval=6, dtype=tf.int32)
print('prob:', output.numpy())
pred = tf.argmax(output, axis=1)
print('pred:', pred.numpy())
print('label:', target.numpy())
acc = accuracy(output, target, topk=(1, 2, 3, 4, 5, 6))
print('top-1-6 acc:', acc)
|
StarcoderdataPython
|
6638436
|
import pytest
import theano
import theano.tensor as tt
from operator import add
from unification import unify, reify, var, variables
from kanren.term import term, operator, arguments
from symbolic_pymc.meta import mt
from symbolic_pymc.utils import graph_equal
from symbolic_pymc.unify import (ExpressionTuple, etuple, tuple_expression)
def test_unification():
x, y, a, b = tt.dvectors('xyab')
x_s = tt.scalar('x_s')
y_s = tt.scalar('y_s')
c_tt = tt.constant(1, 'c')
d_tt = tt.constant(2, 'd')
# x_l = tt.vector('x_l')
# y_l = tt.vector('y_l')
# z_l = tt.vector('z_l')
x_l = var('x_l')
y_l = var('y_l')
z_l = var('z_l')
assert a == reify(x_l, {x_l: a}).reify()
test_expr = mt.add(1, mt.mul(2, x_l))
test_reify_res = reify(test_expr, {x_l: a})
assert graph_equal(test_reify_res.reify(), 1 + 2*a)
z = tt.add(b, a)
assert {x_l: z} == unify(x_l, z)
assert b == unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify()
res = unify(mt.inv(mt.add(x_l, a)), mt.inv(mt.add(b, y_l)))
assert res[x_l].reify() == b
assert res[y_l].reify() == a
# TODO: This produces a `DimShuffle` so that the scalar constant `1`
# will match the dimensions of the vector `b`. That `DimShuffle` isn't
# handled by the logic variable form.
# assert unify(mt.add(x_l, 1), mt.add(b_l, 1))[x] == b
with variables(x):
assert unify(x + 1, b + 1)[x].reify() == b
assert unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify() == b
with variables(x):
assert unify(x, b)[x] == b
assert unify([x], [b])[x] == b
assert unify((x,), (b,))[x] == b
assert unify(x + 1, b + 1)[x].reify() == b
assert unify(x + a, b + a)[x].reify() == b
with variables(x):
assert unify(a + b, a + x)[x].reify() == b
mt_expr_add = mt.add(x_l, y_l)
# The parameters are vectors
tt_expr_add_1 = tt.add(x, y)
assert graph_equal(tt_expr_add_1,
reify(mt_expr_add,
unify(mt_expr_add, tt_expr_add_1)).reify())
# The parameters are scalars
tt_expr_add_2 = tt.add(x_s, y_s)
assert graph_equal(tt_expr_add_2,
reify(mt_expr_add,
unify(mt_expr_add, tt_expr_add_2)).reify())
# The parameters are constants
tt_expr_add_3 = tt.add(c_tt, d_tt)
assert graph_equal(tt_expr_add_3,
reify(mt_expr_add,
unify(mt_expr_add, tt_expr_add_3)).reify())
def test_etuple():
"""Test basic `etuple` functionality.
"""
def test_op(*args):
return tuple(object() for i in range(sum(args)))
e1 = etuple(test_op, 1, 2)
assert not hasattr(e1, '_eval_obj')
with pytest.raises(ValueError):
e1.eval_obj = 1
e1_obj = e1.eval_obj
assert len(e1_obj) == 3
assert all(type(o) == object for o in e1_obj)
# Make sure we don't re-create the cached `eval_obj`
e1_obj_2 = e1.eval_obj
assert e1_obj == e1_obj_2
# Confirm that evaluation is recursive
e2 = etuple(add, (object(),), e1)
# Make sure we didn't convert this single tuple value to
# an `etuple`
assert type(e2[1]) == tuple
# Slices should be `etuple`s, though.
assert isinstance(e2[:1], ExpressionTuple)
assert e2[1] == e2[1:2][0]
e2_obj = e2.eval_obj
assert type(e2_obj) == tuple
assert len(e2_obj) == 4
assert all(type(o) == object for o in e2_obj)
# Make sure that it used `e1`'s original `eval_obj`
assert e2_obj[1:] == e1_obj
# Confirm that any combination of `tuple`s/`etuple`s in
# concatenation result in an `etuple`
e_radd = (1,) + etuple(2, 3)
assert isinstance(e_radd, ExpressionTuple)
assert e_radd == (1, 2, 3)
e_ladd = etuple(1, 2) + (3,)
assert isinstance(e_ladd, ExpressionTuple)
assert e_ladd == (1, 2, 3)
def test_etuple_term():
"""Test `tuple_expression` and `etuple` interaction with `term`
"""
# Make sure that we don't lose underlying `eval_obj`s
# when taking apart and re-creating expression tuples
# using `kanren`'s `operator`, `arguments` and `term`
# functions.
e1 = etuple(add, (object(),), (object(),))
e1_obj = e1.eval_obj
e1_dup = (operator(e1),) + arguments(e1)
assert isinstance(e1_dup, ExpressionTuple)
assert e1_dup.eval_obj == e1_obj
e1_dup_2 = term(operator(e1), arguments(e1))
assert e1_dup_2 == e1_obj
# Take apart an already constructed/evaluated meta
# object.
e2 = mt.add(mt.vector(), mt.vector())
e2_et = tuple_expression(e2)
assert isinstance(e2_et, ExpressionTuple)
e2_et_expect = etuple(
mt.add,
etuple(mt.TensorVariable,
etuple(mt.TensorType,
'float64', (False,), None),
None, None, None),
etuple(mt.TensorVariable,
etuple(mt.TensorType,
'float64', (False,), None),
None, None, None),
)
assert e2_et == e2_et_expect
assert e2_et.eval_obj is e2
# Make sure expression expansion works from Theano objects, too.
# First, do it manually.
tt_expr = tt.vector() + tt.vector()
mt_expr = mt(tt_expr)
assert mt_expr.obj is tt_expr
assert mt_expr.reify() is tt_expr
e3 = tuple_expression(mt_expr)
assert e3 == e2_et
assert e3.eval_obj is mt_expr
assert e3.eval_obj.reify() is tt_expr
# Now, through `tuple_expression`
e2_et_2 = tuple_expression(tt_expr)
assert e2_et_2 == e3 == e2_et
assert isinstance(e2_et_2, ExpressionTuple)
assert e2_et_2.eval_obj.reify() == tt_expr
|
StarcoderdataPython
|
5056586
|
from mock import Mock, \
MagicMock, \
patch, \
mock_open
import itertools
from django.test import TestCase
from config.settings import PUBLIC_ROLE
from core.db.backend.pg import connection_pools, \
_pool_for_credentials, \
PGBackend
class MockingMixin(object):
"""A mixin for mock helper methods"""
def create_patch(self, name, **kwargs):
"""
Returns a started patch which stops itself on test cleanup.
Any kwargs pass directly into patch().
"""
patcher = patch(name, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
class PoolHelperFunctions(MockingMixin, TestCase):
"""Tests helper functions in pg.py, but not in the PGBackend class."""
def setUp(self):
self.mock_ThreadedConnectionPool = self.create_patch(
'core.db.backend.pg.ThreadedConnectionPool')
def test_pool_for_credentials(self):
n = len(connection_pools)
_pool_for_credentials('foo', 'password', 'repo_base')
self.assertEqual(len(connection_pools), n + 1)
_pool_for_credentials('bar', 'password', 'repo_base',
create_if_missing=True)
self.assertEqual(len(connection_pools), n + 2)
_pool_for_credentials('baz', 'password', 'repo_base',
create_if_missing=False)
self.assertEqual(len(connection_pools), n + 2)
_pool_for_credentials('bar', 'wordpass', 'repo_base')
self.assertEqual(len(connection_pools), n + 3)
# psycopg2 doesn't expose any good way to test that close_all_connections
# works. You can't ask for the list of existing connections and check that
# they're closed.
# def test_close_all_connections(self):
class PGBackendHelperMethods(MockingMixin, TestCase):
"""Tests connections, validation and execution methods in PGBackend."""
def setUp(self):
# some words to test out
self.good_nouns = ['good', 'good_noun', 'goodNoun', 'good1']
# some words that should throw validation errors
self.bad_nouns = ['_foo', 'foo_', '-foo', 'foo-', 'foo bar', '1foo',
'injection;attack', ';injection', 'injection;',
]
self.username = "username"
self.password = "password"
# mock connection pools so nothing gets a real db connection
self.mock_pool_for_cred = self.create_patch(
'core.db.backend.pg._pool_for_credentials')
# mock open connection, only to check if it ever gets called directly
self.mock_connect = self.create_patch(
'core.db.backend.pg.psycopg2.connect')
# open mocked connection
self.backend = PGBackend(self.username,
self.password,
repo_base=self.username)
def tearDown(self):
# Make sure connections are only ever acquired via pools
self.assertFalse(self.mock_connect.called)
def test_check_for_injections(self):
"""Tests validation against some sql injection attacks."""
for noun in self.bad_nouns:
with self.assertRaises(ValueError):
self.backend._check_for_injections(noun)
for noun in self.good_nouns:
try:
self.backend._check_for_injections(noun)
except ValueError:
self.fail('_check_for_injections failed to verify a good name')
def test_validate_table_names(self):
"""Tests validation against some invalid table names."""
good_tables = ['table', '_dbwipes_cache', 'my_repo1',
'asdfl_fsdvbrbhg_______jkhadsc']
bad_tables = [' table', '1table', 'table;select * from somewhere',
'table-table']
for noun in bad_tables:
with self.assertRaises(ValueError):
self.backend._validate_table_name(noun)
for noun in good_tables:
try:
self.backend._validate_table_name(noun)
except ValueError:
self.fail('_validate_table_name failed to verify a good name')
def test_check_open_connections(self):
mock_get_conn = self.mock_pool_for_cred.return_value.getconn
mock_set_isol_level = mock_get_conn.return_value.set_isolation_level
self.assertTrue(self.mock_pool_for_cred.called)
self.assertTrue(mock_get_conn.called)
self.assertTrue(mock_set_isol_level.called)
def test_execute_sql_strips_queries(self):
query = ' This query needs stripping; '
params = ('param1', 'param2')
mock_cursor = self.backend.connection.cursor
mock_execute = mock_cursor.return_value.execute
mock_cursor.return_value.fetchall.return_value = 'sometuples'
mock_cursor.return_value.rowcount = 1000
mock_query_rewriter = MagicMock()
mock_query_rewriter.apply_row_level_security.side_effect = lambda x: x
self.backend.query_rewriter = mock_query_rewriter
res = self.backend.execute_sql(query, params)
self.assertTrue(mock_query_rewriter.apply_row_level_security.called)
self.assertTrue(mock_cursor.called)
self.assertTrue(mock_execute.called)
self.assertEqual(res['tuples'], 'sometuples')
self.assertEqual(res['status'], True)
self.assertEqual(res['row_count'], 1000)
class SchemaListCreateDeleteShare(MockingMixin, TestCase):
"""
Tests that items reach the execute_sql method in pg.py.
Does not test execute_sql itself.
"""
def setUp(self):
# some words to test out
self.good_nouns = ['good', 'good_noun', 'good-noun']
        # some words that should throw validation errors
self.bad_nouns = ['_foo', 'foo_', '-foo', 'foo-', 'foo bar',
'injection;attack', ';injection', 'injection;',
]
self.username = "username"
self.password = "<PASSWORD>"
# mock the execute_sql function
self.mock_execute_sql = self.create_patch(
'core.db.backend.pg.PGBackend.execute_sql')
self.mock_execute_sql.return_value = True
# mock the mock_check_for_injections, which checks for injection
# attacks
self.mock_check_for_injections = self.create_patch(
'core.db.backend.pg.PGBackend._check_for_injections')
self.mock_validate_table_name = self.create_patch(
'core.db.backend.pg.PGBackend._validate_table_name')
# mock open connection, or else it will try to
# create a real db connection
self.mock_open_connection = self.create_patch(
'core.db.backend.pg.PGBackend.__open_connection__')
# mock the psycopg2.extensions.AsIs - many of the pg.py methods use it
# Its return value (side effect) is the call value
self.mock_as_is = self.create_patch('core.db.backend.pg.AsIs')
self.mock_as_is.side_effect = lambda x: x
# create an instance of PGBackend
self.backend = PGBackend(self.username,
self.password,
repo_base=self.username)
def reset_mocks(self):
# clears the mock call arguments and sets their call counts to 0
self.mock_as_is.reset_mock()
self.mock_execute_sql.reset_mock()
self.mock_check_for_injections.reset_mock()
# testing externally called methods in PGBackend
def test_create_repo(self):
create_repo_sql = 'CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s'
reponame = 'reponame'
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [], 'fields': []}
res = self.backend.create_repo(reponame)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], create_repo_sql)
self.assertEqual(
self.mock_execute_sql.call_args[0][1][0], reponame)
self.assertEqual(
self.mock_execute_sql.call_args[0][1][1], self.username)
self.assertTrue(self.mock_as_is.called)
self.assertTrue(self.mock_check_for_injections.called)
self.assertEqual(res, True)
def test_list_repos(self):
# the user is already logged in, so there's not much to be tested here
# except that the arguments are passed correctly
list_repo_sql = ('SELECT schema_name AS repo_name '
'FROM information_schema.schemata '
'WHERE schema_owner != %s')
mock_settings = self.create_patch("core.db.backend.pg.settings")
mock_settings.DATABASES = {'default': {'USER': 'postgres'}}
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 1, 'tuples': [
('test_table',)],
'fields': [{'type': 1043, 'name': 'table_name'}]}
params = (mock_settings.DATABASES['default']['USER'],)
res = self.backend.list_repos()
self.assertEqual(
self.mock_execute_sql.call_args[0][0], list_repo_sql)
self.assertEqual(
self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(res, ['test_table'])
def test_rename_repo(self):
alter_repo_sql = 'ALTER SCHEMA %s RENAME TO %s'
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 1, 'tuples': [
('test_table',)],
'fields': [{'type': 1043, 'name': 'table_name'}]}
params = ('old_name', 'new_name')
res = self.backend.rename_repo('old_name', 'new_name')
self.assertEqual(res, True)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], alter_repo_sql)
self.assertEqual(
self.mock_execute_sql.call_args[0][1], params)
self.assertTrue(self.mock_execute_sql.called)
self.assertEqual(self.mock_check_for_injections.call_count, 2)
def test_delete_repo_happy_path_cascade(self):
drop_schema_sql = 'DROP SCHEMA %s %s'
repo_name = 'repo_name'
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [], 'fields': []}
res = self.backend.delete_repo(repo=repo_name, force=True)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], drop_schema_sql)
self.assertEqual(
self.mock_execute_sql.call_args[0][1][0], repo_name)
self.assertEqual(
self.mock_execute_sql.call_args[0][1][1], 'CASCADE')
self.assertTrue(self.mock_as_is.called)
self.assertTrue(self.mock_check_for_injections)
self.assertEqual(res, True)
def test_delete_repo_no_cascade(self):
drop_schema_sql = 'DROP SCHEMA %s %s'
repo_name = 'repo_name'
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [], 'fields': []}
res = self.backend.delete_repo(repo=repo_name, force=False)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], drop_schema_sql)
self.assertEqual(
self.mock_execute_sql.call_args[0][1][0], repo_name)
self.assertEqual(
self.mock_execute_sql.call_args[0][1][1], '')
self.assertTrue(self.mock_as_is.called)
self.assertTrue(self.mock_check_for_injections.called)
self.assertEqual(res, True)
def test_add_collaborator(self):
privileges = ['SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
'TEMPORARY', 'EXECUTE', 'USAGE']
add_collab_query = ('BEGIN;'
'GRANT USAGE ON SCHEMA %s TO %s;'
'GRANT %s ON ALL TABLES IN SCHEMA %s TO %s;'
'ALTER DEFAULT PRIVILEGES IN SCHEMA %s '
'GRANT %s ON TABLES TO %s;'
'COMMIT;'
)
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [], 'fields': []}
product = itertools.product(self.good_nouns, self.good_nouns,
privileges)
# test every combo here. For now, don't test combined privileges
for repo, receiver, privilege in product:
params = (repo, receiver, privilege, repo, receiver,
repo, privilege, receiver)
res = self.backend.add_collaborator(
repo=repo, collaborator=receiver, db_privileges=[privilege])
self.assertEqual(
self.mock_execute_sql.call_args[0][0], add_collab_query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_as_is.call_count, len(params))
self.assertEqual(self.mock_check_for_injections.call_count, 3)
self.assertEqual(res, True)
self.reset_mocks()
def test_add_collaborator_concatenates_privileges(self):
privileges = ['SELECT', 'USAGE']
repo = 'repo'
receiver = 'receiver'
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [], 'fields': []}
self.backend.add_collaborator(repo=repo,
collaborator=receiver,
db_privileges=privileges)
# make sure that the privileges are passed as a string in params
self.assertTrue(
'SELECT, USAGE' in self.mock_execute_sql.call_args[0][1])
def test_delete_collaborator(self):
delete_collab_sql = ('BEGIN;'
'REVOKE ALL ON ALL TABLES IN SCHEMA %s '
'FROM %s CASCADE;'
'REVOKE ALL ON SCHEMA %s FROM %s CASCADE;'
'ALTER DEFAULT PRIVILEGES IN SCHEMA %s '
'REVOKE ALL ON TABLES FROM %s;'
'COMMIT;'
)
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [], 'fields': []}
repo = 'repo_name'
username = 'delete_me_user_name'
params = (repo, username, repo, username, repo, username)
res = self.backend.delete_collaborator(
repo=repo, collaborator=username)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], delete_collab_sql)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_as_is.call_count, len(params))
self.assertEqual(self.mock_check_for_injections.call_count, 2)
self.assertEqual(res, True)
def test_create_table(self):
repo = 'repo'
table = 'table'
params = [
{'column_name': 'id', 'data_type': 'integer'},
{'column_name': 'words', 'data_type': 'text'}
]
expected_params = ('repo', 'table', 'id integer, words text')
create_table_query = ('CREATE TABLE %s.%s (%s)')
self.mock_execute_sql.return_value = {
'status': True, 'row_count': -1, 'tuples': [], 'fields': []}
res = self.backend.create_table(repo, table, params)
# checks repo, table, and all param values for injections
self.assertEqual(self.mock_check_for_injections.call_count, 5)
self.assertEqual(self.mock_validate_table_name.call_count, 1)
final_query = self.mock_execute_sql.call_args[0][0]
final_params = self.mock_execute_sql.call_args[0][1]
self.assertEqual(final_query, create_table_query)
self.assertEqual(final_params, expected_params)
self.assertEqual(res, True)
# create table test_repo.test_table (id integer, words text)
def test_list_tables(self):
repo = 'repo'
list_tables_query = ('SELECT table_name FROM '
'information_schema.tables '
'WHERE table_schema = %s '
'AND table_type = \'BASE TABLE\';')
params = (repo,)
# execute sql should return this:
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 1, 'tuples': [
('test_table',)],
'fields': [{'type': 1043, 'name': 'table_name'}]}
# mocking out execute_sql's complicated return JSON
mock_list_repos = self.create_patch(
'core.db.backend.pg.PGBackend.list_repos')
mock_list_repos.return_value = [repo]
res = self.backend.list_tables(repo)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], list_tables_query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_check_for_injections.call_count, 1)
self.assertEqual(res, ['test_table'])
def test_describe_table_without_detail(self):
repo = 'repo'
table = 'table'
detail = False
query = ("SELECT %s "
"FROM information_schema.columns "
"WHERE table_schema = %s and table_name = %s;")
params = ('column_name, data_type', repo, table)
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 2,
'tuples': [(u'id', u'integer'), (u'words', u'text')],
'fields': [
{'type': 1043, 'name': 'column_name'},
{'type': 1043, 'name': 'data_type'}
]
}
res = self.backend.describe_table(repo, table, detail)
self.assertEqual(self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(res, [(u'id', u'integer'), (u'words', u'text')])
def test_describe_table_query_in_detail(self):
repo = 'repo'
table = 'table'
detail = True
query = ("SELECT %s "
"FROM information_schema.columns "
"WHERE table_schema = %s and table_name = %s;")
params = ('*', repo, table)
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 2,
'tuples': [
(u'id', u'integer'), (u'words', u'text'), ('foo', 'bar')
],
'fields': [
{'type': 1043, 'name': 'column_name'},
{'type': 1043, 'name': 'data_type'}
]
}
res = self.backend.describe_table(repo, table, detail)
self.assertEqual(self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(
res, [(u'id', u'integer'), (u'words', u'text'), ('foo', 'bar')])
def test_list_table_permissions(self):
repo = 'repo'
table = 'table'
query = ("select privilege_type from "
"information_schema.role_table_grants where table_schema=%s "
"and table_name=%s and grantee=%s")
params = ('repo', 'table', self.username)
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 2,
'tuples': [
(u'SELECT'), (u'UPDATE')],
'fields': [{'type': 1043, 'name': 'privilege_type'}]}
self.backend.list_table_permissions(repo, table)
self.assertEqual(self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
def test_delete_table(self):
repo = 'repo_name'
table = 'table_name'
force = False
expected_query = 'DROP TABLE %s.%s.%s %s'
expected_params = ('username', 'repo_name', 'table_name', 'RESTRICT')
self.mock_execute_sql.return_value = {
'status': True, 'row_count': -1, 'tuples': [], 'fields': []}
res = self.backend.delete_table(repo, table, force)
final_query = self.mock_execute_sql.call_args[0][0]
final_params = self.mock_execute_sql.call_args[0][1]
self.assertEqual(self.mock_check_for_injections.call_count, 1)
self.assertEqual(self.mock_validate_table_name.call_count, 1)
self.assertEqual(final_query, expected_query)
self.assertEqual(final_params, expected_params)
self.assertEqual(res, True)
def test_clone_table(self):
repo = 'repo_name'
table = 'table_name'
new_table = 'new_table_name'
expected_query = ('CREATE TABLE %s.%s AS SELECT * FROM %s.%s')
expected_params = (repo, new_table, repo, table)
self.mock_execute_sql.return_value = {'status': True}
res = self.backend.clone_table(repo, table, new_table)
final_query = self.mock_execute_sql.call_args[0][0]
final_params = self.mock_execute_sql.call_args[0][1]
self.assertEqual(self.mock_check_for_injections.call_count, 0)
self.assertEqual(self.mock_validate_table_name.call_count, 2)
self.assertEqual(final_query, expected_query)
self.assertEqual(final_params, expected_params)
self.assertEqual(res, True)
def test_list_views(self):
repo = 'repo'
list_views_query = ('SELECT table_name FROM information_schema.tables '
'WHERE table_schema = %s '
'AND table_type = \'VIEW\';')
params = (repo,)
# mocking out execute_sql's complicated return JSON
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 1, 'tuples': [
('test_view',)],
'fields': [{'type': 1043, 'name': 'view_name'}]}
# list_views depends on list_repos, which is being mocked out
mock_list_repos = self.create_patch(
'core.db.backend.pg.PGBackend.list_repos')
mock_list_repos.return_value = [repo]
res = self.backend.list_views(repo)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], list_views_query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_check_for_injections.call_count, 1)
self.assertEqual(res, ['test_view'])
def test_delete_view(self):
repo = 'repo_name'
view = 'view_name'
force = False
expected_query = ('DROP VIEW %s.%s.%s %s')
expected_params = ('username', 'repo_name', 'view_name', 'RESTRICT')
self.mock_execute_sql.return_value = {
'status': True, 'row_count': -1, 'tuples': [], 'fields': []}
res = self.backend.delete_view(repo, view, force)
final_query = self.mock_execute_sql.call_args[0][0]
final_params = self.mock_execute_sql.call_args[0][1]
self.assertEqual(self.mock_check_for_injections.call_count, 1)
self.assertEqual(self.mock_validate_table_name.call_count, 1)
self.assertEqual(final_query, expected_query)
self.assertEqual(final_params, expected_params)
self.assertEqual(res, True)
def test_create_view(self):
repo = 'repo_name'
view = 'view_name'
sql = 'SELECT * FROM table'
expected_params = ('repo_name', 'view_name', 'SELECT * FROM table')
create_view_query = ('CREATE VIEW %s.%s AS (%s)')
self.mock_execute_sql.return_value = {
'status': True, 'row_count': -1, 'tuples': [], 'fields': []}
res = self.backend.create_view(repo, view, sql)
# checks repo and view for injections
self.assertEqual(self.mock_check_for_injections.call_count, 1)
self.assertEqual(self.mock_validate_table_name.call_count, 1)
final_query = self.mock_execute_sql.call_args[0][0]
final_params = self.mock_execute_sql.call_args[0][1]
self.assertEqual(final_query, create_view_query)
self.assertEqual(final_params, expected_params)
self.assertEqual(res, True)
def test_describe_view_without_detail(self):
repo = 'repo_name'
view = 'view_name'
detail = False
query = ("SELECT %s "
"FROM information_schema.columns "
"WHERE table_schema = %s and table_name = %s;")
params = ('column_name, data_type', repo, view)
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 2,
'tuples': [(u'id', u'integer'), (u'words', u'text')],
'fields': [
{'type': 1043, 'name': 'column_name'},
{'type': 1043, 'name': 'data_type'}
]
}
res = self.backend.describe_view(repo, view, detail)
self.assertEqual(self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(res, [(u'id', u'integer'), (u'words', u'text')])
def test_get_schema(self):
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 2,
'tuples': [(u'id', u'integer'), (u'words', u'text')],
'fields': [
{'type': 1043, 'name': 'column_name'},
{'type': 1043, 'name': 'data_type'}
]
}
repo = 'repo'
table = 'table'
get_schema_query = ('SELECT column_name, data_type '
'FROM information_schema.columns '
'WHERE table_name = %s '
'AND table_schema = %s;'
)
params = ('table', 'repo')
self.backend.get_schema(repo, table)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], get_schema_query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_check_for_injections.call_count, 1)
self.assertEqual(self.mock_validate_table_name.call_count, 1)
def test_create_public_user_no_create_db(self):
create_user_query = ('CREATE ROLE %s WITH LOGIN '
'NOCREATEDB NOCREATEROLE NOCREATEUSER '
'PASSWORD %s')
        username = PUBLIC_ROLE
        password = 'password'
        # Patch before calling create_user; otherwise the final
        # assertFalse would pass trivially.
        mock_create_user_database = self.create_patch(
            'core.db.backend.pg.PGBackend.create_user_database')
        self.backend.create_user(username, password, create_db=False)
        params = (username, password)
        self.assertEqual(
            self.mock_execute_sql.call_args[0][0], create_user_query)
        self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
        self.assertEqual(self.mock_as_is.call_count, 1)
        self.assertEqual(self.mock_check_for_injections.call_count, 1)
        self.assertFalse(mock_create_user_database.called)
def test_create_normal_user_no_create_db(self):
create_user_query = 'GRANT %s to %s'
        username = 'username'
        password = 'password'
        # Patch before calling create_user; otherwise the final
        # assertFalse would pass trivially.
        mock_create_user_database = self.create_patch(
            'core.db.backend.pg.PGBackend.create_user_database')
        self.backend.create_user(username, password, create_db=False)
        params = (PUBLIC_ROLE, username)
        self.assertEqual(
            self.mock_execute_sql.call_args[0][0], create_user_query)
        self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
        self.assertEqual(self.mock_as_is.call_count, 3)
        self.assertEqual(self.mock_check_for_injections.call_count, 1)
        self.assertFalse(mock_create_user_database.called)
def test_create_user_calls_create_db(self):
username = 'username'
password = 'password'
mock_create_user_database = self.create_patch(
'core.db.backend.pg.PGBackend.create_user_database')
self.backend.create_user(
username=username, password=password, create_db=True)
self.assertTrue(mock_create_user_database.called)
def test_create_user_db(self):
create_db_query_1 = 'CREATE DATABASE %s; '
create_db_query_2 = 'ALTER DATABASE %s OWNER TO %s; '
username = 'username'
self.backend.create_user_database(username)
params_1 = (username,)
params_2 = (username, username)
call_args_1 = self.mock_execute_sql.call_args_list[0][0]
self.assertEqual(call_args_1[0], create_db_query_1)
self.assertEqual(call_args_1[1], params_1)
call_args_2 = self.mock_execute_sql.call_args_list[1][0]
self.assertEqual(call_args_2[0], create_db_query_2)
self.assertEqual(call_args_2[1], params_2)
self.assertEqual(self.mock_as_is.call_count, len(params_1 + params_2))
self.assertEqual(self.mock_check_for_injections.call_count, 1)
def test_remove_user(self):
query = 'DROP ROLE %s;'
username = "username"
params = (username,)
self.backend.remove_user(username)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_as_is.call_count, len(params))
self.assertEqual(self.mock_check_for_injections.call_count, 1)
def test_remove_database(self):
# mock out list_all_users
mock_list_all_users = self.create_patch(
'core.db.backend.pg.PGBackend.list_all_users')
mock_list_all_users.return_value = ['tweedledee', 'tweedledum']
self.backend.remove_database(self.username)
# revoke statement stuff
revoke_query = 'REVOKE ALL ON DATABASE %s FROM %s;'
revoke_params_1 = (self.username, 'tweedledee')
revoke_params_2 = (self.username, 'tweedledum')
self.assertEqual(
self.mock_execute_sql.call_args_list[0][0][0], revoke_query)
self.assertEqual(
self.mock_execute_sql.call_args_list[0][0][1], revoke_params_1)
self.assertEqual(
self.mock_execute_sql.call_args_list[1][0][0], revoke_query)
self.assertEqual(
self.mock_execute_sql.call_args_list[1][0][1], revoke_params_2)
# drop statement stuff
drop_query = 'DROP DATABASE %s;'
drop_params = (self.username,)
self.assertEqual(
self.mock_execute_sql.call_args_list[2][0][0], drop_query)
self.assertEqual(
self.mock_execute_sql.call_args_list[2][0][1], drop_params)
self.assertEqual(self.mock_as_is.call_count, 5)
self.assertEqual(self.mock_check_for_injections.call_count, 1)
def test_change_password(self):
query = 'ALTER ROLE %s WITH PASSWORD %s;'
params = (self.username, self.password)
self.backend.change_password(self.username, self.password)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_as_is.call_count, 1)
self.assertEqual(self.mock_check_for_injections.call_count, 1)
def test_list_collaborators(self):
query = 'SELECT unnest(nspacl) FROM pg_namespace WHERE nspname=%s;'
repo = 'repo_name'
params = (repo, )
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 2,
'tuples': [
('al_carter=UC/al_carter',),
('foo_bar=U/al_carter',)
],
'fields': [{'type': 1033, 'name': 'unnest'}]}
expected_result = [
{'username': 'al_carter', 'db_permissions': 'UC'},
{'username': 'foo_bar', 'db_permissions': 'U'}
]
res = self.backend.list_collaborators(repo)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(
self.mock_execute_sql.call_args[0][1], params)
self.assertFalse(self.mock_as_is.called)
self.assertEqual(res, expected_result)
def test_list_all_users(self):
query = 'SELECT usename FROM pg_catalog.pg_user WHERE usename != %s'
params = (self.username,)
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 2,
'tuples': [(u'delete_me_alpha_user',), (u'delete_me_beta_user',)],
'fields': [{'type': 19, 'name': 'usename'}]
}
res = self.backend.list_all_users()
self.assertEqual(
self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(
self.mock_execute_sql.call_args[0][1], params)
self.assertFalse(self.mock_as_is.called)
self.assertEqual(res, ['delete_me_alpha_user', 'delete_me_beta_user'])
def test_has_base_privilege(self):
query = 'SELECT has_database_privilege(%s, %s);'
privilege = 'CONNECT'
params = (self.username, privilege)
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [[True]], 'fields': []}
res = self.backend.has_base_privilege(
login=self.username, privilege=privilege)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_as_is.call_count, 0)
self.assertEqual(res, True)
def test_has_repo_db_privilege(self):
query = 'SELECT has_schema_privilege(%s, %s, %s);'
repo = 'repo'
privilege = 'CONNECT'
params = (self.username, repo, privilege)
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [[True]], 'fields': []}
res = self.backend.has_repo_db_privilege(
login=self.username, repo=repo, privilege=privilege)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_as_is.call_count, 0)
self.assertEqual(res, True)
def test_has_table_privilege(self):
query = 'SELECT has_table_privilege(%s, %s, %s);'
table = 'table'
privilege = 'CONNECT'
params = (self.username, table, privilege)
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [[True]], 'fields': []}
res = self.backend.has_table_privilege(
login=self.username, table=table, privilege=privilege)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_as_is.call_count, 0)
self.assertEqual(res, True)
def test_has_column_privilege(self):
query = 'SELECT has_column_privilege(%s, %s, %s, %s);'
table = 'table'
column = 'column'
privilege = 'CONNECT'
params = (self.username, table, column, privilege)
self.mock_execute_sql.return_value = {'status': True, 'row_count': -1,
'tuples': [[True]], 'fields': []}
res = self.backend.has_column_privilege(
login=self.username, table=table,
column=column, privilege=privilege)
self.assertEqual(
self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_as_is.call_count, 0)
self.assertEqual(res, True)
@patch('core.db.backend.pg.os.makedirs')
@patch('core.db.backend.pg.os.remove')
@patch('core.db.backend.pg.shutil.move')
def test_export_table_with_header(self, *args):
table_name = 'repo_name.table_name'
table_name_prepped = 'SELECT * FROM %s' % table_name
file_path = 'file_path'
file_format = 'file_format'
delimiter = ','
header = True
query = ('COPY (SELECT * FROM repo_name.table_name) '
'TO STDOUT WITH CSV HEADER DELIMITER \',\';')
self.backend.connection = Mock()
mock_connection = self.backend.connection
mock_mogrify = mock_connection.cursor.return_value.mogrify
mock_mogrify.return_value = query
mock_copy_expert = mock_connection.cursor.return_value.copy_expert
with patch("__builtin__.open", mock_open()):
self.backend.export_table(table_name, file_path,
file_format, delimiter, header)
mock_mogrify.assert_called_once_with(
'COPY (%s) TO STDOUT WITH %s %s DELIMITER %s;',
(table_name_prepped, file_format,
'HEADER' if header else '', delimiter))
# Kind of a meaningless check since we have to mock the return value
# of mogrify, but at least it ensures the result of mogrify is passed
# into copy_expert as is.
self.assertEqual(mock_copy_expert.call_args[0][0], query)
self.assertEqual(self.mock_as_is.call_count, 3)
self.assertEqual(self.mock_check_for_injections.call_count, 4)
self.assertEqual(self.mock_validate_table_name.call_count, 1)
@patch('core.db.backend.pg.os.makedirs')
@patch('core.db.backend.pg.os.remove')
@patch('core.db.backend.pg.shutil.move')
def test_export_table_with_no_header(self, *args):
table_name = 'repo_name.table_name'
table_name_prepped = 'SELECT * FROM %s' % table_name
file_path = 'file_path'
file_format = 'file_format'
delimiter = ','
header = False
query = ('COPY (SELECT * FROM repo_name.table_name) '
'TO STDOUT WITH CSV DELIMITER \',\';')
self.backend.connection = Mock()
mock_connection = self.backend.connection
mock_mogrify = mock_connection.cursor.return_value.mogrify
mock_mogrify.return_value = query
mock_copy_expert = mock_connection.cursor.return_value.copy_expert
with patch("__builtin__.open", mock_open()):
self.backend.export_table(table_name, file_path,
file_format, delimiter, header)
mock_mogrify.assert_called_once_with(
'COPY (%s) TO STDOUT WITH %s %s DELIMITER %s;',
(table_name_prepped, file_format,
'HEADER' if header else '', delimiter))
# Kind of a meaningless check since we have to mock the return value
# of mogrify, but at least it ensures the result of mogrify is passed
# into copy_expert as is.
self.assertEqual(mock_copy_expert.call_args[0][0], query)
self.assertEqual(self.mock_as_is.call_count, 3)
self.assertEqual(self.mock_check_for_injections.call_count, 4)
self.assertEqual(self.mock_validate_table_name.call_count, 1)
@patch('core.db.backend.pg.os.makedirs')
@patch('core.db.backend.pg.os.remove')
@patch('core.db.backend.pg.shutil.move')
def test_export_view(self, *args):
view_name = 'repo_name.view_name'
view_name_prepped = 'SELECT * FROM %s' % view_name
file_path = 'file_path'
file_format = 'file_format'
delimiter = ','
header = True
query = ('COPY (SELECT * FROM repo_name.view_name) '
'TO STDOUT WITH CSV HEADER DELIMITER \',\';')
self.backend.connection = Mock()
mock_connection = self.backend.connection
mock_mogrify = mock_connection.cursor.return_value.mogrify
mock_mogrify.return_value = query
mock_copy_expert = mock_connection.cursor.return_value.copy_expert
with patch("__builtin__.open", mock_open()):
self.backend.export_view(view_name, file_path,
file_format, delimiter, header)
mock_mogrify.assert_called_once_with(
'COPY (%s) TO STDOUT WITH %s %s DELIMITER %s;',
(view_name_prepped, file_format,
'HEADER' if header else '', delimiter))
# Kind of a meaningless check since we have to mock the return value
# of mogrify, but at least it ensures the result of mogrify is passed
# into copy_expert as is.
self.assertEqual(mock_copy_expert.call_args[0][0], query)
self.assertEqual(self.mock_as_is.call_count, 3)
self.assertEqual(self.mock_check_for_injections.call_count, 4)
self.assertEqual(self.mock_validate_table_name.call_count, 1)
@patch('core.db.backend.pg.os.makedirs')
@patch('core.db.backend.pg.os.remove')
@patch('core.db.backend.pg.shutil.move')
def _export_query_test_helper(self, *args, **kwargs):
passed_query = kwargs['passed_query']
passed_query_cleaned = kwargs.get(
'passed_query_cleaned', passed_query)
file_path = kwargs['file_path']
file_format = kwargs['file_format']
delimiter = kwargs['delimiter']
header = kwargs['header']
query = kwargs['query']
self.backend.connection = Mock()
mock_connection = self.backend.connection
mock_mogrify = mock_connection.cursor.return_value.mogrify
mock_mogrify.return_value = query
mock_copy_expert = mock_connection.cursor.return_value.copy_expert
with patch("__builtin__.open", mock_open()):
self.backend.export_query(passed_query, file_path,
file_format, delimiter, header)
mock_mogrify.assert_called_once_with(
'COPY (%s) TO STDOUT WITH %s %s DELIMITER %s;',
(passed_query_cleaned, file_format,
'HEADER' if header else '', delimiter))
# Kind of a meaningless check since we have to mock the return value
# of mogrify, but at least it ensures the result of mogrify is passed
# into copy_expert as is.
self.assertEqual(mock_copy_expert.call_args[0][0], query)
self.assertEqual(self.mock_as_is.call_count, 3)
self.assertEqual(self.mock_check_for_injections.call_count, 2)
def test_export_query_with_header(self, *args):
self._export_query_test_helper(
passed_query='myquery',
file_path='file_path',
file_format='CSV',
delimiter=',',
header=True,
query=('COPY (myquery) '
'TO STDOUT WITH CSV HEADER DELIMITER \',\';'))
def test_export_query_with_no_header(self, *args):
self._export_query_test_helper(
passed_query='myquery',
file_path='file_path',
file_format='CSV',
delimiter=',',
header=False,
query=('COPY (myquery) '
'TO STDOUT WITH CSV DELIMITER \',\';'))
def test_export_query_only_executes_text_before_semicolon(self, *args):
self._export_query_test_helper(
passed_query=' text before semicolon; text after; ',
passed_query_cleaned='text before semicolon',
file_path='file_path',
file_format='CSV',
delimiter=',',
header=False,
query=('COPY (text before semicolon) '
'TO STDOUT WITH CSV DELIMITER \',\';'))
def test_import_file_with_header(self):
query = 'COPY %s FROM %s WITH %s %s DELIMITER %s ENCODING %s QUOTE %s;'
table_name = 'user_name.repo_name.table_name'
file_path = 'file_path'
file_format = 'file_format'
delimiter = ','
header = True
encoding = 'ISO-8859-1'
quote_character = '"'
params = (table_name, file_path, file_format,
'HEADER', delimiter, encoding, quote_character)
self.backend.import_file(table_name, file_path, file_format, delimiter,
header, encoding, quote_character)
self.assertEqual(self.mock_execute_sql.call_args[0][0], query)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
self.assertEqual(self.mock_as_is.call_count, 3)
        self.assertEqual(self.mock_check_for_injections.call_count, 3)
self.assertEqual(self.mock_validate_table_name.call_count, 1)
def test_import_table_with_no_header(self):
table_name = 'table_name'
file_path = 'file_path'
file_format = 'file_format'
delimiter = ','
header = False
encoding = 'ISO-8859-1'
quote_character = '"'
params = (table_name, file_path, file_format,
'', delimiter, encoding, quote_character)
self.backend.import_file(table_name, file_path, file_format, delimiter,
header, encoding, quote_character)
self.assertEqual(self.mock_execute_sql.call_args[0][1], params)
def test_import_file_w_dbtruck(self):
# DBTruck is not tested for safety/security... At all.
# The method does so little
# that it doesn't even make much sense to test it.
pass
def test_can_user_access_rls_table(self):
mock_settings = self.create_patch("core.db.backend.pg.settings")
mock_settings.POLICY_SCHEMA = 'SCHEMA'
mock_settings.POLICY_TABLE = 'TABLE'
self.mock_execute_sql.return_value = {
'status': True, 'row_count': 1,
'tuples': [
(True,),
]}
username = 'delete_me_user'
permissions = ['select', 'update']
expected_query = (
"SELECT exists("
"SELECT * FROM %s.%s where grantee=lower(%s) and ("
"lower(policy_type)=lower(%s) or lower(policy_type)=lower(%s)"
"))")
expected_params = ('SCHEMA', 'TABLE', 'delete_me_user', 'select',
'update')
self.backend.can_user_access_rls_table(username, permissions)
self.assertEqual(self.mock_execute_sql.call_args[0][0], expected_query)
self.assertEqual(
self.mock_execute_sql.call_args[0][1], expected_params)
|
StarcoderdataPython
|
356317
|
<reponame>nitrictech/python-sdk
#
# Copyright (c) 2021 Nitric Technologies Pty Ltd.
#
# This file is part of Nitric Python 3 SDK.
# See https://github.com/nitrictech/python-sdk for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import List, Union
from grpclib import GRPCError
from nitric.api.exception import FailedPreconditionException, exception_from_grpc_error, InvalidArgumentException
from nitric.utils import new_default_channel, _struct_from_dict, _dict_from_struct
from nitricapi.nitric.queue.v1 import QueueServiceStub, NitricTask, FailedTask as WireFailedTask
from dataclasses import dataclass, field
@dataclass(frozen=True, order=True)
class Task(object):
"""A task to be sent to a Queue."""
id: str = field(default=None)
payload_type: str = field(default=None)
payload: dict = field(default_factory=dict)
@dataclass(frozen=True, order=True)
class ReceivedTask(object):
"""A reference to a task received from a Queue, with a lease."""
id: str = field(default=None)
payload_type: str = field(default=None)
payload: dict = field(default_factory=dict)
lease_id: str = field(default=None)
_queueing: Queues = field(default=None)
_queue: Queue = field(default=None)
async def complete(self):
"""
Mark this task as complete and remove it from the queue.
Only callable for tasks that have been received from a Queue.
"""
if self._queueing is None or self._queue is None or self.lease_id is None:
raise FailedPreconditionException(
"Task is missing internal client or lease id, was it returned from " "queue.receive?"
)
try:
await self._queueing._queue_stub.complete(queue=self._queue.name, lease_id=self.lease_id)
except GRPCError as grpc_err:
raise exception_from_grpc_error(grpc_err)
@dataclass(frozen=True, order=True)
class FailedTask(Task):
"""Represents a failed queue publish."""
message: str = field(default="")
def _task_to_wire(task: Task) -> NitricTask:
"""
Convert a Nitric Task to a Nitric Queue Task.
:param task: to convert
:return: converted task
"""
return NitricTask(
id=task.id,
payload_type=task.payload_type,
payload=_struct_from_dict(task.payload),
)
def _wire_to_received_task(task: NitricTask, queueing: Queues = None, queue: Queue = None) -> ReceivedTask:
"""
Convert a Nitric Queue Task (protobuf) to a Nitric Task (python SDK).
:param task: to convert
:return: converted task
"""
return ReceivedTask(
id=task.id,
payload_type=task.payload_type,
payload=_dict_from_struct(task.payload),
lease_id=task.lease_id,
_queueing=queueing,
_queue=queue,
)
def _wire_to_failed_task(failed_task: WireFailedTask) -> FailedTask:
"""
Convert a queue task that failed to push into a Failed Task object.
:param failed_task: the failed task
:return: the Failed Task with failure message
"""
task = _wire_to_received_task(failed_task.task)
return FailedTask(
id=task.id,
payload_type=task.payload_type,
payload=task.payload,
message=failed_task.message,
)
@dataclass(frozen=True, order=True)
class Queue(object):
"""A reference to a queue from a queue service, used to perform operations on that queue."""
_queueing: Queues
name: str
async def send(
self, tasks: Union[Task, dict, List[Union[Task, dict]]] = None
) -> Union[Task, List[Union[Task, FailedTask]]]:
"""
Send one or more tasks to this queue.
If a list of tasks is provided this function will return a list containing any tasks that failed to be sent to
the queue.
:param tasks: A task or list of tasks to send to the queue.
"""
if isinstance(tasks, list):
return await self._send_batch(tasks)
task = tasks
if task is None:
task = Task()
if isinstance(task, dict):
# TODO: handle tasks that are just a payload
task = Task(**task)
try:
await self._queueing._queue_stub.send(queue=self.name, task=_task_to_wire(task))
except GRPCError as grpc_err:
raise exception_from_grpc_error(grpc_err)
async def _send_batch(self, tasks: List[Union[Task, dict]], raise_on_failure: bool = True) -> List[FailedTask]:
"""
Push a collection of tasks to a queue, which can be retrieved by other services.
:param tasks: The tasks to push to the queue
:param raise_on_failure: Whether to raise an exception when one or more tasks fails to send
        :return: a list of FailedTask objects describing any tasks that failed to send.
"""
if tasks is None or len(tasks) < 1:
raise InvalidArgumentException("No tasks provided, nothing to send.")
wire_tasks = [_task_to_wire(Task(**task) if isinstance(task, dict) else task) for task in tasks]
try:
response = await self._queueing._queue_stub.send_batch(queue=self.name, tasks=wire_tasks)
return [_wire_to_failed_task(failed_task) for failed_task in response.failed_tasks]
except GRPCError as grpc_err:
raise exception_from_grpc_error(grpc_err)
    async def receive(self, limit: int = None) -> List[ReceivedTask]:
"""
Pop 1 or more items from the specified queue up to the depth limit.
Queue items are Nitric Tasks that are leased for a limited period of time, where they may be worked on.
Once complete or failed they must be acknowledged using the request specific leaseId.
If the lease on a queue item expires before it is acknowledged or the lease is extended the task will be
returned to the queue for reprocessing.
:param limit: The maximum number of queue items to return. Default: 1, Min: 1.
:return: Queue items popped from the specified queue.
"""
# Set the default and minimum depth to 1.
if limit is None or limit < 1:
limit = 1
try:
response = await self._queueing._queue_stub.receive(queue=self.name, depth=limit)
# Map the response protobuf response items to Python SDK Nitric Tasks
return [_wire_to_received_task(task=task, queueing=self._queueing, queue=self) for task in response.tasks]
except GRPCError as grpc_err:
raise exception_from_grpc_error(grpc_err)
class Queues(object):
"""Queueing client, providing access to Queue and Task references and operations on those entities."""
def __init__(self):
"""Construct a Nitric Queue Client."""
self.channel = new_default_channel()
self._queue_stub = QueueServiceStub(channel=self.channel)
def __del__(self):
# close the channel when this client is destroyed
if self.channel is not None:
self.channel.close()
def queue(self, name: str):
"""Return a reference to a queue from the connected queue service."""
return Queue(_queueing=self, name=name)
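# Usage sketch (illustrative, not part of the SDK): sending a task to a queue,
# then receiving and completing it. Assumes a running Nitric membrane so that
# new_default_channel() can connect; the queue name "work" is hypothetical.
async def _queue_example():
    queue = Queues().queue("work")
    await queue.send(Task(id="task-1", payload={"message": "hello"}))
    for task in await queue.receive(limit=1):
        await task.complete()  # acknowledge so the task leaves the queue

# import asyncio; asyncio.run(_queue_example())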
|
StarcoderdataPython
|
4865031
|
from .instance import shared_transnet_instance
from .account import Account
from .exceptions import ProposalDoesNotExistException
from .blockchainobject import BlockchainObject
import logging
log = logging.getLogger(__name__)
class Proposal(BlockchainObject):
""" Read data about a Proposal Balance in the chain
:param str id: Id of the proposal
        :param transnet transnet_instance: Transnet() instance to use when accessing a RPC
"""
type_id = 10
def refresh(self):
proposal = self.transnet.rpc.get_objects([self.identifier])
if not any(proposal):
raise ProposalDoesNotExistException
super(Proposal, self).__init__(proposal[0], transnet_instance=self.transnet)
@property
def proposed_operations(self):
yield from self["proposed_transaction"]["operations"]
class Proposals(list):
""" Obtain a list of pending proposals for an account
:param str account: Account name
        :param transnet transnet_instance: Transnet() instance to use when accessing a RPC
"""
def __init__(self, account, transnet_instance=None):
self.transnet = transnet_instance or shared_transnet_instance()
account = Account(account, transnet_instance=self.transnet)
proposals = self.transnet.rpc.get_proposed_transactions(account["id"])
super(Proposals, self).__init__(
[
Proposal(x, transnet_instance=self.transnet)
for x in proposals
]
)
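# Usage sketch (an illustration; the account name "init0" is a hypothetical
# placeholder and a configured shared Transnet instance is assumed):
if __name__ == "__main__":
    for proposal in Proposals("init0"):
        for op in proposal.proposed_operations:
            print(op)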
|
StarcoderdataPython
|
332769
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for util.py."""
from __future__ import absolute_import
import unittest
from apache_beam.io.gcp.datastore.v1 import util
class MovingSumTest(unittest.TestCase):
TIMESTAMP = 1500000000
def test_bad_bucket_size(self):
with self.assertRaises(ValueError):
_ = util.MovingSum(1, 0)
def test_bad_window_size(self):
with self.assertRaises(ValueError):
_ = util.MovingSum(1, 2)
def test_no_data(self):
ms = util.MovingSum(10, 1)
self.assertEqual(0, ms.sum(MovingSumTest.TIMESTAMP))
self.assertEqual(0, ms.count(MovingSumTest.TIMESTAMP))
self.assertFalse(ms.has_data(MovingSumTest.TIMESTAMP))
def test_one_data_point(self):
ms = util.MovingSum(10, 1)
ms.add(MovingSumTest.TIMESTAMP, 5)
self.assertEqual(5, ms.sum(MovingSumTest.TIMESTAMP))
self.assertEqual(1, ms.count(MovingSumTest.TIMESTAMP))
self.assertTrue(ms.has_data(MovingSumTest.TIMESTAMP))
def test_aggregates_within_window(self):
ms = util.MovingSum(10, 1)
ms.add(MovingSumTest.TIMESTAMP, 5)
ms.add(MovingSumTest.TIMESTAMP+1, 3)
ms.add(MovingSumTest.TIMESTAMP+2, 7)
self.assertEqual(15, ms.sum(MovingSumTest.TIMESTAMP+3))
self.assertEqual(3, ms.count(MovingSumTest.TIMESTAMP+3))
def test_data_expires_from_moving_window(self):
ms = util.MovingSum(5, 1)
ms.add(MovingSumTest.TIMESTAMP, 5)
ms.add(MovingSumTest.TIMESTAMP+3, 3)
ms.add(MovingSumTest.TIMESTAMP+6, 7)
self.assertEqual(10, ms.sum(MovingSumTest.TIMESTAMP+7))
self.assertEqual(2, ms.count(MovingSumTest.TIMESTAMP+7))
class DynamicWriteBatcherTest(unittest.TestCase):
def setUp(self):
self._batcher = util.DynamicBatchSizer()
# If possible, keep these test cases aligned with the Java test cases in
# DatastoreV1Test.java
def test_no_data(self):
self.assertEqual(util.WRITE_BATCH_INITIAL_SIZE,
self._batcher.get_batch_size(0))
def test_fast_queries(self):
self._batcher.report_latency(0, 1000, 200)
self._batcher.report_latency(0, 1000, 200)
self.assertEqual(util.WRITE_BATCH_MAX_SIZE,
self._batcher.get_batch_size(0))
def test_slow_queries(self):
self._batcher.report_latency(0, 10000, 200)
self._batcher.report_latency(0, 10000, 200)
self.assertEqual(100, self._batcher.get_batch_size(0))
def test_size_not_below_minimum(self):
self._batcher.report_latency(0, 30000, 50)
self._batcher.report_latency(0, 30000, 50)
self.assertEqual(util.WRITE_BATCH_MIN_SIZE,
self._batcher.get_batch_size(0))
def test_sliding_window(self):
self._batcher.report_latency(0, 30000, 50)
self._batcher.report_latency(50000, 5000, 200)
self._batcher.report_latency(100000, 5000, 200)
self.assertEqual(200, self._batcher.get_batch_size(150000))
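# Usage sketch (a minimal illustration based on MovingSumTest above; the
# argument order follows the positional (window, bucket) calls in those tests):
def _moving_sum_demo():
    ms = util.MovingSum(10, 1)  # 10-unit window split into 1-unit buckets
    ms.add(MovingSumTest.TIMESTAMP, 5)
    ms.add(MovingSumTest.TIMESTAMP + 1, 3)
    # both points still fall inside the window at TIMESTAMP + 2
    return ms.sum(MovingSumTest.TIMESTAMP + 2), ms.count(MovingSumTest.TIMESTAMP + 2)  # (8, 2)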
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
341803
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Mag. <NAME> All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
#
#++
# Name
# auto_imports
#
# Purpose
# Automatically import MOM-related modules that are needed during
# sphinx run (import when a specific module is documented is too late!)
#
# Revision Dates
# 17-Aug-2015 (CT) Creation
# ««revision-date»»···
#--
from _MOM import MOM
from _GTW._OMP._Auth import Auth
from _GTW._OMP._EVT import EVT
from _GTW._OMP._PAP import PAP
from _GTW._OMP._SRM import SRM
from _GTW._OMP._SWP import SWP
MOM._Import_All ()
MOM.Attr._Import_All ()
Auth._Import_All ()
EVT._Import_All ()
PAP._Import_All ()
SRM._Import_All ()
SWP._Import_All ()
### __END__ auto_imports
|
StarcoderdataPython
|
3560952
|
<reponame>michaelmernin/kafka-topics-message-browser<gh_stars>1-10
from confluent_kafka.schema_registry.avro import AvroDeserializer
import constants
from config_handler import ConnectionConfig
from error_handler import ErrorHandler
class Deserializer:
directory_avro_schemas = constants.DIRECTORY_AVRO_SCHEMAS
def __init__(self, registry_client):
self.config_avro_location = ConnectionConfig.avro_topics
self.registry_client = registry_client
def create_avro_deserializer(self, topic_name):
schema_string = self.load_avro_schema_string(topic_name)
return AvroDeserializer(schema_string, self.registry_client)
def load_avro_schema_string(self, topic_name):
if topic_name not in self.config_avro_location:
raise ErrorHandler("Error. Application does not have avro schema for requested topic")
try:
with open(self.directory_avro_schemas + "/" + self.config_avro_location.get(topic_name),
'r') as schema_file:
return schema_file.read().replace('\n', '')
except Exception as e:
raise ErrorHandler("Error. Unable to load schema: " + str(e))
|
StarcoderdataPython
|
3500942
|
<reponame>chamsrut/Plagiarism-detector
from __future__ import print_function
import argparse
import os
import pandas as pd
# sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23.
# from sklearn.externals import joblib
# Import joblib package directly
import joblib
## TODO: Import any additional libraries you need to define a model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Provided model load function
def model_fn(model_dir):
"""Load model from the model_dir. This is the same model that is saved
in the main if statement.
"""
print("Loading model.")
# load using joblib
model = joblib.load(os.path.join(model_dir, "model.joblib"))
print("Done loading model.")
return model
## TODO: Complete the main code
if __name__ == '__main__':
# All of the model parameters and training parameters are sent as arguments
# when this script is executed, during a training job
# Here we set up an argument parser to easily access the parameters
parser = argparse.ArgumentParser()
# SageMaker parameters, like the directories for training data and saving models; set automatically
# Do not need to change
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
## TODO: Add any additional arguments that you will need to pass into your model
# args holds all passed-in arguments
args = parser.parse_args()
# Read in csv training file
training_dir = args.data_dir
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
# Labels are in the first column
train_y = train_data.iloc[:,0]
train_x = train_data.iloc[:,1:]
## --- Your code here --- ##
# random state for reproducibility
random_state=1
# scale and create a validation set with 7% of the data for model selection
# (1/0 label ratio is 60/40 so it's pretty balanced)
X_train, X_val, y_train, y_val = train_test_split(train_x, train_y, test_size=.07, random_state=random_state)
# models from which we will choose the best one
models = [KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025, random_state=random_state),
SVC(gamma=2, C=1, random_state=random_state),
GaussianProcessClassifier(1.0 * RBF(1.0), random_state=random_state),
DecisionTreeClassifier(max_depth=5, random_state=random_state),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1, random_state=random_state),
MLPClassifier(alpha=1, max_iter=1000, random_state=random_state),
AdaBoostClassifier(n_estimators=500, random_state=random_state),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
# initialize the best score to 0 and the index of the best model to 0
best_score = 0
best_model_idx = 0
# model selection process -> based on the best score on validation set
for i in range(len(models)):
models[i].fit(X_train, y_train)
score = models[i].score(X_val, y_val)
if score > best_score:
best_score = score
best_model_idx = i
# assigning best model
model = models[best_model_idx]
## --- End of your code --- ##
# Save the trained model
joblib.dump(model, os.path.join(args.model_dir, "model.joblib"))
|
StarcoderdataPython
|
8097956
|
class TestBucketeerCLI:
"""Tests for Bucketeer CLI."""
def test_true(self):
"""Tests that pytest is setup properly."""
assert True
|
StarcoderdataPython
|
9655747
|
# -*- coding: utf-8 -*-
# Import the required modules
file_name="data2"
import codecs
from pythainlp.tokenize import word_tokenize
#import deepcut
from pythainlp.tag import pos_tag
from nltk.tokenize import RegexpTokenizer
import glob
import nltk
import re
# thai cut
thaicut="newmm"
# Prepare the tag tokenizer with re
pattern = r'\[(.*?)\](.*?)\[\/(.*?)\]'
tokenizer = RegexpTokenizer(pattern)  # use nltk.tokenize.RegexpTokenizer to split [TIME]8.00[/TIME] into ('TIME', '8.00', 'TIME')
# Handle text that was left untagged
def toolner_to_tag(text):
text=text.strip().replace("FACILITY","LOCATION").replace("[AGO]","").replace("[/AGO]","")
text=re.sub("<[^>]*>","",text)
text=re.sub("(\[\/(.*?)\])","\\1***",text)#.replace('(\[(.*?)\])','***\\1')# text.replace('>','>***') # ตัดการกับพวกไม่มี tag word
text=re.sub("(\[\w+\])","***\\1",text)
text2=[]
for i in text.split('***'):
if "[" in i:
text2.append(i)
else:
text2.append("[word]"+i+"[/word]")
text="".join(text2)#re.sub("[word][/word]","","".join(text2))
return text.replace("[word][/word]","")
# Convert text to the conll2002 format
def text2conll2002(text,pos=True):
"""
    Convert tagged text into the conll2002 format.
"""
text=toolner_to_tag(text)
text=text.replace("''",'"')
text=text.replace("’",'"').replace("‘",'"')#.replace('"',"")
tag=tokenizer.tokenize(text)
j=0
conll2002=""
for tagopen,text,tagclose in tag:
        word_cut=word_tokenize(text,engine=thaicut)  # use the newmm word tokenizer
i=0
txt5=""
while i<len(word_cut):
if word_cut[i]=="''" or word_cut[i]=='"':pass
elif i==0 and tagopen!='word':
txt5+=word_cut[i]
txt5+='\t'+'B-'+tagopen
elif tagopen!='word':
txt5+=word_cut[i]
txt5+='\t'+'I-'+tagopen
else:
txt5+=word_cut[i]
txt5+='\t'+'O'
txt5+='\n'
#j+=1
i+=1
conll2002+=txt5
if pos==False:
return conll2002
return postag(conll2002)
# Attach POS tags for use with NER
# print(text2conll2002(t,pos=False))
def postag(text):
listtxt=[i for i in text.split('\n') if i!='']
list_word=[]
for data in listtxt:
list_word.append(data.split('\t')[0])
#print(text)
list_word=pos_tag(list_word,engine='perceptron')
text=""
i=0
for data in listtxt:
text+=data.split('\t')[0]+'\t'+list_word[i][1]+'\t'+data.split('\t')[1]+'\n'
i+=1
return text
# Write the conll2002 data file
def write_conll2002(file_name,data):
"""
    Write data to a file.
"""
with codecs.open(file_name, "w", "utf-8-sig") as temp:
temp.write(data)
return True
# Read data from a file
def get_data(fileopen):
"""
    Read every line of the file into a list.
"""
with codecs.open(fileopen, 'r',encoding='utf-8-sig') as f:
lines = f.read().splitlines()
return lines
def alldata(lists):
text=""
for data in lists:
text+=text2conll2002(data)
text+='\n'
return text
def alldata_list(lists):
data_all=[]
for data in lists:
data_num=[]
try:
txt=text2conll2002(data,pos=True).split('\n')
for d in txt:
tt=d.split('\t')
if d!="":
if len(tt)==3:
data_num.append((tt[0],tt[1],tt[2]))
else:
data_num.append((tt[0],tt[1]))
#print(data_num)
data_all.append(data_num)
except:
print(data)
#print(data_all)
return data_all
def alldata_list_str(lists):
string=""
for data in lists:
string1=""
for j in data:
string1+=j[0]+" "+j[1]+" "+j[2]+"\n"
string1+="\n"
string+=string1
return string
def get_data_tag(listd):
list_all=[]
c=[]
for i in listd:
if i !='':
c.append((i.split("\t")[0],i.split("\t")[1],i.split("\t")[2]))
else:
list_all.append(c)
c=[]
return list_all
def getall(lista):
ll=[]
for i in lista:
o=True
for j in ll:
if re.sub("\[(.*?)\]","",i)==re.sub("\[(.*?)\]","",j):
o=False
break
if o==True:
ll.append(i)
return ll
data1=getall(get_data(file_name+".txt"))
import dill
datatofile = alldata_list(data1)
tt=[]
#datatofile.reverse()
import random
#random.shuffle(datatofile)
print(len(datatofile))
training_samples = datatofile[:int(len(datatofile) * 0.8)]
test_samples = datatofile[int(len(datatofile) * 0.8):]
'''training_samples = datatofile[:2822]
test_samples = datatofile[2822:]'''
print(test_samples[0])
#tag=TrainChunker(training_samples,test_samples) # Train
#run(training_samples,test_samples)
from sklearn_crfsuite import scorers
from sklearn_crfsuite import metrics
import sklearn_crfsuite
from pythainlp.corpus import stopwords
stopwords = stopwords.words('thai')
def isThai(chr):
cVal = ord(chr)
if(cVal >= 3584 and cVal <= 3711):
return True
return False
def isThaiWord(word):
t=True
for i in word:
l=isThai(i)
if l!=True and i!='.':
t=False
break
return t
def is_stopword(word):
return word in stopwords
def is_s(word):
if word == " " or word =="\t" or word=="":
return True
else:
return False
def lennum(word,num):
if len(word)==num:
return True
return False
def doc2features0(doc, i):
word = doc[i][0]
postag = doc[i][1]
# Features from current word
features={
'word.word': word,
'word.stopword': is_stopword(word),
'word.isthai':isThaiWord(word),
'word.isspace':word.isspace(),
'postag':postag,
'postag[:2]': postag[:2],
'word.isdigit()': word.isdigit(),
'word[-3:]': word[-3:],
'word[-2:]': word[-2:]
}
if word.isdigit() and len(word)==5:
features['word.islen5']=True
#if postag=='NCNM':
# features['word.islenten']=len(word)==10
# Features from previous word
if i > 0:
prevword = doc[i-1][0]
postag1 = doc[i-1][1]
features['word.prevword'] = prevword
features['word.previsspace']=prevword.isspace()
features['word.previsthai']=isThaiWord(prevword)
features['word.prevstopword']=is_stopword(prevword)
features['word.prepostag'] = postag1
        features['-1:postag[:2]'] = postag1[:2]
features['word.prevwordisdigit'] = prevword.isdigit()
else:
features['BOS'] = True # Special "Beginning of Sequence" tag
# Features from next word
if i < len(doc)-1:
nextword = doc[i+1][0]
postag1 = doc[i+1][1]
features['word.nextword'] = nextword
features['word.nextisspace']=nextword.isspace()
features['word.nextpostag'] = postag1
features['word.nextisthai']=isThaiWord(nextword)
features['word.nextstopword']=is_stopword(nextword)
features['word.nextwordisdigit'] = nextword.isdigit()
        features['+1:postag[:2]'] = postag1[:2]
else:
features['EOS'] = True # Special "End of Sequence" tag
return features
def extract_features0(doc):
return [doc2features0(doc, i) for i in range(len(doc))]
def get_labels(doc):
return [tag for (token,postag,tag) in doc]
X = [extract_features0(doc) for doc in training_samples]
y = [get_labels(doc) for doc in training_samples]
X_test = [extract_features0(doc) for doc in test_samples]
y_test = [get_labels(doc) for doc in test_samples]
#print(X[0])
print(len(training_samples))
def doc2features2(doc, i):
    word = doc[i][0]
postag = doc[i][1]
# Features from current word
features={
'word.word': word,
'word.stopword': is_stopword(word),
'postag':postag,
'postag[:2]': postag[:2],
'word.isdigit()': word.isdigit(),
'word[-3:]': word[-3:],
'word[-2:]': word[-2:]
}
# Features from previous word
if i > 0:
        prevword = doc[i-1][0]
postag1 = doc[i-1][1]
features['word.prevword'] = prevword
features['word.prepostag'] = doc[i-1][1]
        features['-1:postag[:2]'] = postag1[:2]
features['word.prevwordisdigit'] = prevword.isdigit()
else:
features['BOS'] = True # Special "Beginning of Sequence" tag
# Features from next word
if i < len(doc)-1:
        nextword = doc[i+1][0]
postag1 = doc[i+1][1]
features['word.nextword'] = nextword
        features['+1:postag[:2]'] = postag1[:2]
features['word.nextwordisdigit'] = nextword.isdigit()
else:
features['EOS'] = True # Special "End of Sequence" tag
return features
def extract_features2(tag):
i=0
l=[]
while i<len(tag):
l.append(doc2features2(tag,i))
i+=1
return l
crf = sklearn_crfsuite.CRF(
algorithm='lbfgs',
c1=0.1,
c2=0.1,
max_iterations=500,
all_possible_transitions=True,
model_filename=file_name+"-pos-new.model2"
)
crf.fit(X, y)
labels = list(crf.classes_)
labels.remove('O')
y_pred = crf.predict(X_test)
e=metrics.flat_f1_score(y_test, y_pred,
average='weighted', labels=labels)
print(e)
sorted_labels = sorted(
labels,
key=lambda name: (name[1:], name[0])
)
print(metrics.flat_classification_report(
y_test, y_pred, labels=sorted_labels, digits=3
))
import dill
with open("datatrain.data", "wb") as dill_file:
dill.dump(datatofile, dill_file)
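# Tagging sketch (an illustration, not part of the original script): reuse the
# fitted CRF to tag a new sentence; any input text here is hypothetical.
def tag_sentence(text):
    words = word_tokenize(text, engine=thaicut)
    doc = pos_tag(words, engine='perceptron')  # list of (word, postag) tuples
    return list(zip(words, crf.predict_single(extract_features0(doc))))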
|
StarcoderdataPython
|
1681151
|
<gh_stars>0
from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings
from jobparser import settings
from jobparser.spiders.hh import HhSpider
from jobparser.spiders.sj import SjSpider
def get_sj_query(string: str):
return string.replace(" ", '%20')
def get_hh_query(string: str):
return string.replace(" ", '+')
if __name__ == '__main__':
print('UPLOAD VACANCY:')
search = input()
crawler_settings = Settings()
crawler_settings.setmodule(settings)
process = CrawlerProcess(settings=crawler_settings)
process.crawl(HhSpider, search=get_hh_query(search))
process.crawl(SjSpider, search=get_sj_query(search))
process.start()
print('DONE')
|
StarcoderdataPython
|
4909304
|
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Create a variable v
v = tf.Variable([1, 2])
# Create a constant c
c = tf.constant([3, 3])
# Add a subtraction op
sub = tf.subtract(v, c)
# Add an addition op
add = tf.add(v, sub)
# Variable initialization op
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # This line must not be omitted; the initializer has to run first
sess.run(init)
    # Print the results
print(sess.run(sub)) # [-2 -1]
print(sess.run(add)) # [-1 1]
# Define a variable state named "count"
state = tf.Variable(0, name="count")
# Add 1 to the value of state
new_value = tf.add(state, 1)
# Assignment op: assign the value of new_value to state
update = tf.assign(state, new_value)
# Initialize variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
print(sess.run(state))
for _ in range(5):
print(sess.run(update))
|
StarcoderdataPython
|
1909259
|
<filename>ktrain/tests/testenv.py
import os
#os.environ['TF_KERAS'] = '1'
#os.environ['TF_EAGER'] = '0'
import sys
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"]="0"
sys.path.insert(0,'../..')
|
StarcoderdataPython
|
4974922
|
from .pathway import Pathway
from .gene import Gene
from .pathwayentry import PathwayEntry
|
StarcoderdataPython
|
1887275
|
import asyncio
import random
from utilities import *
async def fairies(lights):
background_color = rgb(32, 32, 255)
fairy_colors = [pretty, warm_white]
for light in lights:
light.set_state(background_color)
loop = PeriodicLoop(0.15, 120)
indices = [0, 1]
while not loop.done():
for i in range(len(indices)):
lights[indices[i]].set_state(background_color)
indices[i] = random.choice(list(neighbors[indices[i]]))
lights[indices[i]].set_state(fairy_colors[i])
await loop.next()
|
StarcoderdataPython
|
3209151
|
import json
def main():
with open("./_data/componentes-curriculares.json", "r") as file:
componentes = json.load(file)
for componente in componentes:
codigo = componente['codigo']
print('Gerando Componente', componente['codigo'], ' - ', componente['nome'])
text = f"---\ncodigo: {codigo}\nlayout: componente\n---\n"
local = f"./curso/componentes/{codigo.lower()}.html"
with open(local, "w") as file:
file.write(text)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4973347
|
<reponame>shibing624/rater
# -*- coding: utf-8 -*-
"""
@author:XuMing(<EMAIL>)
@description: User-based collaborative filtering algorithm
"""
import math
import os
import random
from sklearn.model_selection import train_test_split
import rater
from rater.utils.logger import timer
class Dataset:
"""
load data and split data
"""
def __init__(self, fp):
# fp: data file path
self.data = self.load_data(fp)
@timer
def load_data(self, fp):
data = []
with open(fp) as f:
for l in f:
data.append(tuple(map(int, l.strip().split('\t')[:2])))
return data
@timer
def split_data(self, seed=0):
"""
        :params: data, all loaded (user, item) entries
        :params: seed, random seed; keep it the same across different K
:return: train, test
"""
train, test = train_test_split(self.data, test_size=0.2, random_state=seed)
        # Convert to dict form: user -> set(items)
def convert_dict(data):
data_dict = {}
for user, item in data:
if user not in data_dict:
                    data_dict[user] = set()  # item set, deduplicated
data_dict[user].add(item)
            data_dict = {k: list(data_dict[k]) for k in data_dict}  # convert item sets to lists
return data_dict
return convert_dict(train), convert_dict(test)
# ### 2. Evaluation metrics
# 1. Precision
# 2. Recall
# 3. Coverage
# 4. Popularity (Novelty)
# TopN recommendation: ignore exact ratings, only predict whether a user will rate a movie
class Metric:
def __init__(self, train, test, get_recommendation):
"""
        :params: train, training data, used when computing coverage and novelty
        :params: test, test data
        :params: get_recommendation, function returning recommended items for a user
"""
self.train = train
self.test = test
self.get_recommendation = get_recommendation
self.recs = self.get_test_rec()
    # Generate recommendations for every user in the test set
def get_test_rec(self):
recs = {}
for user in self.test:
            rank = self.get_recommendation(user)  # recommendation list
recs[user] = rank
return recs
    # Precision metric
def precision(self):
total, hit = 0, 0
for user in self.test:
test_items = set(self.test[user]) # true list
rank = self.recs[user] # recommend list
for item, score in rank:
if item in test_items:
                    hit += 1  # hit
total += len(rank)
return round(hit / total, 4)
    # Recall metric
def recall(self):
total, hit = 0, 0
for user in self.test:
test_items = set(self.test[user])
rank = self.recs[user]
for item, score in rank:
if item in test_items:
hit += 1
total += len(test_items)
return round(hit / total, 4)
    # Coverage metric: reflects the algorithm's ability to surface long-tail items
def coverage(self):
all_item, recom_item = set(), set()
for user in self.test:
for item in self.train[user]:
                all_item.add(item)  # note: all_item only accumulates training-set items
rank = self.recs[user]
for item, score in rank:
recom_item.add(item)
return round(len(recom_item) / len(all_item), 4)
    # Novelty metric: lower average popularity means higher novelty (an item's popularity is the number of users who have interacted with it)
def popularity(self):
        # Compute item popularity
item_pop = {}
for user in self.train:
for item in self.train[user]:
if item not in item_pop:
item_pop[item] = 0
item_pop[item] += 1
num, pop = 0, 0
for user in self.test:
rank = self.recs[user]
for item, score in rank:
                # Item popularity follows a long-tail distribution; taking the log stabilizes the average and weakens the influence of very popular items
                pop += math.log(1 + item_pop[item])
                num += 1  # count recommended items
        return round(pop / num, 4)  # average popularity
def eval(self):
metric = {'Precision': self.precision(),
'Recall': self.recall(),
'Coverage': self.coverage(),
'Popularity': self.popularity()}
print('Metric:', metric)
return metric
# ## II. Algorithm implementations
# 1. Random
# 2. MostPopular
# 3. UserCF: UserCollaborationFilter
# 4. UserIIF
# 1. Random recommendation
def random_alg(train, K, N):
"""
    :params: train, training dataset
    :params: K, ignored
    :params: N, hyperparameter, number of TopN items to recommend
    :return: recommendation, the recommendation function
"""
items = {}
for user in train:
for item in train[user]:
items[item] = 1
def recommendation(user):
        # Randomly recommend N unseen items
user_items = set(train[user]) if user in train else set()
rec_items = {k: items[k] for k in items if k not in user_items}
rec_items = list(rec_items.items())
random.shuffle(rec_items)
return rec_items[:N]
return recommendation
# 2. Most-popular recommendation
def most_popular_alg(train, K, N):
"""
    :params: train, training dataset
    :params: K, ignored
    :params: N, hyperparameter, number of TopN items to recommend
    :return: recommendation, the recommendation function
"""
items = {}
for user in train:
for item in train[user]:
if item not in items:
items[item] = 0
            items[item] += 1  # count item frequency
def recommendation(user):
        # Recommend the N most popular unseen items
user_items = set(train[user])
rec_items = {k: items[k] for k in items if k not in user_items}
rec_items = list(sorted(rec_items.items(), key=lambda x: x[1], reverse=True))
        return rec_items[:N]  # topN most popular
return recommendation
# 3. Recommendation based on user cosine similarity
def user_cf_alg(train, K, N):
"""
    :params: train, training dataset
    :params: K, hyperparameter, number of TopK similar users
    :params: N, hyperparameter, number of TopN items to recommend
    :return: recommendation, the recommendation function
"""
    # Build the item -> user inverted index
item_users = {}
for user in train:
for item in train[user]:
if item not in item_users:
                item_users[item] = set()  # set, deduplicated
item_users[item].add(user)
item_users = {k: list(v) for k, v in item_users.items()}
    # Compute the user similarity matrix: count co-rated items between users
sim = {}
num = {}
for item in item_users:
users = item_users[item]
for i in range(len(users)):
u = users[i]
if u not in num:
num[u] = 0
num[u] += 1
if u not in sim:
sim[u] = {}
for j in range(len(users)):
if j == i: continue
v = users[j]
if v not in sim[u]:
sim[u][v] = 0
sim[u][v] += 1
for u in sim:
for v in sim[u]:
sim[u][v] /= math.sqrt(num[u] * num[v])
    # Sort by similarity
sorted_user_sim = {k: list(sorted(v.items(), key=lambda x: x[1], reverse=True)) for k, v in sim.items()}
    # Recommendation function: recommend items liked by the K users most similar to user (excluding items user has seen), ranked by the accumulated similarity of the users who like each item
def recommendation(user):
items = {}
seen_items = set(train[user])
for u, _ in sorted_user_sim[user][:K]:
for item in train[u]:
                # skip items the user has already seen
if item not in seen_items:
if item not in items:
items[item] = 0
                    items[item] += sim[user][u]  # accumulate user similarity
recs = list(sorted(items.items(), key=lambda x: x[1],
reverse=True))[:N]
return recs
return recommendation
# 4. Recommendation with improved user cosine similarity: shared behavior on unpopular items is stronger evidence of similar interests, so penalize by item popularity
# IIF: inverse item frequency
def user_iif_alg(train, K, N):
"""
    :params: train, training dataset
    :params: K, hyperparameter, number of TopK similar users
    :params: N, hyperparameter, number of TopN items to recommend
    :return: recommendation, the recommendation function
"""
    # Build the item -> user inverted index
item_users = {}
for user in train:
for item in train[user]:
if item not in item_users:
                item_users[item] = set()  # set, deduplicated
item_users[item].add(user)
item_users = {k: list(v) for k, v in item_users.items()}
    # Compute the user similarity matrix
sim = {}
num = {}
for item in item_users:
users = item_users[item]
for i in range(len(users)):
u = users[i]
if u not in num:
num[u] = 0
num[u] += 1
if u not in sim:
sim[u] = {}
for j in range(len(users)):
if j == i: continue
v = users[j]
if v not in sim[u]:
sim[u][v] = 0
                # Compared with UserCF, this is the main change: len(users) is how many users like the item u and v share (its popularity)
                # If the item is popular, it says little about the similarity of u and v
                # Conversely, an unpopular item is stronger evidence that u and v are similar
sim[u][v] += 1 / math.log(1 + len(users))
for u in sim:
for v in sim[u]:
sim[u][v] /= math.sqrt(num[u] * num[v])
    # Sort by similarity
sorted_user_sim = {k: list(sorted(v.items(), key=lambda x: x[1], reverse=True)) for k, v in sim.items()}
    # Build the recommendation function
def recommendation(user):
items = {}
seen_items = set(train[user])
for u, _ in sorted_user_sim[user][:K]:
for item in train[u]:
                # skip items the user has already seen
if item not in seen_items:
if item not in items:
items[item] = 0
items[item] += sim[user][u]
recs = list(sorted(items.items(), key=lambda x: x[1], reverse=True))[:N]
return recs
return recommendation
# ## III. Experiments
# 1. Random experiment
# 2. MostPopular experiment
# 3. UserCF experiment, K=[5, 10, 20, 40, 80, 160]
# 4. UserIIF experiment, K=80
class Experiment:
def __init__(self, M, K, N, fp='', name='UserCF'):
'''
        :params: M, number of experiment repetitions
        :params: K, number of TopK similar users
        :params: N, number of TopN recommended items
        :params: fp, data file path
        :params: name, recommendation algorithm name
'''
self.M = M
self.K = K
self.N = N
self.fp = fp
self.name = name
self.algs = {'Random': random_alg, 'MostPopular': most_popular_alg, 'UserCF': user_cf_alg,
'UserIIF': user_iif_alg}
from rater.datasets.movielens import Movielens
data = Movielens()
self.dataset = Dataset(data.ratings_file)
    # Run a single experiment
@timer
def worker(self, train, test):
"""
        :params: train, training dataset
        :params: test, test dataset
        :return: the value of each metric
"""
recommendation = self.algs[self.name](train, self.K, self.N)
metric = Metric(train, test, recommendation)
return metric.eval()
    # Average over multiple runs
@timer
def run(self):
metrics = {'Precision': 0, 'Recall': 0,
'Coverage': 0, 'Popularity': 0}
for i in range(self.M):
train, test = self.dataset.split_data(i)
print('Experiment {}:'.format(i))
metric = self.worker(train, test)
metrics = {k: metrics[k] + metric[k] for k in metrics}
metrics = {k: metrics[k] / self.M for k in metrics}
print('Average Result (M={}, K={}, N={}): {}'.format(self.M, self.K, self.N, metrics))
if __name__ == '__main__':
    # 1. Random experiment: precision and recall are very low; coverage is 100%
print('*' * 42)
print('random:')
M, N = 1, 10
    K = 10  # set only for consistency; any value works
random_exp = Experiment(M, K, N, name='Random')
    random_exp.run()  # note: random coverage should be 100%; some results exceed 100 because all_items only counts training-set items
    # 2. MostPopular experiment: higher precision and recall, but very low coverage and very high popularity
print('*' * 42)
print('most popular:')
M, N = 1, 10
    K = 10  # set only for consistency; any value works
mp_exp = Experiment(M, K, N, name='MostPopular')
mp_exp.run()
    # 3. UserCF experiment: note the effect of K
print('*' * 42)
print('user cf:')
M, N = 1, 10
for K in [5, 10, 20, 40, 80]:
cf_exp = Experiment(M, K, N, name='UserCF')
cf_exp.run()
    # 4. UserIIF experiment
print('*' * 42)
print('user iif:')
M, N = 1, 10
    K = 80  # consistent with the book
iif_exp = Experiment(M, K, N, name='UserIIF')
iif_exp.run()
# ## IV. Experiment results
#
# 1. Random experiment
#
# Running time: 185.54872608184814
#
# Average Result (M=8, K=0, N=10):
# {'Precision': 0.61, 'Recall': 0.29,
# 'Coverage': 100.0, 'Popularity': 4.38958}
#
# 2. MostPopular experiment
#
# Running time: 103.3697898387909
#
# Average Result (M=8, K=0, N=10):
# {'Precision': 12.83, 'Recall': 6.16,
# 'Coverage': 2.43, 'Popularity': 7.72326}
#
# 3. UserCF experiment
#
# Running time: 1456.9617431163788
#
# Average Result (M=8, K=5, N=10):
# {'Precision': 16.89, 'Recall': 8.11,
# 'Coverage': 52.09, 'Popularity': 6.8192915}
#
# Running time: 1416.0529160499573
#
# Average Result (M=8, K=10, N=10):
# {'Precision': 20.46, 'Recall': 9.83,
# 'Coverage': 41.64, 'Popularity': 6.979140375}
#
# Running time: 1463.8790090084076
#
# Average Result (M=8, K=20, N=10):
# {'Precision': 22.99, 'Recall': 11.04,
# 'Coverage': 32.78, 'Popularity': 7.102363}
#
# Running time: 1540.0677690505981
#
# Average Result (M=8, K=40, N=10):
# {'Precision': 24.54, 'Recall': 11.78,
# 'Coverage': 25.89, 'Popularity': 7.20221475}
#
# Running time: 1643.4831750392914
#
# Average Result (M=8, K=80, N=10):
# {'Precision': 25.11, 'Recall': 12.06,
# 'Coverage': 20.25, 'Popularity': 7.288118125}
#
# Running time: 1891.5019328594208
#
# Average Result (M=8, K=160, N=10):
# {'Precision': 24.81, 'Recall': 11.91,
# 'Coverage': 15.39, 'Popularity': 7.367559}
#
# 4. UserIIF experiment
#
# Running time: 3006.6924328804016
#
# Average Result (M=8, K=80, N=10):
# {'Precision': 25.22, 'Recall': 12.11,
# 'Coverage': 21.32, 'Popularity': 7.258887}
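# Worked example (illustration only): the user cosine similarity used above is
# sim(u, v) = |N(u) & N(v)| / sqrt(|N(u)| * |N(v)|); for N(u) = {a, b, c} and
# N(v) = {b, c, d}, the two co-rated items give sim = 2 / sqrt(9) ~ 0.667.
def _cosine_sim_demo():
    n_u, n_v = {'a', 'b', 'c'}, {'b', 'c', 'd'}
    return len(n_u & n_v) / math.sqrt(len(n_u) * len(n_v))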
|
StarcoderdataPython
|
115091
|
import enpix
import numpy as np
matrix = np.random.rand(341,765,3)
# print(matrix)
key="firstname.lastname@<EMAIL>.com-nameofuser-mobilenumber"
time=1000000
pic = enpix.encrypt(matrix,key,time)
# print(pic)
pic2 = enpix.decrypt(pic,key,time)
# print(pic2)
print((matrix==pic2).all())
|
StarcoderdataPython
|
3228877
|
import json
numbers = [2, 3, 5, 7, 11, 13]
filename = 'chapter_10/numbers.json'
with open(filename, 'w') as f_object:
json.dump(numbers, f_object)
|
StarcoderdataPython
|
3203287
|
from gym.envs.registration import register
# Pybullet environment + fixed goal + gym environment
register(
id='widowx_reacher-v1',
entry_point='widowx_env.envs.1_widowx_pybullet_fixed_gymEnv:WidowxEnv',
max_episode_steps=100)
# Pybullet environment + fixed goal + goal environment
register(
id='widowx_reacher-v2',
entry_point='widowx_env.envs.2_widowx_pybullet_fixed_goalEnv:WidowxEnv',
max_episode_steps=100)
# Pybullet environment + random goal + gym environment
register(
id='widowx_reacher-v3',
entry_point='widowx_env.envs.3_widowx_pybullet_random_gymEnv:WidowxEnv',
max_episode_steps=100)
# Pybullet environment + random goal + goal environment
register(
id='widowx_reacher-v4',
entry_point='widowx_env.envs.4_widowx_pybullet_random_goalEnv:WidowxEnv',
max_episode_steps=100)
# # Pybullet environment + fixed goal + gym environment + obs2
# register(id='widowx_reacher-v2',
# entry_point='widowx_env.envs.1_widowx_pybullet_fixed_gymEnv_obs2:WidowxEnv',
# max_episode_steps=100
# )
# # Pybullet environment + fixed goal + gym environment + obs3
# register(id='widowx_reacher-v3',
# entry_point='widowx_env.envs.1_widowx_pybullet_fixed_gymEnv_obs3:WidowxEnv',
# max_episode_steps=100
# )
# # Pybullet environment + fixed goal + gym environment + obs4
# register(id='widowx_reacher-v4',
# entry_point='widowx_env.envs.1_widowx_pybullet_fixed_gymEnv_obs4:WidowxEnv',
# max_episode_steps=100
# )
# # Pybullet environment + fixed goal + gym environment + obs5
# register(id='widowx_reacher-v5',
# entry_point='widowx_env.envs.1_widowx_pybullet_fixed_gymEnv_obs5:WidowxEnv',
# max_episode_steps=100
# )
# #############
# # Pybullet environment + fixed goal + gym environment + reward 2
# register(id='widowx_reacher-v9',
# entry_point='widowx_env.envs.5_widowx_pybullet_fixed_gymEnv_reward2:WidowxEnv',
# max_episode_steps=100
# )
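# Usage sketch (assumes gym and this package's pybullet dependencies are
# installed; the environment id is one of those registered above):
if __name__ == "__main__":
    import gym
    env = gym.make('widowx_reacher-v1')
    obs = env.reset()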
|
StarcoderdataPython
|
9626289
|
<reponame>SaVoAMP/stumpy
import numpy as np
import numpy.testing as npt
from stumpy import aamp_stimp, aamp_stimped
from dask.distributed import Client, LocalCluster
import pytest
import naive
T = [
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
]
n = [9, 10, 16]
@pytest.fixture(scope="module")
def dask_cluster():
cluster = LocalCluster(n_workers=2, threads_per_worker=2)
yield cluster
cluster.close()
@pytest.mark.parametrize("T", T)
def test_aamp_stimp_1_percent(T):
threshold = 0.2
percentage = 0.01
min_m = 3
n = T.shape[0] - min_m + 1
seed = np.random.randint(100000)
np.random.seed(seed)
pan = aamp_stimp(
T,
min_m=min_m,
max_m=None,
step=1,
percentage=percentage,
pre_scraamp=True,
)
for i in range(n):
pan.update()
ref_PAN = np.full((pan.M_.shape[0], T.shape[0]), fill_value=np.inf)
np.random.seed(seed)
for idx, m in enumerate(pan.M_[:n]):
zone = int(np.ceil(m / 4))
s = zone
tmp_P, tmp_I = naive.prescraamp(T, m, T, s=s, exclusion_zone=zone)
ref_mp = naive.scraamp(T, m, T, percentage, zone, True, s)
for i in range(ref_mp.shape[0]):
if tmp_P[i] < ref_mp[i, 0]:
ref_mp[i, 0] = tmp_P[i]
ref_mp[i, 1] = tmp_I[i]
ref_PAN[pan._bfs_indices[idx], : ref_mp.shape[0]] = ref_mp[:, 0]
# Compare raw pan
cmp_PAN = pan._PAN
naive.replace_inf(ref_PAN)
naive.replace_inf(cmp_PAN)
npt.assert_almost_equal(ref_PAN, cmp_PAN)
# Compare transformed pan
cmp_pan = pan.PAN_
ref_pan = naive.transform_pan(
pan._PAN,
pan._M,
threshold,
pan._bfs_indices,
pan._n_processed,
np.min(T),
np.max(T),
)
naive.replace_inf(ref_pan)
naive.replace_inf(cmp_pan)
npt.assert_almost_equal(ref_pan, cmp_pan)
@pytest.mark.parametrize("T", T)
def test_aamp_stimp_max_m(T):
threshold = 0.2
percentage = 0.01
min_m = 3
max_m = 5
n = T.shape[0] - min_m + 1
seed = np.random.randint(100000)
np.random.seed(seed)
pan = aamp_stimp(
T,
min_m=min_m,
max_m=max_m,
step=1,
percentage=percentage,
pre_scraamp=True,
)
for i in range(n):
pan.update()
ref_PAN = np.full((pan.M_.shape[0], T.shape[0]), fill_value=np.inf)
np.random.seed(seed)
for idx, m in enumerate(pan.M_[:n]):
zone = int(np.ceil(m / 4))
s = zone
tmp_P, tmp_I = naive.prescraamp(T, m, T, s=s, exclusion_zone=zone)
ref_mp = naive.scraamp(T, m, T, percentage, zone, True, s)
for i in range(ref_mp.shape[0]):
if tmp_P[i] < ref_mp[i, 0]:
ref_mp[i, 0] = tmp_P[i]
ref_mp[i, 1] = tmp_I[i]
ref_PAN[pan._bfs_indices[idx], : ref_mp.shape[0]] = ref_mp[:, 0]
# Compare raw pan
cmp_PAN = pan._PAN
naive.replace_inf(ref_PAN)
naive.replace_inf(cmp_PAN)
npt.assert_almost_equal(ref_PAN, cmp_PAN)
# Compare transformed pan
cmp_pan = pan.PAN_
ref_pan = naive.transform_pan(
pan._PAN,
pan._M,
threshold,
pan._bfs_indices,
pan._n_processed,
np.min(T),
np.max(T),
)
naive.replace_inf(ref_pan)
naive.replace_inf(cmp_pan)
npt.assert_almost_equal(ref_pan, cmp_pan)
@pytest.mark.parametrize("T", T)
def test_aamp_stimp_100_percent(T):
threshold = 0.2
percentage = 1.0
min_m = 3
n = T.shape[0] - min_m + 1
pan = aamp_stimp(
T,
min_m=min_m,
max_m=None,
step=1,
percentage=percentage,
pre_scraamp=True,
)
for i in range(n):
pan.update()
ref_PAN = np.full((pan.M_.shape[0], T.shape[0]), fill_value=np.inf)
for idx, m in enumerate(pan.M_[:n]):
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T, m, T_B=None, exclusion_zone=zone)
ref_PAN[pan._bfs_indices[idx], : ref_mp.shape[0]] = ref_mp[:, 0]
# Compare raw pan
cmp_PAN = pan._PAN
naive.replace_inf(ref_PAN)
naive.replace_inf(cmp_PAN)
npt.assert_almost_equal(ref_PAN, cmp_PAN)
# Compare transformed pan
cmp_pan = pan.PAN_
ref_pan = naive.transform_pan(
pan._PAN,
pan._M,
threshold,
pan._bfs_indices,
pan._n_processed,
np.min(T),
np.max(T),
)
naive.replace_inf(ref_pan)
naive.replace_inf(cmp_pan)
npt.assert_almost_equal(ref_pan, cmp_pan)
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T", T)
def test_aamp_stimped(T, dask_cluster):
with Client(dask_cluster) as dask_client:
threshold = 0.2
min_m = 3
n = T.shape[0] - min_m + 1
pan = aamp_stimped(
dask_client,
T,
min_m=min_m,
max_m=None,
step=1,
)
for i in range(n):
pan.update()
ref_PAN = np.full((pan.M_.shape[0], T.shape[0]), fill_value=np.inf)
for idx, m in enumerate(pan.M_[:n]):
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T, m, T_B=None, exclusion_zone=zone)
ref_PAN[pan._bfs_indices[idx], : ref_mp.shape[0]] = ref_mp[:, 0]
# Compare raw pan
cmp_PAN = pan._PAN
naive.replace_inf(ref_PAN)
naive.replace_inf(cmp_PAN)
npt.assert_almost_equal(ref_PAN, cmp_PAN)
# Compare transformed pan
cmp_pan = pan.PAN_
ref_pan = naive.transform_pan(
pan._PAN,
pan._M,
threshold,
pan._bfs_indices,
pan._n_processed,
np.min(T),
np.max(T),
)
naive.replace_inf(ref_pan)
naive.replace_inf(cmp_pan)
npt.assert_almost_equal(ref_pan, cmp_pan)
|
StarcoderdataPython
|
320191
|
<filename>metric-collector/service-readiness/kubeflow-readiness.py<gh_stars>1-10
import argparse
from time import sleep, time
import logging
import google.auth
import google.auth.app_engine
import google.auth.compute_engine.credentials
import google.auth.iam
from google.auth.transport.requests import Request
import google.oauth2.credentials
import google.oauth2.service_account
from prometheus_client import start_http_server, Gauge
import requests
from kubernetes import client, config
from kubernetes.client import V1Event, V1ObjectMeta
IAM_SCOPE = 'https://www.googleapis.com/auth/iam'
OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
METHOD = 'GET'
KUBEFLOW_AVAILABILITY = Gauge('kubeflow_availability', 'Signal of whether IAP protected kubeflow is available')
def metric_update(args, google_open_id_connect_token):
resp = requests.request(
METHOD, args.url,
headers={'Authorization': 'Bearer {}'.format(
google_open_id_connect_token)})
if resp.status_code == 200:
KUBEFLOW_AVAILABILITY.set(1)
return 1
else:
KUBEFLOW_AVAILABILITY.set(0)
return 0
def main(unparsed_args=None):
parser = argparse.ArgumentParser(
description="Output signal of kubeflow service readiness.")
parser.add_argument(
"--url",
default="",
type=str,
help="kubeflow IAP-protected url")
parser.add_argument(
"--client_id",
default="",
type=str,
help="Service account json credential file")
args = parser.parse_args(args=unparsed_args)
if args.url == "" or args.client_id == "":
logging.info("Url or client_id is empty, exit")
return
# Figure out what environment we're running in and get some preliminary
# information about the service account.
credentials, _ = google.auth.default(
scopes=[IAM_SCOPE])
if isinstance(credentials,
google.oauth2.credentials.Credentials):
raise Exception('make_iap_request is only supported for service '
'accounts.')
# For service account's using the Compute Engine metadata service,
# service_account_email isn't available until refresh is called.
credentials.refresh(Request())
signer_email = credentials.service_account_email
if isinstance(credentials,
google.auth.compute_engine.credentials.Credentials):
# Since the Compute Engine metadata service doesn't expose the service
# account key, we use the IAM signBlob API to sign instead.
# In order for this to work:
#
# 1. Your VM needs the https://www.googleapis.com/auth/iam scope.
# You can specify this specific scope when creating a VM
# through the API or gcloud. When using Cloud Console,
# you'll need to specify the "full access to all Cloud APIs"
# scope. A VM's scopes can only be specified at creation time.
#
# 2. The VM's default service account needs the "Service Account Actor"
# role. This can be found under the "Project" category in Cloud
# Console, or roles/iam.serviceAccountActor in gcloud.
signer = google.auth.iam.Signer(
Request(), credentials, signer_email)
else:
# A Signer object can sign a JWT using the service account's key.
signer = credentials.signer
# Construct OAuth 2.0 service account credentials using the signer
# and email acquired from the bootstrap credentials.
service_account_credentials = google.oauth2.service_account.Credentials(
signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={
'target_audience': args.client_id
})
token_refresh_time = 0
last_status = -1
config.load_incluster_config()
coreApi = client.CoreV1Api()
while True:
if time() > token_refresh_time:
# service_account_credentials gives us a JWT signed by the service
# account. Next, we use that to obtain an OpenID Connect token,
# which is a JWT signed by Google.
google_open_id_connect_token = get_google_open_id_connect_token(
service_account_credentials)
token_refresh_time = time() + 1800
url_status = metric_update(args, google_open_id_connect_token)
if url_status != last_status:
last_status = url_status
# get service centraldashboard, attach event to it.
svcs = coreApi.list_namespaced_service('kubeflow', label_selector="app=centraldashboard")
while len(svcs.to_dict()['items']) == 0:
logging.info("Service centraldashboard not ready...")
sleep(10)
svcs = coreApi.list_namespaced_service('kubeflow', label_selector="app=centraldashboard")
uid = svcs.to_dict()['items'][0]['metadata']['uid']
kf_status = "up" if url_status == 1 else "down"
new_event = V1Event(
action="Kubeflow service status update: " + kf_status,
api_version="v1",
kind="Event",
message="Service " + kf_status + "; service url: " + args.url,
reason="Kubeflow Service is " + kf_status,
involved_object=client.V1ObjectReference(
api_version="v1",
kind="Service",
name="centraldashboard",
namespace="kubeflow",
uid=uid
),
metadata=V1ObjectMeta(
generate_name='kubeflow-service.',
),
type="Normal"
)
event = coreApi.create_namespaced_event("kubeflow", new_event)
print("New status event created. action='%s'" % str(event.action))
# Update status every 10 sec
sleep(10)
def get_google_open_id_connect_token(service_account_credentials):
"""Get an OpenID Connect token issued by Google for the service account.
This function:
1. Generates a JWT signed with the service account's private key
containing a special "target_audience" claim.
2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1
has a target_audience claim, that endpoint will respond with
an OpenID Connect token for the service account -- in other words,
a JWT signed by *Google*. The aud claim in this JWT will be
set to the value from the target_audience claim in #1.
For more information, see
https://developers.google.com/identity/protocols/OAuth2ServiceAccount .
The HTTP/REST example on that page describes the JWT structure and
demonstrates how to call the token endpoint. (The example on that page
shows how to get an OAuth2 access token; this code is using a
modified version of it to get an OpenID Connect token.)
"""
service_account_jwt = (
service_account_credentials._make_authorization_grant_assertion())
    request = Request()  # imported above from google.auth.transport.requests
body = {
'assertion': service_account_jwt,
'grant_type': google.oauth2._client._JWT_GRANT_TYPE,
}
token_response = google.oauth2._client._token_endpoint_request(
request, OAUTH_TOKEN_URI, body)
return token_response['id_token']
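# Usage sketch (both values below are placeholders; the URL and the IAP
# OAuth client id depend on your deployment):
#
#   python kubeflow-readiness.py \
#       --url=https://kubeflow.example.com \
#       --client_id=<IAP_OAUTH_CLIENT_ID>
#
# The kubeflow_availability gauge is then exposed on port 8000 for
# Prometheus to scrape.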
if __name__ == '__main__':
start_http_server(8000)
main()
|
StarcoderdataPython
|
8161051
|
from battery.models import db, User, Entry, Comment
from flask import render_template, request, session, flash, redirect, url_for
from flask import g, jsonify, abort, Blueprint, current_app, send_file
from functools import wraps
from sqlalchemy.exc import IntegrityError
from werkzeug.utils import secure_filename
from datetime import datetime
from imghdr import test_png, test_jpeg, test_bmp, test_gif
from calendar import monthrange
from collections import defaultdict
import os
# There is no real benefit to using a blueprint yet;
# it just keeps views.py from depending on a specific app object.
bp = Blueprint("app", __name__)
def login_required(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
if g.user is None:
flash("You must log in first", "error")
return redirect(url_for("app.login", next=request.path))
return fn(*args, **kwargs)
return decorated_view
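# Decorators apply bottom-up, so @login_required (placed beneath @bp.route
# throughout this module) wraps the view before the route registers it.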
@bp.before_request
def load_user():
user_id = session.get("user_id")
if user_id is None:
g.user = None
else:
g.user = User.query.get(session["user_id"])
@bp.route("/")
def index():
entries = Entry.query.order_by(Entry.created_at.desc()).all()
return render_template("index.html", entries=entries)
@bp.route("/login", methods=["GET", "POST"])
def login():
if request.method == "GET":
return render_template("login.html")
username = request.form["username"]
password = request.form["password"]
if not username or not password:
flash("Username or password is empty", "error")
return render_template("login.html")
    user, authenticated = User.authenticate(db.session.query,
                                            username,
                                            password)
if authenticated:
session["user_id"] = user.id
flash("You were logged in", "info")
return redirect(url_for("app.index"))
else:
flash("Invalid username or password", "error")
return render_template("login.html")
@bp.route("/logout")
def logout():
session.pop("user_id", None)
flash("You were logged out", "info")
return redirect(url_for("app.index"))
@bp.route("/entry/new/", methods=["GET", "POST"])
@login_required
def post_entry():
if request.method == "GET":
return render_template("editor.html", destination=url_for("app.post_entry"))
title = request.form["title"]
content = request.form["content"]
save_as_draft = request.form.get("save-as-draft", False)
if not title:
flash("Title is empty", "error")
return render_template("editor.html",
content=content,
destination=url_for("app.post_entry"))
entry = Entry(title=request.form["title"],
content=request.form["content"],
user_id=session["user_id"],
is_public=not save_as_draft)
db.session.add(entry)
db.session.commit()
return redirect(url_for("app.show_entry", entry_id=entry.id))
@bp.route("/entry/<int:entry_id>/edit/", methods=["GET", "POST"])
@login_required
def edit_entry(entry_id):
entry = Entry.query.get(entry_id)
if entry is None:
flash("Entry not found", "error")
return redirect(url_for("app.index"))
if g.user.id != entry.user_id:
flash("Entry not found", "error")
return redirect(url_for("app.index"))
if request.method == "GET":
return render_template("editor.html",
destination=url_for("app.edit_entry", entry_id=entry_id),
title=entry.title,
content=entry.content)
title = request.form["title"]
content = request.form["content"]
save_as_draft = request.form.get("save-as-draft", False)
if not title:
flash("Title is empty", "error")
return render_template("editor.html",
content=content,
destination=url_for("app.edit_entry", entry_id=entry_id))
entry.title = request.form["title"]
entry.content = request.form["content"]
entry.is_public = not save_as_draft
db.session.commit()
return redirect(url_for("app.show_entry", entry_id=entry.id))
@bp.route("/entry/<int:entry_id>/")
def show_entry(entry_id):
entry = Entry.query.get(entry_id)
if entry is None:
flash("Entry not found", "error")
return redirect(url_for("app.index"))
if not entry.is_public and (g.user is None or g.user.id != entry.user_id):
flash("Entry not found", "error")
return redirect(url_for("app.index"))
comments = Comment.query.filter_by(entry_id=entry_id)
return render_template("entry.html", entry=entry, comments=comments)
@bp.route("/entry/<int:entry_id>/delete/")
@login_required
def delete_entry(entry_id):
entry = Entry.query.get(entry_id)
if entry is None:
flash("Entry not found", "error")
return redirect(url_for("app.index"))
if g.user.id != entry.user_id:
flash("Entry not found", "error")
return redirect(url_for("app.index"))
db.session.delete(entry)
db.session.commit()
return redirect(url_for("app.index"))
@bp.route("/entry/<int:entry_id>/comment/", methods=["POST"])
def post_comment(entry_id):
author = request.form["author"]
if not author:
author = None
comment = Comment(author=author,
content=request.form["content"],
entry_id=entry_id)
db.session.add(comment)
db.session.commit()
return redirect(url_for("app.show_entry", entry_id=entry_id))
@bp.route("/entry/<int:entry_id>/comment/<int:comment_id>/delete/")
@login_required
def delete_comment(entry_id, comment_id):
entry = Entry.query.get(entry_id)
if entry is None:
flash("Entry not found", "error")
return redirect(url_for("app.index"))
if g.user.id != entry.user_id:
flash("Entry not found", "error")
return redirect(url_for("app.index"))
comment = Comment.query.get(comment_id)
if comment is None:
flash("Comment not found", "error")
else:
db.session.delete(comment)
db.session.commit()
return redirect(url_for("app.show_entry", entry_id=entry_id))
@bp.route("/entry/search/")
def search_entries():
q = request.args["q"]
entries = Entry.query.filter(Entry.content.contains(q))
return render_template("index.html", entries=entries)
@bp.route("/entry/preview/", methods=["POST"])
@login_required
def show_preview():
title = request.form["title"]
content = request.form["content"]
return render_template("preview.html", title=title, content=content)
@bp.route("/data/<string:file_name>/", methods=["GET"])
def dowload_img(file_name):
upload_dir = current_app.config["UPLOAD_DIR"]
file_path = os.path.join(upload_dir, secure_filename(file_name))
return send_file(file_path)
@bp.route("/data/upload/", methods=["GET", "POST"])
def upload_img():
upload_dir = current_app.config["UPLOAD_DIR"]
if request.method == "GET":
files = os.listdir(upload_dir)
return render_template("upload.html", files=files)
file = request.files["file"]
head = file.read(20)
valid_type = False
for test in (test_png, test_jpeg, test_bmp, test_gif):
        # Passing None as the 2nd argument is fine: these test functions
        # only inspect the header bytes and ignore the file object.
        # They are defined at:
        # https://hg.python.org/cpython/file/3.6/Lib/imghdr.py
valid_type = valid_type or test(head, None)
if not valid_type:
flash("Invalid file type", "error")
else:
file_name = datetime.now().strftime("%Y%m%d_%H%M%S_") + secure_filename(file.filename)
        file.seek(0)  # rewind: file.read(20) above advanced the stream position
file.save(os.path.join(upload_dir, file_name))
return redirect(url_for("app.upload_img"))
@bp.route("/data/delete/<string:file_name>/", methods=["GET"])
def delete_img(file_name):
upload_dir = current_app.config["UPLOAD_DIR"]
file_path = os.path.join(upload_dir, file_name)
os.remove(file_path)
return redirect(url_for("app.upload_img"))
@bp.route("/about/", methods=["GET"])
def about():
return render_template("about.html")
def calc_archive_range(year, month=None, day=None):
# set month range
start_month = end_month = month
if month is None:
start_month = 1
end_month = 12
# set day range
start_day = end_day = day
if day is None:
start_day = 1
end_day = 31 if month is None else monthrange(year, month)[1]
return (start_month, start_day, end_month, end_day)
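# For example, calc_archive_range(2020, 2) returns (2, 1, 2, 29) since
# February 2020 is a leap month, and calc_archive_range(2020) returns
# (1, 1, 12, 31), i.e. the whole year.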
@bp.route("/archive/", methods=["GET"])
def archive():
entries = Entry.query.filter(Entry.is_public).order_by(Entry.created_at).all()
if not entries:
return render_template("archive.html", n_entries={})
oldest_datetime = entries[0].created_at
newest_datetime = entries[-1].created_at
    n_entries = defaultdict(list)
for year in range(newest_datetime.year, oldest_datetime.year-1, -1):
for month in range(12, 0, -1):
start_day = 1
_,end_day = monthrange(year, month)
            start_datetime = datetime(year, month, start_day, 0, 0, 0, 0)
            # 999999 µs is the true end of day; also count only public
            # entries so the totals match the archive listing
            end_datetime = datetime(year, month, end_day, 23, 59, 59, 999999)
            n = Entry.query.filter(start_datetime <= Entry.created_at,
                                   Entry.created_at <= end_datetime,
                                   Entry.is_public).count()
if n == 0:
continue
n_entries[year].append((month, n))
return render_template("archive.html", n_entries=n_entries)
@bp.route("/archive/<int:year>/<int:month>/<int:day>", methods=["GET"])
@bp.route("/archive/<int:year>/<int:month>", methods=["GET"])
@bp.route("/archive/<int:year>/", methods=["GET"])
def archive_with_datetime(year, month=None, day=None):
start_month, start_day, end_month, end_day = calc_archive_range(year, month, day)
# create datetime object
start_datetime = datetime(year, start_month, start_day, 0, 0, 0, 0)
    end_datetime = datetime(year, end_month, end_day, 23, 59, 59, 999999)
# find entries
entries = Entry.query.filter(start_datetime <= Entry.created_at,
Entry.created_at <= end_datetime,
Entry.is_public).all()
return render_template("index.html", entries=entries)
|
StarcoderdataPython
|
6567800
|
<reponame>Thom1729/st_package_reviewer<filename>st_package_reviewer/check/repo/check_tags.py
import re
from . import RepoChecker
class CheckSemverTags(RepoChecker):
def check(self):
if not self.semver_tags:
msg = "No semantic version tags found"
if not self.tags:
msg += " (no tags found at all)"
for tag in self.tags:
if re.search(r"(v|^)\d+\.\d+$", tag.name):
msg += " (semantic versions consist of exactly three numeric parts)"
break
self.fail(msg)
class CheckOnlyPrereleaseTags(RepoChecker):
def check(self):
if not self.semver_tags:
return
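        # for/else: the else clause runs only if the loop finishes without
        # "break", i.e. when every semver tag is a pre-release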
for sem_tag in self.semver_tags:
if sem_tag.version.prerelease is None:
break
else:
self.warn("Only found pre-release tags.")
|
StarcoderdataPython
|
390029
|
<reponame>catseye/Xoomonk
#!/usr/bin/env python
"""Reference interpreter for Xoomonk 1.0.
"""
from optparse import OptionParser
import re
import sys
DOLLAR_STORE = None
class XoomonkError(ValueError):
pass
class AST(object):
def __init__(self, type, children=None, value=None):
self.type = type
self.value = value
if children is not None:
self.children = children
else:
self.children = []
def add_child(self, item):
self.children.append(item)
def __repr__(self):
if self.value is None:
return 'AST(%r,%r)' % (self.type, self.children)
return 'AST(%r,value=%r)' % (self.type, self.value)
class Scanner(object):
"""A Scanner provides facilities for extracting successive
Xoomonk tokens from a string.
>>> a = Scanner(" {:= } foo ")
>>> a.token
'{'
>>> a.type
'operator'
>>> a.scan()
>>> a.on(":=")
True
>>> a.on_type('operator')
True
>>> a.check_type('identifier')
Traceback (most recent call last):
...
SyntaxError: Expected identifier, but found operator (':=')
>>> a.scan()
>>> a.consume(".")
False
>>> a.consume("}")
True
>>> a.expect("foo")
>>> a.type
'EOF'
>>> a.expect("bar")
Traceback (most recent call last):
...
SyntaxError: Expected 'bar', but found 'None'
"""
def __init__(self, text):
self.text = text
self.token = None
self.type = None
self.scan()
def scan_pattern(self, pattern, type, token_group=1, rest_group=2):
pattern = r'^(' + pattern + r')(.*?)$'
match = re.match(pattern, self.text, re.DOTALL)
if not match:
return False
else:
self.type = type
self.token = match.group(token_group)
self.text = match.group(rest_group)
return True
def scan(self):
self.scan_pattern(r'[ \t\n\r]*', 'whitespace')
if not self.text:
self.token = None
self.type = 'EOF'
return
if self.scan_pattern(r':=|\;|\{|\}|\*|\.|\$', 'operator'):
return
if self.scan_pattern(r'\d+', 'integer literal'):
return
if self.scan_pattern(r'\"(.*?)\"', 'string literal',
token_group=2, rest_group=3):
return
if self.scan_pattern(r'\w+', 'identifier'):
return
if self.scan_pattern(r'.', 'unknown character'):
return
else:
raise ValueError("this should never happen, self.text=(%s)" % self.text)
def expect(self, token):
if self.token == token:
self.scan()
else:
raise SyntaxError("Expected '%s', but found '%s'" %
(token, self.token))
def on(self, token):
return self.token == token
def on_type(self, type):
return self.type == type
def check_type(self, type):
if not self.type == type:
raise SyntaxError("Expected %s, but found %s ('%s')" %
(type, self.type, self.token))
def consume(self, token):
if self.token == token:
self.scan()
return True
else:
return False
# Parser
class Parser(object):
"""A Parser provides facilities for recognizing various
parts of a Xoomonk program based on Xoomonk's grammar.
>>> a = Parser("123")
>>> a.expr()
AST('IntLit',value=123)
>>> a = Parser("{ a := 123 }")
>>> a.expr()
AST('Block',[AST('Assignment',[AST('Ref',[AST('Identifier',value='a')]), AST('IntLit',value=123)])])
>>> a = Parser("a:=5 c:=4")
>>> a.program()
AST('Program',[AST('Assignment',[AST('Ref',[AST('Identifier',value='a')]), AST('IntLit',value=5)]), AST('Assignment',[AST('Ref',[AST('Identifier',value='c')]), AST('IntLit',value=4)])])
>>> a = Parser("a := { b := 1 }")
>>> a.program()
AST('Program',[AST('Assignment',[AST('Ref',[AST('Identifier',value='a')]), AST('Block',[AST('Assignment',[AST('Ref',[AST('Identifier',value='b')]), AST('IntLit',value=1)])])])])
"""
def __init__(self, text):
self.scanner = Scanner(text)
def program(self):
p = AST('Program')
while self.scanner.type != 'EOF':
p.add_child(self.stmt())
return p
def stmt(self):
if self.scanner.on("print"):
return self.print_stmt()
else:
return self.assign()
def assign(self):
r = self.ref()
self.scanner.expect(":=")
e = self.expr()
return AST('Assignment', [r, e])
def print_stmt(self):
s = None
self.scanner.expect("print")
if self.scanner.consume("string"):
self.scanner.check_type("string literal")
st = self.scanner.token
self.scanner.scan()
s = AST('PrintString', value=st)
elif self.scanner.consume("char"):
e = self.expr()
s = AST('PrintChar', [e])
else:
e = self.expr()
s = AST('Print', [e])
newline = True
if self.scanner.consume(";"):
newline = False
if newline:
s = AST('Newline', [s])
return s
def expr(self):
v = None
if self.scanner.on("{"):
v = self.block()
elif self.scanner.on_type('integer literal'):
v = AST('IntLit', value=int(self.scanner.token))
self.scanner.scan()
else:
v = self.ref()
if self.scanner.consume("*"):
v = AST('CopyOf', [v])
return v
def block(self):
b = AST('Block')
self.scanner.expect("{")
while not self.scanner.on("}"):
b.add_child(self.stmt())
self.scanner.expect("}")
return b
def ref(self):
r = AST('Ref')
r.add_child(self.name())
while self.scanner.consume("."):
r.add_child(self.name())
return r
def name(self):
if self.scanner.consume("$"):
return AST('Identifier', value='$')
else:
self.scanner.check_type("identifier")
id = self.scanner.token
self.scanner.scan()
return AST('Identifier', value=id)
# Runtime support for Xoomonk.
def demo(store):
print("demo!")
class MalingeringStore(object):
"""
>>> a = MalingeringStore(['a','b'], [], demo)
demo!
>>> a['a'] = 7
    >>> print(a['a'])
7
>>> a['c'] = 7
Traceback (most recent call last):
...
XoomonkError: Attempt to assign undefined variable c
>>> a = MalingeringStore(['a','b'], ['a'], demo)
>>> a['a'] = 7
demo!
>>> a = MalingeringStore(['a','b'], ['b'], demo)
>>> a['b']
Traceback (most recent call last):
...
XoomonkError: Attempt to access unassigned variable b
"""
def __init__(self, variables, unassigned, fun):
self.dict = {}
self.variables = variables
for variable in self.variables:
self.dict[variable] = 0
self.unassigned = unassigned
self.fun = fun
if not self.unassigned:
self.run()
def run(self):
self.fun(self)
def copy(self):
new = MalingeringStore(
set(self.variables), set(self.unassigned), self.fun
)
new.dict = self.dict.copy()
return new
def __getitem__(self, name):
if name not in self.variables:
raise XoomonkError("Attempt to access undefined variable %s" % name)
if name in self.unassigned:
raise XoomonkError("Attempt to access unassigned variable %s" % name)
return self.dict[name]
def __setitem__(self, name, value):
if name not in self.variables:
raise XoomonkError("Attempt to assign undefined variable %s" % name)
if name in self.unassigned:
self.dict[name] = value
self.unassigned.remove(name)
if not self.unassigned:
self.run()
else:
# either the variable being set is assigned within the
# block, or the store is saturated, so go ahead
self.dict[name] = value
def __str__(self):
l = []
for name in sorted(self.variables):
if name in self.unassigned:
value = '?'
else:
value = self.dict[name]
l.append("%s=%s" % (name, value))
return '[%s]' % ','.join(l)
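# A small sketch of the saturation mechanic (assumes nothing beyond the
# class above): the stored function fires as soon as the last unassigned
# variable receives a value.
#
#   def double(store):
#       store['result'] = store['x'] * 2
#
#   s = MalingeringStore(['x', 'result'], ['x'], double)
#   s['x'] = 21       # 'x' was the last unassigned variable, so double() runs
#   assert s['result'] == 42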
# Analysis
def find_used_variables(ast, s):
type = ast.type
if type == 'Program':
for child in ast.children:
find_used_variables(child, s)
elif type == 'Assignment':
find_used_variables(ast.children[1], s)
    elif type == 'PrintChar':
        # PrintChar has a single child (the expression to print)
        find_used_variables(ast.children[0], s)
elif type == 'Print':
find_used_variables(ast.children[0], s)
elif type == 'Newline':
find_used_variables(ast.children[0], s)
elif type == 'Ref':
name = ast.children[0].value
if name != '$':
s.add(name)
elif type == 'Block':
for child in ast.children:
find_used_variables(child, s)
def find_assigned_variables(ast, s):
type = ast.type
if type == 'Program':
for child in ast.children:
find_assigned_variables(child, s)
elif type == 'Assignment':
name = ast.children[0].children[0].value
s.add(name)
elif type == 'Block':
for child in ast.children:
find_assigned_variables(child, s)
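# For example, in { a := 1 b := a } the used set is {a} and the assigned set
# is {a, b}, so the block is saturated and can be evaluated immediately. In
# { result := x } the used set {x} is not covered by {result}, so the Block
# case in eval_xoomonk below builds a MalingeringStore instead.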
# Evaluation
def eval_xoomonk(ast, state):
type = ast.type
if type == 'Program':
for node in ast.children:
eval_xoomonk(node, state)
return 0
elif type == 'Assignment':
ref = ast.children[0]
store_to_use = state
num_children = len(ref.children)
if num_children > 1:
i = 0
while i <= num_children - 2:
name = ref.children[i].value
if name == '$':
store_to_use = DOLLAR_STORE
else:
store_to_use = store_to_use[name]
i += 1
name = ref.children[-1].value
if name == '$':
raise XoomonkError('Cannot assign to $')
value = eval_xoomonk(ast.children[1], state)
store_to_use[name] = value
return value
elif type == 'PrintString':
sys.stdout.write(ast.value)
elif type == 'PrintChar':
value = eval_xoomonk(ast.children[0], state)
sys.stdout.write(chr(value))
return 0
elif type == 'Print':
value = eval_xoomonk(ast.children[0], state)
sys.stdout.write(str(value))
return 0
elif type == 'Newline':
eval_xoomonk(ast.children[0], state)
sys.stdout.write('\n')
return 0
elif type == 'Ref':
store_to_use = state
num_children = len(ast.children)
if num_children > 1:
i = 0
while i <= num_children - 2:
name = ast.children[i].value
if name == '$':
store_to_use = DOLLAR_STORE
else:
store_to_use = store_to_use[name]
i += 1
name = ast.children[-1].value
if name == '$':
return DOLLAR_STORE
else:
try:
return store_to_use[name]
except KeyError as e:
raise XoomonkError('Attempt to access undefined variable %s' % name)
elif type == 'IntLit':
return ast.value
elif type == 'CopyOf':
value = eval_xoomonk(ast.children[0], state)
return value.copy()
elif type == 'Block':
# OK! What we need to do is to analyze the block to see what
# variables in it are assigned values in it.
# If all variables in the block are assigned values somewhere
# in the block, it is a saturated store, and we can evaluate
# the code in it immediately.
# If not, we create a MalingeringStore, and associate the
# code of the block with it. This object will cause the code
# of the block to be executed when the store finally does
# become saturated through assignments.
used_variables = set()
find_used_variables(ast, used_variables)
assigned_variables = set()
find_assigned_variables(ast, assigned_variables)
if assigned_variables >= used_variables:
return eval_block(ast, state)
else:
all_variables = used_variables | assigned_variables
unassigned_variables = used_variables - assigned_variables
store = MalingeringStore(
all_variables, unassigned_variables,
lambda self: eval_malingered_block(ast, self)
)
return store
else:
raise NotImplementedError("not an AST type I know: %s" % type)
def eval_block(block, enclosing_state):
state = {}
for child in block.children:
value = eval_xoomonk(child, state)
store = MalingeringStore(state.keys(), [], lambda store: store)
for varname in state:
store[varname] = state[varname]
return store
def eval_malingered_block(block, store):
for child in block.children:
value = eval_xoomonk(child, store)
return store
def open_dollar_store():
global DOLLAR_STORE
def add(store):
store['result'] = store['x'] + store['y']
return store
def sub(store):
store['result'] = store['x'] - store['y']
return store
def mul(store):
store['result'] = store['x'] * store['y']
return store
def div(store):
store['result'] = store['x'] // store['y']
return store
def gt(store):
if store['x'] > store['y']:
store['result'] = 1
else:
store['result'] = 0
def not_(store):
if store['x'] == 0:
store['result'] = 1
else:
store['result'] = 0
def if_(store):
if store['cond'] != 0:
store['then']['x'] = store['cond']
else:
store['else']['x'] = store['cond']
def loop(store):
done = False
while not done:
do = store['do'].copy()
do['x'] = 0
done = (do['continue'] == 0)
DOLLAR_STORE = {
'add': MalingeringStore(['x', 'y', 'result'], ['x', 'y'], add),
'sub': MalingeringStore(['x', 'y', 'result'], ['x', 'y'], sub),
'mul': MalingeringStore(['x', 'y', 'result'], ['x', 'y'], mul),
'div': MalingeringStore(['x', 'y', 'result'], ['x', 'y'], div),
'gt': MalingeringStore(['x', 'y', 'result'], ['x', 'y'], gt),
'not': MalingeringStore(['x', 'result'], ['x'], not_),
'if': MalingeringStore(['cond', 'then', 'else'],
['cond', 'then', 'else'], if_),
'loop':MalingeringStore(['do'], ['do'], loop),
}
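# Example Xoomonk program using a builtin store (a sketch; builtin names
# live under "$", and "*" takes a copy so the shared store stays pristine):
#
#   c := $.add*
#   c.x := 40
#   c.y := 2
#   print c.result
#
# Assigning c.y saturates the copy, $add's code runs, and 42 is printed.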
def main(argv):
optparser = OptionParser(__doc__)
optparser.add_option("-a", "--show-ast",
action="store_true", dest="show_ast", default=False,
help="show parsed AST before evaluation")
optparser.add_option("-e", "--raise-exceptions",
action="store_true", dest="raise_exceptions",
default=False,
help="don't convert exceptions to error messages")
optparser.add_option("-t", "--test",
action="store_true", dest="test", default=False,
help="run test cases and exit")
(options, args) = optparser.parse_args(argv[1:])
if options.test:
import doctest
        (fails, _) = doctest.testmod()
if fails == 0:
print("All tests passed.")
sys.exit(0)
else:
sys.exit(1)
file = open(args[0])
text = file.read()
file.close()
p = Parser(text)
ast = p.program()
if options.show_ast:
print(repr(ast))
open_dollar_store()
try:
result = eval_xoomonk(ast, {})
except XoomonkError as e:
if options.raise_exceptions:
raise
sys.stderr.write(str(e))
sys.stderr.write("\n")
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
|
StarcoderdataPython
|
8038943
|
<gh_stars>0
import json
import datetime
from collections import OrderedDict
def thodar_form_alter(form, post, entity = None):
entitier = IN.entitier
texter = IN.texter
thodar_name = IN.APP.config.thodar['name']
s_thodar_name = s(thodar_name)
if entity:
current_entity_id = entity.id
current_entity_type = entity.__type__
else:
current_entity_id = 0
current_entity_type = 'Content'
set = form.add('FieldSet', {
'id' : 'thodarset',
'title' : s(thodar_name),
'css' : ['i-form-row'],
'weight' : 40,
})
db_thodar = False
parent_entity_type = ''
parent_entity_id = 0
weight = 0
level = 0
if entity:
try:
thodar_info = IN.thodar.get_thodar_info(current_entity_type, current_entity_id)
if thodar_info:
parent_entity_type = thodar_info['parent_entity_type']
parent_entity_id = thodar_info['parent_entity_id']
weight = thodar_info['weight']
level = thodar_info['level']
db_thodar = True
except Exception as e:
IN.logger.debug()
if post:
thodar = post.get('thodar', 0) == '1'
else:
thodar = db_thodar
option_entity = None
if thodar:
thodar_type = post.get('thodar_type', None)
thodarcontent = post.get('thodarcontent', None)
thodar_weight = post.get('thodar_weight', None)
if thodar_type is None:
if not entity:
thodar_type = 'new'
else:
if not parent_entity_type or not parent_entity_id:
thodar_type = 'new'
else:
try:
option_entity = IN.thodar.get_previous_entity(parent_entity_type, parent_entity_id, current_entity_type, current_entity_id)
if not option_entity:
# use parent
option_entity = entitier.load_single(parent_entity_type, parent_entity_id)
if option_entity:
thodar_type = 'sub'
else:
thodar_type = 'thodar'
except Exception as e:
IN.logger.debug()
elif thodar_type == 'sub' or thodar_type == 'thodar':
# use the form posted value if available
if thodarcontent and thodarcontent.isnumeric():
t_entity = entitier.load_single(current_entity_type, thodarcontent)
if t_entity:
option_entity = t_entity
# or use db value
if not option_entity and parent_entity_type and parent_entity_id:
if thodar_type == 'sub':
option_entity = entitier.load_single(parent_entity_type, parent_entity_id)
elif thodar_type == 'thodar':
option_entity = IN.thodar.get_previous_entity(parent_entity_type, parent_entity_id, current_entity_type, current_entity_id)
if thodar_type is None:
# something went wrong
thodar = False
set.add('CheckBox', {
'label' : s('{thodar} content?', {'thodar' : s_thodar_name}),
'id' : 'thodar',
'value' : 1, # returned value if checked
'checked' : thodar,
'info' : s('Check this if this content is a {thodar}.', {'thodar' : s_thodar_name}),
'weight' : 0,
'css' : ['ajax'],
'attributes' : {'data-ajax_partial' : 1},
})
form.ajax_elements.append('thodarset')
if thodar:
set = set.add('FieldSet', {
'id' : 'thodartypeset',
'css' : ['i-form-row i-margin-left'],
'weight' : 1,
})
options = OrderedDict()
options['new'] = {
'label' : s('This is a new {thodar}', {'thodar' : s_thodar_name}),
}
options['thodar'] = {
'label' : s('Add this as a continuity to another content'),
'info' : s('Part 3 may be a continuity to Part 2')
}
options['sub'] = {
'label' : s('Add this as a sub of another content'),
'info' : s('Part 3.1 may be a sub of Part 3')
}
set.add('RadioBoxes', {
'id' : 'thodar_type',
'name' : 'thodar_type',
'options' : options,
'value' : thodar_type,
'css' : ['i-width-1-1 i-form-large'],
'weight' : 1,
'child_additional_data' : {
'css' : ['ajax'],
'attributes' : {'data-ajax_partial' : 1},
}
})
if thodar_type != 'new':
set = set.add('FieldSet', {
'id' : 'thodarcontentset',
'css' : ['i-form-row i-margin-left'],
'weight' : 3,
})
if thodar_type == 'sub':
thodarcontent_info = s('The content will be added under the selected content.')
thodarcontent_title = s('Under which content?')
else:
thodarcontent_info = s('The content will be the continuity of the selected content.')
thodarcontent_title = s('Continuity of which content?')
thodarcontent_options = {}
thodarcontent_value = None
#if thodarcontent and thodarcontent.isnumeric():
#t_entity = entitier.load_single(current_entity_type, thodarcontent)
#if t_entity:
#option_entity = t_entity
if option_entity:
thodarcontent_value = option_entity.id
thodarcontent_options = {
option_entity.id : texter.format(entitier.entity_title(option_entity), 'nochange')
}
if entity:
extra = {
'content_type' : current_entity_type,
'nabar_id' : entity.nabar_id,
'exclude' : {
'content_type' : current_entity_type,
'entity_id' : current_entity_id,
}
}
else:
extra = {
'content_type' : current_entity_type,
}
set.add('HTMLSelect', {
'id' : 'thodarcontent',
'name' : 'thodarcontent',
'title' : thodarcontent_title,
'value' : thodarcontent_value,
'options' : thodarcontent_options,
'css' : ['autocomplete i-width-1-1'], # ajax
'multiple' : False,
'required' : True,
'validation_rule' : ['NotEmpty', 'Content is required.'],
'attributes' : {
#'data-ajax_partial' : 1,
'data-autocomplete_max_items' : 1,
'data-autocomplete_create' : '0',
'data-autocomplete_url' : ''.join(('/thodar/autocomplete')),
'data-autocomplete_url_data' : json.dumps(extra, skipkeys = True, ensure_ascii = False),
},
'info' : thodarcontent_info,
'weight' : 1,
})
if thodar_type == 'sub':
if weight == 0 and thodar_weight is None and parent_entity_type and parent_entity_id:
#'weight' # max weight + 1
cursor = IN.db.select({
'table' : 'entity.thodar',
'columns' : ['max(weight) as weight'],
'where' : [
['parent_entity_type', parent_entity_type],
['parent_entity_id', parent_entity_id],
],
}).execute()
                    if cursor.rowcount == 1:
                        max_weight = cursor.fetchone()['weight']
                        # NULL check instead of "or -1", which would also reset
                        # a legitimate max weight of 0 back to -1
                        thodar_weight = (max_weight if max_weight is not None else -1) + 1
set.add('TextBoxNumber', {
'id' : 'thodar_weight',
'title' : s('Weight'),
'value' : thodar_weight or weight,
'css' : ['i-form-large'],
'weight' : 2,
'info' : s('In which position this content should be added?'),
})
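# Ordering note (inferred from the queries above and below): children of a
# parent are kept sorted by "weight", and a new entry is appended at
# max(weight) + 1, with a NULL max (no children yet) mapping to -1 so the
# first child gets weight 0.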
def thodar_form_process_submit(form, post, entity):
''''''
    try:
if form.has_errors:
return
thodar = form['thodarset']['thodar'].checked
db = IN.db
if not thodar:
# remove thodar
cursor = db.delete({
'table' : 'entity.thodar',
'where' : [
['entity_type', entity.__type__],
['entity_id', entity.id],
]
}).execute()
db.connection.commit()
return
parent_entity_id = 0
weight = 0
level = 0
thodar_type = form['thodarset']['thodartypeset']['thodar_type'].value
if thodar_type != 'new':
thodarcontent_id = form['thodarset']['thodartypeset']['thodarcontentset']['thodarcontent'].value
if not thodarcontent_id:
thodarcontent_id = 0
if thodar_type == 'sub':
weight = form['thodarset']['thodartypeset']['thodarcontentset']['thodar_weight'].value
if weight is None:
weight = 0
elif type(weight) is str:
if weight.isnumeric():
weight = int(weight)
else:
weight = 0
else:
thodarcontent_id = 0
# test if exists
cursor = IN.db.select({
'table' : 'entity.thodar',
'columns' : [
'parent_entity_type',
'parent_entity_id',
'weight',
'level',
],
'where' : [
['entity_type', entity.__type__],
['entity_id', entity.id],
],
}).execute()
if cursor.rowcount > 0:
# update
set = [
['parent_entity_type', entity.__type__],
]
if thodar_type == 'new':
set += [
['parent_entity_id', parent_entity_id],
['weight', weight],
['level', level],
]
elif thodar_type == 'sub':
parent_entity_id = thodarcontent_id
parent_thodar_info = IN.thodar.get_thodar_info(entity.__type__, parent_entity_id)
if parent_thodar_info:
level = parent_thodar_info['level'] + 1
set += [
['parent_entity_id', parent_entity_id], # sub of selected's
['weight', weight],
['level', level],
]
else:
thodar_thodar_info = IN.thodar.get_thodar_info(entity.__type__, thodarcontent_id)
if thodar_thodar_info:
parent_entity_id = thodar_thodar_info['parent_entity_id']
weight = thodar_thodar_info['weight'] + 1
level = thodar_thodar_info['level']
# if no parent, add as sub
if not parent_entity_id:
parent_entity_id = thodarcontent_id
level += 1
#'weight' # total child + 1
cursor = IN.db.select({
'table' : 'entity.thodar',
'columns' : ['max(weight) as weight'],
'where' : [
['parent_entity_type', entity.__type__],
['parent_entity_id', parent_entity_id],
],
}).execute()
                        if cursor.rowcount == 1:
                            max_weight = cursor.fetchone()['weight']
                            # NULL check instead of "or -1", which would clobber a legitimate max of 0
                            weight = (max_weight if max_weight is not None else -1) + 1
set += [
['parent_entity_id', parent_entity_id], # sub of selected's parent
['weight', weight],
['level', level],
]
cursor = IN.db.update({
'table' : 'entity.thodar',
'set' : set,
'where' : [
['entity_type', entity.__type__],
['entity_id', entity.id],
],
}).execute()
db.connection.commit()
else:
# insert
now = datetime.datetime.now()
values = [
entity.__type__, #'type',
now, #'created',
1, #'status',
entity.nabar_id, #'nabar_id',
entity.__type__, #'entity_type',
entity.id, #'entity_id',
entity.__type__, #'parent_entity_type',
]
if thodar_type == 'sub':
#'parent_entity_id',
parent_entity_id = thodarcontent_id
parent_thodar_info = IN.thodar.get_thodar_info(entity.__type__, parent_entity_id)
if parent_thodar_info:
#'level',
level = parent_thodar_info['level'] + 1
##'weight' # max weight + 1
#cursor = IN.db.select({
#'table' : 'entity.thodar',
#'columns' : ['max(weight) as weight'],
#'where' : [
#['parent_entity_type', entity.__type__],
#['parent_entity_id', parent_entity_id],
#],
#}).execute()
#if cursor.rowcount == 1:
#weight = cursor.fetchone()['weight'] or -1
#weight += 1
elif thodar_type == 'thodar':
thodar_thodar_info = IN.thodar.get_thodar_info(entity.__type__, thodarcontent_id)
if thodar_thodar_info:
#'parent_entity_id',
parent_entity_id = thodar_thodar_info['parent_entity_id']
#'weight'
weight = thodar_thodar_info['weight'] + 1
#'level',
level = thodar_thodar_info['level']
# if no parent, add as sub
if not parent_entity_id:
parent_entity_id = thodarcontent_id
level += 1
#'weight' # total child + 1
cursor = IN.db.select({
'table' : 'entity.thodar',
'columns' : ['max(weight) as weight'],
'where' : [
['parent_entity_type', entity.__type__],
['parent_entity_id', parent_entity_id],
],
}).execute()
                        if cursor.rowcount == 1:
                            max_weight = cursor.fetchone()['weight']
                            # NULL check instead of "or -1", which would clobber a legitimate max of 0
                            weight = (max_weight if max_weight is not None else -1) + 1
else:
# new
pass
values += [parent_entity_id, weight, level]
cursor = IN.db.insert({
'table' : 'entity.thodar',
'columns' : [
'type',
'created',
'status',
'nabar_id',
'entity_type',
'entity_id',
'parent_entity_type',
'parent_entity_id',
'weight',
'level',
]
}).execute([values])
db.connection.commit()
except Exception as e:
IN.logger.debug()
@IN.hook
def form_load_ContentAddForm(form, post, args):
thodar_form_alter(form, post)
@IN.hook
def form_load_ContentEditForm(form, post, args):
thodar_form_alter(form, post, form.entity)
@IN.hook
def form_process_ContentAddForm_validate(form, post):
''''''
@IN.hook
def form_process_ContentEditForm_validate(form, post):
''''''
@IN.hook
def form_process_ContentAddForm_submit(form, post):
''''''
entity = form.processed_data['entity']
thodar_form_process_submit(form, post, entity)
@IN.hook
def form_process_ContentEditForm_submit(form, post):
''''''
entity = form.processed_data['entity']
thodar_form_process_submit(form, post, entity)
|
StarcoderdataPython
|
12536
|
from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer
from client.config import config as c, language as l
from discord.ext import commands, tasks
from client.external.hiscores import hiscores_xp
from PIL import Image, ImageDraw, ImageFont
import discord, locale
class xp_tracker(commands.Cog):
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
name = 'xp_tracker'
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
@staticmethod
async def fun_xptracker(ctx):
try:
path = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
guild_l = await origin.get_language(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
if ctx.message.content == '.xptracker':
path3 = c.ORIGIN_PATH['embed.tracker.json']
json_string = await json_manager.get_json(path3)
new_json_string = {'data': []}
                    for key, value in json_string[guild_l]['xp_tracker']['tracker'].items():
                        # both branches of the original if/else were identical;
                        # a single append covers every key
                        new_json_string['data'].append({
                            'name{}'.format(key): value['name'],
                            'value{}'.format(key): value['value']
                        })
await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, c.CLIENT_ICON, l.xp_tracker[guild_l]['embed_1'].format(ctx.guild.name), new_json_string['data'], False)
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
except Exception as error:
await exception.error(error)
async def fun_addxpevent(self, ctx):
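        # Command format (inferred from the index usage below; a sketch):
        # .addxpevent <xp_target> <skill> <prize_count> <date_start>
        #             <date_end> <time_start> <time_end> <description...>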
try:
path1 = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
guild_l = await origin.get_language(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path1, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
STRING = str(ctx.message.content).split(' ')
if len(STRING) >= 9:
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
server_config = await json_manager.get_json(path)
LIST1 = self.PRE
LIST2 = self.NAME
LIST3 = self.ICON
DATA1 = await json_manager.get_data(path2)
ID = await origin.randomize()
                    STATUS = True
                    while STATUS:
                        # reset the collision flag each pass; otherwise a single
                        # ID collision would make this loop spin forever
                        STATUS2 = False
                        for data in DATA1:
                            if data['id'] == ID:
                                STATUS2 = True
                        if not STATUS2:
                            STATUS = False
                        else:
                            ID = await origin.randomize()
EXTRA = ''
NAME = ''
for value in LIST2:
if str(value).lower() == STRING[2].lower():
NAME = str(value)
for index, event in enumerate(LIST1):
if STRING[2] == event:
RUSH = None
if STRING[1].isdigit() and int(STRING[1]) > 1:
RUSH = l.xp_tracker[guild_l]['configuration']['rush_point'].format(locale.format_string('%d', int(STRING[1]), grouping=True))
path4 = c.ORIGIN_PATH['embed.tracker.json']
DESCRIPTION = l.xp_tracker[guild_l]['description_1'].format(
ctx.author.mention,
STRING[4], STRING[6], NAME, STRING[5] if not RUSH else l.xp_tracker[guild_l]['extra_4'], STRING[7] if not RUSH else l.xp_tracker[guild_l]['extra_4'], RUSH if RUSH else ''
)
if len(STRING) >= 8:
for value in STRING[8:]:
EXTRA += '{} '.format(value)
json_string = await json_manager.get_json(path4)
new_json_string = {'data': []}
for key, value in json_string[guild_l]['xp_tracker']['addevent'].items():
if int(key) == 1:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): str(value['value']).format(EXTRA)
})
if int(key) == 2:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
if STRING[1].isdigit():
mode_type = 0
if int(STRING[1]) == c.EVENT_MODE[0]:
mode_type = 1
elif int(STRING[1]) >= c.EVENT_MODE[1]:
mode_type = 2
EVENT_CHANNEL = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['events'])
embed = await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, LIST3[index], l.xp_tracker[guild_l]['embed_2'].format(ctx.guild.name), new_json_string['data'], False, False, EVENT_CHANNEL, DESCRIPTION)
json_string = {'id': ID, 'user_id': ctx.author.id, 'message_id': embed.id, 'event_name': STRING[2], 'xp_target': int(STRING[1]), 'prize_count': int(STRING[3]), 'date_start': STRING[4], 'date_end': STRING[5], 'time_start': int(STRING[6]), 'time_end': int(STRING[7]), 'participants': 0, 'status': 0, 'type': mode_type, 'win_message': 0}
await json_manager.create(path2, json_string)
await ctx.author.send(l.xp_tracker[guild_l]['msg_success_1'])
CHANNEL1 = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['chat0'])
CHANNEL2 = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['chat1'])
if CHANNEL1:
await CHANNEL1.send(l.xp_tracker[guild_l]['msg_post_1'].format(NAME, server_config['events']))
if CHANNEL2:
await CHANNEL2.send(l.xp_tracker[guild_l]['msg_post_1'].format(NAME, server_config['events']))
else:
await ctx.author.send(l.xp_tracker[guild_l]['msg_badformat_1'])
else:
await ctx.author.send(l.xp_tracker[guild_l]['msg_badformat_1'])
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
except Exception as error:
await exception.error(error)
@staticmethod
async def fun_removeallxp(ctx, system=None):
try:
guild_l = await origin.get_language(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path1 = c.GUILD_PATH['special_member.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path2 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path3 = c.GUILD_PATH['event.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
LIST1 = await json_manager.get_data(path3)
NEW_LIST1 = {'data': []}
NEW_LIST2 = {'data': []}
if hasattr(ctx, 'guild'):
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path1, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
for data in LIST1:
if data['type'] == 0 and data['status'] == 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 3 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 4 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
await json_manager.clear_and_update(path2, NEW_LIST1)
await json_manager.clear_and_update(path3, NEW_LIST2)
await ctx.author.send(l.xp_tracker[guild_l]['msg_success_2'])
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
elif system == 1:
if LIST1:
for data in LIST1:
if data['type'] == 0 and data['status'] == 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 3 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 4 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
await json_manager.clear_and_update(path2, NEW_LIST1)
await json_manager.clear_and_update(path3, NEW_LIST2)
except Exception as error:
await exception.error(error)
async def fun_axp(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
STRING = str(ctx.message.content).split(' ')
if len(STRING) >= 2:
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path1 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
server_config = await json_manager.get_json(path)
LIST1 = await json_manager.get_data(path1)
LIST2 = await json_manager.get_data(path2)
CHECK = True
user = self.client.get_user(ctx.author.id)
STATUS1 = False
STATUS2 = False
EVENT_NAME = []
EVENT_LIST_DATA = []
SAFE_CHECK = 0
userName = ''
for name in STRING[1:]:
userName += '{} '.format(name)
userName = userName.replace('_', ' ')
userName = userName.rstrip()
for value in LIST1:
if value['user_id'] == ctx.author.id or value['user_rsn'] == userName:
STATUS1 = True
if not STATUS1:
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
STATUS2 = True
EVENT_NAME.append(value2['event_name'])
SUM = value2['participants'] + 1
EVENT_LIST_DATA.append({'id': value2['id'], 'type': value2['type'], 'sum': SUM})
if STATUS2:
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
USERNAME = USERNAME.replace('%20', ' ')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
json_string = {'user_id': ctx.author.id, 'user_username': ctx.author.mention, 'user_rsn': userName}
for value in EVENT_NAME:
json_string.update({value: USER.stats[value]['experience']})
json_string.update({'{}_current'.format(value): USER.stats[value]['experience']})
await json_manager.create(path1, json_string)
for event_data in EVENT_LIST_DATA:
await json_manager.update(path2, 'id', event_data['id'], 'participants', event_data['sum'])
path4 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
role_id = await ini_manager.get_data('SECTION1', 'EVENT_ROLE', path4)
role = await discord_manager.get_role(self.client, ctx.guild.id, int(role_id))
if role:
user = await discord_manager.get_member(self.client, ctx.guild.id, ctx.author.id)
await user.add_roles(role, reason='{}'.format(c.DISCORD_MESSAGES['event_role_added']), atomic=True)
await ctx.send(l.xp_tracker[guild_l]['msg_1'].format(USERNAME, server_config['events']))
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
await user.send(l.xp_tracker[guild_l]['msg_2'])
else:
EVENT_STATUS = False
MEMBER_DATA = None
for MEMBER in LIST1:
if ctx.author.id == MEMBER['user_id']:
MEMBER_DATA = MEMBER
for EVENT in LIST2:
for key, value in MEMBER_DATA.items():
if (EVENT['type'] == 1 or EVENT['type'] == 2) and key == EVENT['event_name']:
EVENT_STATUS = True
if not EVENT_STATUS and (EVENT['type'] == 1 or EVENT['type'] == 2):
EVENT_STATUS = False
CHECK = True
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
target_keys = ['{}'.format(EVENT['event_name']), '{}_current'.format(EVENT['event_name'])]
target_values = [USER.stats[EVENT['event_name']]['experience'], USER.stats[EVENT['event_name']]['experience']]
await json_manager.update(path1, 'user_id', ctx.author.id, target_keys, target_values)
await user.send(l.xp_tracker[guild_l]['msg_6'].format(str(EVENT['event_name']).capitalize()))
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
EVENT_NAME.append(value2['event_name'])
SUM = value2['participants'] + 1
EVENT_LIST_DATA.append({'id': value2['id'], 'type': value2['type'], 'sum': SUM})
for event_data in EVENT_LIST_DATA:
await json_manager.update(path2, 'id', event_data['id'], 'participants', event_data['sum'])
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
EVENT_STATUS = False
await user.send(l.xp_tracker[guild_l]['msg_7'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_badformat_2'].format(ctx.author.mention))
except Exception as error:
await exception.error(error)
async def fun_xpupdate(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
guild_t = await origin.get_region(ctx.guild.id)
path1 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
LIST1 = await json_manager.get_data(path1)
LIST2 = await json_manager.get_data(path2)
CHECK = True
user = self.client.get_user(ctx.author.id)
guild_current = await server_timer.get_current_time(guild_t)
STATUS1 = False
STATUS2 = False
EVENT_NAME = []
SAFE_CHECK = 0
MEMBER = None
userName = ''
for value in LIST1:
if value['user_id'] == ctx.author.id:
STATUS1 = True
userName = value['user_rsn']
MEMBER = value
if STATUS1:
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
STATUS2 = True
EVENT_NAME.append(value2['event_name'])
if STATUS2:
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
for value in EVENT_NAME:
await json_manager.update(path1, 'user_id', ctx.author.id, '{}_current'.format(value), USER.stats[value]['experience'])
client_message = 'Guild id: {} | Event: {} | RSN: {} | Registration XP: {} | Current XP: {} | Guild time: {} | Status: {}'.format(ctx.guild.id, value, userName, MEMBER[value], USER.stats[value]['experience'], guild_current.strftime('%H:%M'), 'XP self update')
await console_interface.console_message('XP self update', client_message)
await user.send(l.xp_tracker[guild_l]['msg_success_4'])
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_3'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_4'].format(userName))
else:
await user.send(l.xp_tracker[guild_l]['msg_2'])
else:
await user.send(l.xp_tracker[guild_l]['msg_error_5'])
except Exception as error:
await exception.error(error)
async def fun_xprank(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path1 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
path3 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
ini = await ini_manager.get_ini(path1)
LIST1 = self.PNG
LIST2 = self.PRE
INFO_PANEL_IMAGE = self.INFO_PANEL_IMAGE
INFO_PANEL_FIRST_IMAGE = self.INFO_PANEL_FIRST_IMAGE
INFO_PANEL_SECOND_IMAGE = self.INFO_PANEL_SECOND_IMAGE
INFO_PANEL_THIRD_IMAGE = self.INFO_PANEL_THIRD_IMAGE
COLOR_PLACE_FIRST = (255, 30, 215)
COLOR_PLACE_SECOND = (0, 174, 255)
COLOR_PLACE_THIRD = (255, 31, 31)
COLOR_PLACE_DEFAULT = (0, 239, 0)
FONT_PATH = self.FONT_PATH
INFO_PANEL_OBJECT = None
RANK = 0
sum = None
CHANNEL_PERMISSIONS = int(ini['CHANNEL_PERMISSIONS']['STATUS'])
server_config = await json_manager.get_json(path)
CHANNEL_STATUS = True
            if CHANNEL_PERMISSIONS != 1 and ctx.message.channel.id == server_config['chat0']:
                CHANNEL_STATUS = False
if CHANNEL_STATUS:
STRING = str(ctx.message.content).split(' ')
def get_id(data_value):
return int(data_value.get('sum'))
if len(STRING) == 1:
user = self.client.get_user(ctx.author.id)
else:
DCID = await origin.find_and_replace(STRING[1])
user = self.client.get_user(DCID)
TEMP_DATA = await json_manager.get_data(path2)
DATA1 = []
DATA2 = await json_manager.get_data(path3)
DATA3 = []
STATUS = None
for value in TEMP_DATA:
if value['type'] == 1 or value['type'] == 2:
DATA1.append(value)
if DATA1:
for index, data in enumerate(DATA1):
if DATA2:
for index2, data2 in enumerate(DATA2):
for key, value in data2.items():
if str(data['event_name']) == str(key):
sum = data2['{}_current'.format(key)] - data2[key]
DATA3.append({'user_rsn': data2['user_rsn'], 'user_id': data2['user_id'], 'sum': sum})
for index3, value3 in enumerate(LIST2):
if str(value3) == str(key):
INFO_PANEL_OBJECT = LIST1[index3]
DATA3.sort(key=get_id, reverse=True)
for index3, data3 in enumerate(DATA3):
RANK += 1
if RANK == 1:
PLACE_IMAGE = INFO_PANEL_FIRST_IMAGE
PLACE_COLOR = COLOR_PLACE_FIRST
elif RANK == 2:
PLACE_IMAGE = INFO_PANEL_SECOND_IMAGE
PLACE_COLOR = COLOR_PLACE_SECOND
elif RANK == 3:
PLACE_IMAGE = INFO_PANEL_THIRD_IMAGE
PLACE_COLOR = COLOR_PLACE_THIRD
else:
PLACE_IMAGE = INFO_PANEL_IMAGE
PLACE_COLOR = COLOR_PLACE_DEFAULT
if hasattr(user, 'id'):
if user.id == data3['user_id']:
with Image.open(PLACE_IMAGE).convert('RGBA') as im:
with Image.open(INFO_PANEL_OBJECT).convert('RGBA') as im2:
size1 = im.size
size2 = im2.size
y = int(size1[1] / 2) - int(size2[1] / 2)
im.paste(im2, (18, y), im2)
draw = ImageDraw.Draw(im)
font = ImageFont.truetype(FONT_PATH, 16)
draw.text((50, y - 12), l.xp_tracker[guild_l]['configuration']['rsn'], PLACE_COLOR, font=font)
draw.text((50, y + 2), l.xp_tracker[guild_l]['configuration']['rank'], PLACE_COLOR, font=font)
draw.text((50, y + 18), l.xp_tracker[guild_l]['configuration']['xp'], PLACE_COLOR, font=font)
draw.text((110 if guild_l == 'LT' else 95, y - 12), '{}'.format(data3['user_rsn']), (255, 255, 255), font=font)
draw.text((130 if guild_l == 'LT' else 100, y + 2), '{}'.format(RANK), (255, 255, 255), font=font)
draw.text((98 if guild_l == 'LT' else 70, y + 18), '{} XP'.format(locale.format_string('%d', data3['sum'], grouping=True)), (255, 255, 255), font=font)
TEMP_FILE = '{}_{}_{}.png'.format(data3['user_rsn'], data['event_name'], sum)
im.save(TEMP_FILE, 'PNG')
rank = open(TEMP_FILE, 'rb')
await ctx.send(file=discord.File(rank))
rank.close()
await file_manager.delete_file(TEMP_FILE)
STATUS = True
if not STATUS:
await ctx.send(l.xp_tracker[guild_l]['msg_error_6'].format(ctx.author.mention))
RANK = 0
DATA3.clear()
else:
await ctx.send(l.xp_tracker[guild_l]['msg_4'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_5'])
else:
await ctx.send(l.module_permissions[guild_l]['msg_restricted'])
except Exception as error:
await exception.error(error)
async def fun_xpstats(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
server_config = await json_manager.get_json(path)
path1 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
ini = await ini_manager.get_ini(path1)
CHANNEL_PERMISSIONS = int(ini['CHANNEL_PERMISSIONS']['STATUS'])
CHANNEL_STATUS = True
            if CHANNEL_PERMISSIONS != 1 and ctx.message.channel.id == server_config['chat0']:
                CHANNEL_STATUS = False
if CHANNEL_STATUS:
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
path3 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
LIST1 = self.ICON
LIST2 = self.PRE
IMAGE = None
EVENT_NAME = None
await origin.get_locale()
TEMP_DATA = await json_manager.get_data(path2)
DATA1 = []
DATA2 = await json_manager.get_data(path3)
DATA3 = []
for value in TEMP_DATA:
if value['type'] == 1 or value['type'] == 2:
DATA1.append(value)
def get_id(INFO):
return int(INFO.get('sum'))
if DATA1:
for data1 in DATA1:
if DATA2:
for data2 in DATA2:
for key, value in data2.items():
if str(key) == str(data1['event_name']):
sum = data2['{}_current'.format(key)]-data2[key]
DATA3.append({'user_username': data2['user_username'], 'user_rsn': data2['user_rsn'], 'sum': sum})
if data1['type'] == 1:
EVENT_NAME = '{} [ S ]'.format(str(data1['event_name']).capitalize())
if data1['type'] == 2:
EVENT_NAME = '{} [ R ]'.format(str(data1['event_name']).capitalize())
for index, value3 in enumerate(LIST2):
if str(value3) == str(key):
IMAGE = LIST1[index]
DATA3.sort(key=get_id, reverse=True)
path4 = c.ORIGIN_PATH['embed.tracker.json']
json_string = await json_manager.get_json(path4)
new_json_string = {'data': []}
STRING = ''
SUM = 0
for key, value in json_string[guild_l]['xp_tracker']['stats'].items():
if DATA3:
if int(key) == 1:
for index, data in enumerate(DATA3):
index = index + 1
if index <= 10:
if index == 1:
title = ':first_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
elif index == 2:
title = ':second_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
elif index == 3:
title = ':third_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
else:
title = '{}'.format(l.DISCORD_TOP[guild_l][index - 1])
STRING += l.xp_tracker[guild_l]['configuration']['current_xp'].format(title, data['user_username'], data['user_rsn'], locale.format_string('%d', data['sum'], grouping=True))
SUM += data['sum']
STRING += l.xp_tracker[guild_l]['configuration']['total_xp'].format(locale.format_string('%d', SUM, grouping=True))
new_json_string['data'].append({
'name{}'.format(key): value['name'].format('\u200D'),
'value{}'.format(key): str(value['value']).format(STRING)
})
else:
STRING += l.xp_tracker[guild_l]['configuration']['total_xp'].format(locale.format_string('%d', SUM, grouping=True))
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): str(value['value']).format(ctx.guild.name)
})
await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, IMAGE, l.xp_tracker[guild_l]['embed_3'].format(ctx.guild.name, EVENT_NAME), new_json_string['data'], False)
DATA3.clear()
else:
await ctx.send(l.xp_tracker[guild_l]['msg_4'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_5'])
else:
await ctx.send(l.module_permissions[guild_l]['msg_restricted'])
except Exception as error:
await exception.error(error)
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
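# One-shot startup task: loads prefixes, names, icons and image/font paths from the module's global INI file into the cog's attributes.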
@tasks.loop(count=1)
async def variable_init(self):
try:
path_global = c.GUILD_PATH['{}_g.ini'.format(self.name)]
ini = await ini_manager.get_ini(path_global)
self.PRE = await json_manager.get_ini_list(path_global, 'CONSTANT2', 'PRE')
self.NAME = await json_manager.get_ini_list(path_global, 'CONSTANT1', 'NAME')
self.ICON = await json_manager.get_ini_list(path_global, 'CONSTANT3', 'ICON')
self.PNG = await json_manager.get_ini_list(path_global, 'CONSTANT5', 'PNG')
self.INFO_PANEL_IMAGE = ini['CONSTANT5']['INFO_PANEL']
self.INFO_PANEL_FIRST_IMAGE = ini['CONSTANT5']['INFO_PANEL_FIRST']
self.INFO_PANEL_SECOND_IMAGE = ini['CONSTANT5']['INFO_PANEL_SECOND']
self.INFO_PANEL_THIRD_IMAGE = ini['CONSTANT5']['INFO_PANEL_THIRD']
self.FONT_PATH = ini['CONSTANT5']['FONT']
await console_interface.console_message(c.CLIENT_MESSAGES['variable_init'].format(self.name))
except Exception as error:
await exception.error(error)
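# Attributes are declared here as None and populated asynchronously by variable_init once the cog is constructed.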
def __init__(self, client):
self.PRE = None
self.NAME = None
self.ICON = None
self.PNG = None
self.INFO_PANEL_IMAGE = None
self.INFO_PANEL_FIRST_IMAGE = None
self.INFO_PANEL_SECOND_IMAGE = None
self.INFO_PANEL_THIRD_IMAGE = None
self.FONT_PATH = None
self.variable_init.start()
self.client = client
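# Command wrappers: each guild-only command defers to permissions.module_status_check, which runs the matching fun_* handler when the module is enabled for the guild.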
@commands.command()
async def xptracker(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xptracker)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def addxpevent(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_addxpevent)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def removeallxp(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_removeallxp)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def axp(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_axp)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xpupdate(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xpupdate)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xprank(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xprank)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xpstats(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xpstats)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
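# Standard discord.py extension entry point.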
def setup(client):
client.add_cog(xp_tracker(client))