id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (7 classes)
---|---|---
4807368 | <filename>Writeups/Hack The Box/Machine/Previse/3. Exploit/brute.py
#!/usr/bin/python3
# Brute-force file IDs exposed by Previse's download.php endpoint.
import requests
url = "http://10.10.11.104/download.php?file="
for i in range(50, 200):
    r = requests.get(url + str(i), allow_redirects=False)
    print("{0} -> {1}".format(i, r.text)) | StarcoderdataPython |
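A hedged variant of the loop above: the URL and ID range come from the original script, while the status-code and empty-body checks are added assumptions about how valid file IDs respond.

# Sketch: only report IDs that return a non-empty 200 response.
for i in range(50, 200):
    r = requests.get(url + str(i), allow_redirects=False)
    if r.status_code == 200 and r.text.strip():
        print("{0} -> {1}".format(i, r.text))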
3208959 | <filename>recorder.py
import sounddevice as sound
from scipy.io.wavfile import write
try:
    freq = 44100              # sample rate in Hz
    duration = 1800           # duration of the recording in seconds (can be changed)
    recording = sound.rec(int(duration * freq), samplerate=freq, channels=2)
    sound.wait()              # block until the recording is finished
    write("recording.wav", freq, recording)  # create the .wav file
except KeyboardInterrupt:     # swallow keyboard interrupts
    pass
| StarcoderdataPython |
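A sketch of an interrupt-tolerant variant: saving the partially filled buffer on Ctrl-C is an assumption about the desired behaviour, not part of the original script.

# Sketch: keep whatever audio was captured if the wait is interrupted.
import sounddevice as sound
from scipy.io.wavfile import write

freq = 44100
duration = 1800  # seconds
recording = sound.rec(int(duration * freq), samplerate=freq, channels=2)
try:
    sound.wait()        # block until the recording finishes
except KeyboardInterrupt:
    sound.stop()        # stop early; the buffer holds the partial recording
write("recording.wav", freq, recording)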
1728806 | from .help import HelpAction
from .new_item import NewItemAction
from .start import StartAction
from .strike_item import StrikeItemAction
from .toggle import ToggleAction
__all__ = [
    "StartAction",
    "HelpAction",
    "NewItemAction",
    "StrikeItemAction",
    "ToggleAction",
]
| StarcoderdataPython |
1674001 | #!/usr/bin/env python
#
# ********* Ping Example *********
#
#
# Available SCServo model on this example : All models using Protocol SCS
# This example is tested with a SCServo(STS/SMS/SCS), and an URT
# Be sure that SCServo(STS/SMS/SCS) properties are already set as: ID : 1 / Baudnum : 6 (Baudrate : 1000000)
#
import os
if os.name == 'nt':
import msvcrt
def getch():
return msvcrt.getch().decode()
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
from scservo_sdk import * # Uses SCServo SDK library
# Default setting
SCS_ID = 1 # SCServo ID : 1
BAUDRATE = 115200 # Baudrate used in this example (SCServo factory default is 1000000)
DEVICENAME = 'COM13' # Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
protocol_end = 0 # SCServo bit end(STS/SMS=0, SCS=1)
# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)
# Initialize PacketHandler instance
# Get methods and members of Protocol
packetHandler = PacketHandler(protocol_end)
# Open port
if portHandler.openPort():
print("Succeeded to open the port")
else:
print("Failed to open the port")
print("Press any key to terminate...")
getch()
quit()
# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
print("Succeeded to change the baudrate")
else:
print("Failed to change the baudrate")
print("Press any key to terminate...")
getch()
quit()
# Try to ping the SCServo
# Get SCServo model number
scs_model_number, scs_comm_result, scs_error = packetHandler.ping(portHandler, SCS_ID)
if scs_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
print("%s" % packetHandler.getRxPacketError(scs_error))
else:
print("[ID:%03d] ping Succeeded. SCServo model number : %d" % (SCS_ID, scs_model_number))
# Close port
portHandler.closePort() | StarcoderdataPython |
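A small retry wrapper around the ping call demonstrated above; it relies only on the packetHandler.ping() signature shown in the example, and the retry count is an arbitrary assumption.

# Sketch: retry the ping a few times before giving up.
def ping_with_retries(packet_handler, port_handler, scs_id, retries=3):
    for _ in range(retries):
        model, comm_result, error = packet_handler.ping(port_handler, scs_id)
        if comm_result == COMM_SUCCESS and error == 0:
            return model
    return None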
1695827 | import datetime
import logging
import smtplib
from celery.task import task
from rapidsms.router.api import get_router
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.datastructures import MultiValueDict
from .models import Session, TagNotification
logger = logging.getLogger(__name__)
@task
def check_for_session_timeout():
"""
Check sessions and send a reminder if they have not responded in
the given threshold.
"""
router = get_router()
app = router.get_app('decisiontree')
for session in Session.objects.open():
app.tick(session)
@task
def status_update():
logger.debug('status update task running')
notifications = TagNotification.objects.filter(sent=False)
notifications = notifications.select_related().order_by('tag', 'entry')
logger.info('found {0} notifications'.format(notifications.count()))
users = {}
for notification in notifications:
email = notification.user.email
if email not in users:
users[email] = []
users[email].append(notification)
    for email, notifications in users.items():
tags = MultiValueDict()
for notification in notifications:
tags.appendlist(notification.tag, notification)
context = {'tags': tags}
body = render_to_string('tree/emails/digest.txt', context)
try:
send_mail(subject='Survey Response Report', message=body,
recipient_list=[email],
from_email=settings.DEFAULT_FROM_EMAIL,
fail_silently=False)
sent = True
        except smtplib.SMTPException as e:
logger.exception(e)
sent = False
if sent:
for notification in notifications:
notification.sent = True
notification.date_sent = datetime.datetime.now()
notification.save()
logger.info('Sent report to %s' % email)
| StarcoderdataPython |
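The per-user grouping inside status_update() can be expressed more idiomatically with collections.defaultdict; a minimal stdlib sketch of the same pattern:

from collections import defaultdict

def group_by_email(notifications):
    # Same grouping as status_update(), without the manual membership check.
    users = defaultdict(list)
    for notification in notifications:
        users[notification.user.email].append(notification)
    return users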
3319761 | import json
import random
import re
import uuid
from flask import Flask, jsonify, redirect, render_template, request, url_for
from user_agents import parse
# init random number generator
random.seed()
app = Flask(__name__)
# app.logger.error("test")
from db_handler import DbHandler
db = DbHandler()
from input_validator import InputValidator
input_validator = InputValidator()
domain_duplicated_error = "You already created this domain or the created domain is the same as the well-known domain. Create a different one!"
@app.route(
"/is_step_finished/user_id/<uuid:user_id>/step_id/<any('step1', 'step2', 'step3', 'step4', 'step5', 'questionnaire'):step_id>"
)
def is_step_finished(user_id, step_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
column_name = "finished_" + step_id
data = db.is_step_finished(user_id, column_name)
# None means the step has not yet been finished
if data is None:
return jsonify({"is_step_finished": False, "server_error": False})
else:
return jsonify({"is_step_finished": True, "server_error": False})
@app.route("/log_time", methods=["POST"])
def log_time():
user_id = request.form["user_id"]
time_type = request.form["type"]
time = request.form["time"]
input_validation = input_validator.check_log_time(user_id, time_type, time)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
# check if time has already been logged
data = db.get_time(user_id, time_type)
if data[0] == "0":
db.log_time(user_id, time_type, time)
return jsonify({"server_error": False})
@app.route("/set_step_finished", methods=["POST"])
def set_step_finished():
user_id = request.form["user_id"]
step_id = request.form["step_id"]
input_validation = input_validator.check_input_set_step_finished(
user_id, step_id)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
db.set_step_as_finished(user_id, "finished_" + step_id)
return jsonify({"finished_step": True, "server_error": False})
@app.route("/is_mobile_user/user_id/<uuid:user_id>")
def is_mobile_user(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return render_template("bad_request.html")
data = db.is_mobile_user(user_id)
# None means the user is not a mobile user
if data is None:
return jsonify({"is_mobile_user": False})
else:
return jsonify({"is_mobile_user": True})
# request handling for steps
@app.route("/")
def index():
user_id = str(uuid.uuid4())
completion_code = str(uuid.uuid4())
user_agent = parse(request.user_agent.string)
browser = user_agent.browser.family
version = user_agent.browser.version_string
os = user_agent.os.family + " " + user_agent.os.version_string
is_mobile = user_agent.is_mobile
db.create_test_person(user_id, completion_code, browser, version, os,
is_mobile)
return render_template("index.html", user_id=user_id)
@app.route("/consent/user_id/<uuid:user_id>")
def consent(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return render_template("bad_request.html")
return render_template("consent.html", user_id=user_id)
# request handling for step 1
@app.route("/step1/user_id/<uuid:user_id>")
def step1(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return render_template("bad_request.html")
last_unfinished_step = input_validator.check_last_unfinished_step(user_id, "step1")
if last_unfinished_step["result"] is True:
return redirect(url_for(last_unfinished_step["current_step"], user_id=user_id))
counter = db.get_count("step1", user_id)[0]
already_created_domains = [
elem[0] for elem in db.get_already_created_domains_step1(user_id)
]
return render_template(
"step1.html",
user_id=user_id,
next_step="step2",
counter=counter,
already_created_domains=already_created_domains)
@app.route("/step1/get_ref_domain")
def step1_get_ref_domain():
data = db.get_ref_domain()
return jsonify({"ref_domain": data[1]})
@app.route("/step1/result", methods=["POST"])
def step1_result():
user_id = request.form["user_id"]
ref_domain = request.form["reference_domain"]
created_domain = request.form["created_domain"]
elapsed_time = request.form["elapsed_time"]
domain_position = request.form["domain_position"]
if created_domain == "I do not want to participate":
is_domain_created = db.is_domain_created("no_participation")
if is_domain_created is None:
db.insert_into_created_domains(ref_domain, "no_participation")
db.insert_into_step1(user_id, "no_participation", elapsed_time, domain_position)
return jsonify({
"server_error": True,
"no_participation": True,
"server_error_message": "You submitted the string 'I do not want to participate', which indicates that you do not want to create domains. Therefore, you cannot take part in the survey. You will not get any compensation. We appreciate your willingness to participate. You can leave the website now."
})
input_validation = input_validator.check_input_step1_result(
user_id, ref_domain, created_domain, elapsed_time, domain_position)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
data = db.check_duplicated_domain_step1(user_id, created_domain)
if data is None:
is_domain_created = db.is_domain_created(created_domain)
if is_domain_created is None:
# domain has not been created before
db.insert_into_created_domains(ref_domain, created_domain)
db.insert_into_step1(user_id, created_domain, elapsed_time, domain_position)
return jsonify({"server_error": False})
else:
return jsonify({
"server_error": True,
"server_error_message": domain_duplicated_error
})
# request handling for step 2
@app.route("/step2/user_id/<uuid:user_id>")
def step2(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return render_template("bad_request.html")
last_unfinished_step = input_validator.check_last_unfinished_step(user_id, "step2")
if last_unfinished_step["result"] is True:
return redirect(url_for(last_unfinished_step["current_step"], user_id=user_id))
counter = db.get_count("step2", user_id)[0]
already_created_domains = [
elem[0] for elem in db.get_already_created_domains("step2", user_id)
]
return render_template(
"step2.html",
user_id=user_id,
next_step="step3",
counter=counter,
already_created_domains=already_created_domains)
@app.route("/step2/get_ref_domain")
def step2_get_ref_domain():
data = db.get_ref_domain()
return jsonify({"ref_domain": data[1]})
@app.route("/step2/result", methods=["POST"])
def step2_result():
user_id = request.form["user_id"]
ref_domain = request.form["reference_domain"]
squatting_technique = request.form["squatting_technique"]
squatting_technique_infos = request.form["squatting_technique_infos"]
created_domain = request.form["created_domain"]
elapsed_time = request.form["elapsed_time"]
domain_position = request.form["domain_position"]
squatting_techniques_order = request.form["squatting_techniques_order"]
input_validation = input_validator.check_input_step2_result(user_id, ref_domain, squatting_technique,\
squatting_technique_infos, created_domain,\
elapsed_time, domain_position,\
squatting_techniques_order)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
data = db.check_duplicated_domain("step2", user_id, created_domain)
# None means the same domain has not been already created by this user
if data is None:
db.insert_into_step2(user_id, ref_domain, squatting_technique,\
squatting_technique_infos, created_domain,\
elapsed_time, domain_position,\
squatting_techniques_order)
return jsonify({"server_error": False})
else:
return jsonify({
"server_error": True,
"server_error_message": domain_duplicated_error
})
# request handling for step 3
@app.route("/step3/user_id/<uuid:user_id>")
def step3(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return render_template("bad_request.html")
last_unfinished_step = input_validator.check_last_unfinished_step(user_id, "step3")
if last_unfinished_step["result"] is True:
return redirect(url_for(last_unfinished_step["current_step"], user_id=user_id))
return render_template("step3.html", user_id=user_id, next_step="step4")
@app.route("/step3/next_domain/user_id/<uuid:user_id>")
def step3_next_domain(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
data = {"domains_available": True}
rated_domains_count = db.get_step3_rated_domains_count(user_id)
step1_domain = db.get_step1_domain(user_id, 5)
ref_domain = db.get_ref_domain_step3(user_id)
phishing_domain = db.get_phishing_domain(user_id)
if step1_domain is None and ref_domain is None and phishing_domain is None:
# we have more phishing domains available than users are supposed to rate
# so that this is rather a backup plan if something goes wrong
data["domains_available"] = False
else:
data["count"] = rated_domains_count
data["server_error"] = False
random_number = random.randrange(10)
if random_number > 1 and random_number <= 6 and step1_domain is not None:
data["next_domain"] = step1_domain[2]
data["id"] = step1_domain[0]
data["type"] = "step1"
elif random_number > 6 and random_number <= 8 and ref_domain is not None:
data["next_domain"] = ref_domain[1]
data["id"] = ref_domain[0]
data["type"] = "ref_domain"
else:
data["next_domain"] = phishing_domain[1]
data["id"] = phishing_domain[0]
data["type"] = "phishing_domain"
return jsonify(data)
@app.route("/step3/result", methods=["POST"])
def step3_result():
user_id = request.form["user_id"]
rated_domain = request.form["rated_domain"]
type = request.form["type"]
elapsed_time = request.form["elapsed_time"]
rating = request.form["rating"]
domain_position = request.form["domain_position"]
input_validation = input_validator.check_input_step3_result(
user_id, rated_domain, type, elapsed_time, rating, domain_position)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
db.insert_into_step3(user_id, rated_domain, type, elapsed_time, rating, domain_position)
return jsonify({"finished_step": True, "server_error": False})
# request handling for step 4
@app.route("/step4/user_id/<uuid:user_id>")
def step4(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return render_template("bad_request.html")
last_unfinished_step = input_validator.check_last_unfinished_step(user_id, "step4")
if last_unfinished_step["result"] is True:
return redirect(url_for(last_unfinished_step["current_step"], user_id=user_id))
counter = db.get_count("step4", user_id)[0]
already_created_domains = [
elem[0] for elem in db.get_already_created_domains("step4", user_id)
]
return render_template(
"step4.html",
user_id=user_id,
next_step="step5",
counter=counter,
already_created_domains=already_created_domains)
@app.route("/step4/result", methods=["POST"])
def step4_result():
user_id = request.form["user_id"]
created_domain = request.form["created_domain"]
elapsed_time = request.form["elapsed_time"]
domain_position = request.form["domain_position"]
input_validation = input_validator.check_input_step4_result(
user_id, created_domain, elapsed_time, domain_position)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
data = db.check_duplicated_domain("step4", user_id, created_domain)
if data is None:
db.insert_into_step4(user_id, created_domain, elapsed_time, domain_position)
return jsonify({"server_error": False})
else:
return jsonify({
"server_error": True,
"server_error_message": domain_duplicated_error
})
# request handling for step 5
@app.route("/step5/user_id/<uuid:user_id>")
def step5(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return render_template("bad_request.html")
last_unfinished_step = input_validator.check_last_unfinished_step(user_id, "step5")
if last_unfinished_step["result"] is True:
return redirect(url_for(last_unfinished_step["current_step"], user_id=user_id))
counter = db.get_count("step5", user_id)[0]
number_of_legit_domains = random.randrange(3) + 1
domains = []
# select legitimate domains
for domain in db.get_legitimate_domains(number_of_legit_domains):
domains.append((domain[1], "legitimate", "legitimate_domain"))
# select domains from step 1
for domain in db.get_step1_domains_for_step5(user_id,
10 - number_of_legit_domains):
domains.append((domain[0], "not_legitimate", "step1_domain"))
if len(domains) < 10:
# select phishtank domains if not enough domains from step 1
number_of_domains = 10 - len(domains)
for domain in db.get_phishing_domains(number_of_domains):
domains.append((domain[0], "not_legitimate", "phishing_domain"))
# randomize order of domains so that legitimate domains are not always displayed first
random.shuffle(domains)
return render_template(
"step5.html",
user_id=user_id,
next_step="questionnaire",
counter=counter,
domains=domains)
@app.route("/step5/result", methods=["POST"])
def step5_result():
user_id = request.form["user_id"]
selected_domains = request.form["selected_domains"]
elapsed_time = request.form["elapsed_time"]
counter = request.form["counter"]
input_validation = input_validator.check_input_step5_result(
user_id, selected_domains, elapsed_time, counter)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
db.insert_into_step5(user_id, selected_domains, elapsed_time, counter)
return jsonify({"inserted": True, "server_error": False})
# request handling for questionnaire
@app.route("/questionnaire/user_id/<uuid:user_id>")
def questionnaire(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return render_template("bad_request.html")
last_unfinished_step = input_validator.check_last_unfinished_step(user_id, "questionnaire")
if last_unfinished_step["result"] is True:
return redirect(url_for(last_unfinished_step["current_step"], user_id=user_id))
countries = [elem[0] for elem in db.get_countries()]
return render_template("questionnaire.html", user_id=user_id, countries=countries)
@app.route("/questionnaire/results", methods=["POST"])
def questionnaire_results():
user_id = request.form["user_id"]
age = request.form["age"]
gender_current = request.form["gender_current"]
education = request.form["education"]
origin = request.form["origin"]
f1 = request.form["f1"]
f2 = request.form["f2"]
f3 = request.form["f3"]
f4 = request.form["f4"]
f5 = request.form["f5"]
f6 = request.form["f6"]
f7 = request.form["f7"]
f8 = request.form["f8"]
f9 = request.form["f9"]
f10 = request.form["f10"]
f11 = request.form["f11"]
f12 = request.form["f12"]
f13 = request.form["f13"]
f14 = request.form["f14"]
f15 = request.form["f15"]
f16 = request.form["f16"]
attention_test1 = request.form["attention_test1"]
attention_test2 = request.form["attention_test2"]
input_validation = input_validator.check_input_questionnaire_result(user_id, age,\
gender_current, education, origin,\
f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12,\
f13, f14, f15, f16,\
attention_test1, attention_test2)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
response = {"already_inserted": False, "server_error": False}
questionnaire_inserted_count = db.check_questionnaire_inserted(user_id)[0]
if questionnaire_inserted_count > 0:
response["already_inserted"] = True
else:
db.insert_into_questionnaire(user_id, age, gender_current, education, origin,\
f1, f2, f3, f4, f5, f6, f7, f8, f9, f10,\
f11, f12, f13, f14, f15, f16,\
attention_test1, attention_test2)
db.set_step_as_finished(user_id, "finished_questionnaire")
return jsonify(response)
# request handling for final notes
@app.route("/final_notes/user_id/<uuid:user_id>")
def final_notes(user_id):
user_id = str(user_id)
input_validation = input_validator.check_input_user_id(user_id)
if input_validation["result"] is False:
return render_template("bad_request.html")
last_unfinished_step = input_validator.check_last_unfinished_step(user_id, "final_notes")
if last_unfinished_step["result"] is True:
return redirect(url_for(last_unfinished_step["current_step"], user_id=user_id))
data = db.get_completion_code(user_id)
completion_code = "-1"
# set completion code to actual value if all five steps and the questionnaire have been finished
    if all(data[i] == 1 for i in range(7, 13)):
        completion_code = data[2]
has_provided_feedback = data[27] != ""
return render_template("final_notes.html", user_id=user_id, finished_step1=str(data[7]), finished_step2=str(data[8]),\
finished_step3=str(data[9]), finished_step4=str(data[10]), finished_step5=str(data[11]),\
finished_questionnaire=str(data[12]), completion_code=completion_code,\
has_provided_feedback=has_provided_feedback)
@app.route("/final_notes/feedback", methods=["POST"])
def final_notes_feedback():
user_id = request.form["user_id"]
feedback = request.form["feedback"]
input_validation = input_validator.check_input_final_notes_feedback(user_id, feedback)
if input_validation["result"] is False:
return jsonify({
"server_error": True,
"server_error_message": input_validation["message"]
})
db.insert_feedback(user_id, feedback)
return jsonify({
"server_error": False
})
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
| StarcoderdataPython |
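A minimal sketch of exercising one route above with Flask's built-in test client; the UUID is arbitrary and the expected JSON key comes from the is_mobile_user handler.

# Sketch: assumes the module above is importable and exposes `app`.
import uuid

def test_is_mobile_user_route():
    client = app.test_client()
    response = client.get("/is_mobile_user/user_id/{0}".format(uuid.uuid4()))
    assert "is_mobile_user" in response.get_json()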
138717 | import collections.abc
import pytest
from radon.cli import Config
import radon.complexity as cc_mod
import radon.cli.harvest as harvest
BASE_CONFIG = Config(
    exclude=r'test_[^.]+\.py',
ignore='tests,docs',
)
CC_CONFIG = Config(
order=getattr(cc_mod, 'SCORE'),
no_assert=False,
min='A',
max='F',
show_complexity=False,
show_closures=False,
average=True,
total_average=False,
**BASE_CONFIG.config_values
)
RAW_CONFIG = Config(
summary=True,
)
MI_CONFIG = Config(
multi=True,
min='B',
max='C',
show=True,
sort=False,
)
def fake_gobble(fobj):
return 42
def fake_gobble_raising(fobj):
raise TypeError('mystr')
def fake_run():
for i in range(3):
yield {'file-{0}'.format(i): i**2}
@pytest.fixture
def base_config():
return Config(**BASE_CONFIG.config_values.copy())
@pytest.fixture
def cc_config():
return Config(**CC_CONFIG.config_values.copy())
@pytest.fixture
def raw_config():
return Config(**RAW_CONFIG.config_values.copy())
@pytest.fixture
def mi_config():
return Config(**MI_CONFIG.config_values.copy())
def test_base_iter_filenames(base_config, mocker):
iter_mock = mocker.patch('radon.cli.harvest.iter_filenames')
h = harvest.Harvester([], base_config)
h._iter_filenames()
iter_mock.assert_called_with([], base_config.exclude,
base_config.ignore)
def test_base_gobble_not_implemented(base_config):
h = harvest.Harvester([], base_config)
with pytest.raises(NotImplementedError):
h.gobble(None)
def test_base_as_xml_not_implemented(base_config):
h = harvest.Harvester([], base_config)
with pytest.raises(NotImplementedError):
h.as_xml()
def test_base_to_terminal_not_implemented(base_config):
h = harvest.Harvester([], base_config)
with pytest.raises(NotImplementedError):
h.to_terminal()
def test_base_run(base_config):
h = harvest.Harvester(['-'], base_config)
h.gobble = fake_gobble
    assert isinstance(h.run(), collections.abc.Iterator)
assert list(h.run()) == [('-', 42)]
h.gobble = fake_gobble_raising
assert list(h.run()) == [('-', {'error': 'mystr'})]
def test_base_results(base_config):
h = harvest.Harvester([], base_config)
h.run = fake_run
results = h.results
    assert isinstance(results, collections.abc.Iterator)
assert list(results) == [{'file-0': 0}, {'file-1': 1}, {'file-2': 4}]
    assert not isinstance(h.results, collections.abc.Iterator)
    assert isinstance(h.results, collections.abc.Iterable)
assert isinstance(h.results, list)
def test_base_as_json(base_config):
h = harvest.Harvester([], base_config)
h._results = {'filename': {'complexity': 2}}
assert h.as_json() == '{"filename": {"complexity": 2}}'
def test_cc_gobble(cc_config, mocker):
sr_mock = mocker.patch('radon.cli.harvest.sorted_results')
cc_mock = mocker.patch('radon.cli.harvest.cc_visit')
cc_mock.return_value = []
fobj = mocker.MagicMock()
fobj.read.return_value = mocker.sentinel.one
h = harvest.CCHarvester([], cc_config)
h.config.show_closures = True
h.gobble(fobj)
assert fobj.read.called
cc_mock.assert_called_with(mocker.sentinel.one,
no_assert=cc_config.no_assert)
sr_mock.assert_called_with([], order=cc_config.order)
def test_cc_to_dicts(cc_config, mocker):
c2d_mock = mocker.patch('radon.cli.harvest.cc_to_dict')
c2d_mock.side_effect = lambda i: i
h = harvest.CCHarvester([], cc_config)
sample_results = [('a', [{'rank': 'A'}]), ('b', [{'rank': 'B'}]),
('c', {'error': 'An ERROR!'})]
h._results = sample_results
assert h._to_dicts() == dict(sample_results)
assert c2d_mock.call_count == 2
h.config.min = 'B'
h._results = sample_results[1:]
assert h._to_dicts() == dict(sample_results[1:])
def test_cc_as_json_xml(cc_config, mocker):
d2x_mock = mocker.patch('radon.cli.harvest.dict_to_xml')
to_dicts_mock = mocker.MagicMock()
to_dicts_mock.return_value = {'a': {'rank': 'A'}}
h = harvest.CCHarvester([], cc_config)
h._to_dicts = to_dicts_mock
assert h.as_json() == '{"a": {"rank": "A"}}'
h.as_xml()
assert d2x_mock.called
d2x_mock.assert_called_with(to_dicts_mock.return_value)
assert to_dicts_mock.call_count == 2
def test_cc_to_terminal(cc_config, mocker):
reset_mock = mocker.patch('radon.cli.harvest.RESET')
ranks_mock = mocker.patch('radon.cli.harvest.RANKS_COLORS')
c2t_mock = mocker.patch('radon.cli.harvest.cc_to_terminal')
h = harvest.CCHarvester([], cc_config)
h._results = [
('a', {'error': 'mystr'}), ('b', {})
]
c2t_mock.return_value = (['res'], 9, 3)
ranks_mock.__getitem__.return_value = '<|A|>'
reset_mock.__eq__.side_effect = lambda o: o == '__R__'
results = list(h.to_terminal())
c2t_mock.assert_called_once_with({}, cc_config.show_complexity,
cc_config.min, cc_config.max,
cc_config.total_average)
assert results == [
('a', ('mystr',), {'error': True}),
('b', (), {}),
(['res'], (), {'indent': 1}),
('\n{0} blocks (classes, functions, methods) analyzed.', (3,), {}),
('Average complexity: {0}{1} ({2}){3}',
('<|A|>', 'A', 3, '__R__'), {}),
]
def test_raw_gobble(raw_config, mocker):
r2d_mock = mocker.patch('radon.cli.harvest.raw_to_dict')
analyze_mock = mocker.patch('radon.cli.harvest.analyze')
fobj = mocker.MagicMock()
fobj.read.return_value = mocker.sentinel.one
analyze_mock.return_value = mocker.sentinel.two
h = harvest.RawHarvester([], raw_config)
h.gobble(fobj)
assert fobj.read.call_count == 1
analyze_mock.assert_called_once_with(mocker.sentinel.one)
r2d_mock.assert_called_once_with(mocker.sentinel.two)
def test_raw_as_xml(raw_config):
h = harvest.RawHarvester([], raw_config)
with pytest.raises(NotImplementedError):
h.as_xml()
def test_raw_to_terminal(raw_config):
h = harvest.RawHarvester([], raw_config)
h._results = [
('a', {'error': 'mystr'}),
('b', {'loc': 24, 'lloc': 27, 'sloc': 15, 'comments': 3,
'multi': 3, 'single_comments': 3, 'blank': 9}),
('c', {'loc': 24, 'lloc': 27, 'sloc': 15, 'comments': 3,
'multi': 3, 'single_comments': 13, 'blank': 9}),
('e', {'loc': 0, 'lloc': 0, 'sloc': 0, 'comments': 0,
'single_comments': 12, 'multi': 0, 'blank': 0}),
]
assert list(h.to_terminal()) == [
('a', ('mystr',), {'error': True}),
('b', (), {}),
('{0}: {1}', ('LOC', 24), {'indent': 1}),
('{0}: {1}', ('LLOC', 27), {'indent': 1}),
('{0}: {1}', ('SLOC', 15), {'indent': 1}),
('{0}: {1}', ('Comments', 3), {'indent': 1}),
('{0}: {1}', ('Single comments', 3), {'indent': 1}),
('{0}: {1}', ('Multi', 3), {'indent': 1}),
('{0}: {1}', ('Blank', 9), {'indent': 1}),
('- Comment Stats', (), {'indent': 1}),
('(C % L): {0:.0%}', (0.125,), {'indent': 2}),
('(C % S): {0:.0%}', (0.2,), {'indent': 2}),
('(C + M % L): {0:.0%}', (0.25,), {'indent': 2}),
('c', (), {}),
('{0}: {1}', ('LOC', 24), {'indent': 1}),
('{0}: {1}', ('LLOC', 27), {'indent': 1}),
('{0}: {1}', ('SLOC', 15), {'indent': 1}),
('{0}: {1}', ('Comments', 3), {'indent': 1}),
('{0}: {1}', ('Single comments', 13), {'indent': 1}),
('{0}: {1}', ('Multi', 3), {'indent': 1}),
('{0}: {1}', ('Blank', 9), {'indent': 1}),
('- Comment Stats', (), {'indent': 1}),
('(C % L): {0:.0%}', (0.125,), {'indent': 2}),
('(C % S): {0:.0%}', (0.2,), {'indent': 2}),
('(C + M % L): {0:.0%}', (0.25,), {'indent': 2}),
('e', (), {}),
('{0}: {1}', ('LOC', 0), {'indent': 1}),
('{0}: {1}', ('LLOC', 0), {'indent': 1}),
('{0}: {1}', ('SLOC', 0), {'indent': 1}),
('{0}: {1}', ('Comments', 0), {'indent': 1}),
('{0}: {1}', ('Single comments', 12), {'indent': 1}),
('{0}: {1}', ('Multi', 0), {'indent': 1}),
('{0}: {1}', ('Blank', 0), {'indent': 1}),
('- Comment Stats', (), {'indent': 1}),
('(C % L): {0:.0%}', (0.0,), {'indent': 2}),
('(C % S): {0:.0%}', (0.0,), {'indent': 2}),
('(C + M % L): {0:.0%}', (0.0,), {'indent': 2}),
('** Total **', (), {}),
('{0}: {1}', ('LOC', 48), {'indent': 1}),
('{0}: {1}', ('LLOC', 54), {'indent': 1}),
('{0}: {1}', ('SLOC', 30), {'indent': 1}),
('{0}: {1}', ('Comments', 6), {'indent': 1}),
('{0}: {1}', ('Single comments', 28), {'indent': 1}),
('{0}: {1}', ('Multi', 6), {'indent': 1}),
('{0}: {1}', ('Blank', 18), {'indent': 1}),
('- Comment Stats', (), {'indent': 1}),
('(C % L): {0:.0%}', (0.125,), {'indent': 2}),
('(C % S): {0:.0%}', (0.2,), {'indent': 2}),
('(C + M % L): {0:.0%}', (0.25,), {'indent': 2}),
]
def test_mi_gobble(mi_config, mocker):
mv_mock = mocker.patch('radon.cli.harvest.mi_visit')
fobj = mocker.MagicMock()
fobj.read.return_value = mocker.sentinel.one
mv_mock.return_value = 23.5
h = harvest.MIHarvester([], mi_config)
result = h.gobble(fobj)
assert fobj.read.call_count == 1
mv_mock.assert_called_once_with(mocker.sentinel.one, mi_config.multi)
assert result == {'mi': 23.5, 'rank': 'A'}
def test_mi_as_json(mi_config, mocker):
d_mock = mocker.patch('radon.cli.harvest.json.dumps')
h = harvest.MIHarvester([], mi_config)
h.config.min = 'C'
h._results = [
('a', {'error': 'mystr'}),
('b', {'mi': 25, 'rank': 'A'}),
('c', {'mi': 15, 'rank': 'B'}),
('d', {'mi': 0, 'rank': 'C'}),
]
h.as_json()
d_mock.assert_called_with(dict([h._results[0], h._results[-1]]))
def test_mi_as_xml(mi_config):
h = harvest.MIHarvester([], mi_config)
with pytest.raises(NotImplementedError):
h.as_xml()
def test_mi_to_terminal(mi_config, mocker):
reset_mock = mocker.patch('radon.cli.harvest.RESET')
ranks_mock = mocker.patch('radon.cli.harvest.MI_RANKS')
ranks_mock.__getitem__.side_effect = lambda j: '<|{0}|>'.format(j)
reset_mock.__eq__.side_effect = lambda o: o == '__R__'
h = harvest.MIHarvester([], mi_config)
h._results = [
('a', {'error': 'mystr'}),
('b', {'mi': 25, 'rank': 'A'}),
('c', {'mi': 15, 'rank': 'B'}),
('d', {'mi': 0, 'rank': 'C'}),
]
assert list(h.to_terminal()) == [
('a', ('mystr',), {'error': True}),
('{0} - {1}{2}{3}{4}', ('c', '<|B|>', 'B', ' (15.00)', '__R__'),
{}),
('{0} - {1}{2}{3}{4}', ('d', '<|C|>', 'C', ' (0.00)', '__R__'),
{}),
]
| StarcoderdataPython |
46598 | <filename>flowws_structure_pretraining/analysis/BondDenoisingVisualizer.py
import functools
import flowws
from flowws import Argument as Arg
import plato
from plato import draw
import numpy as np
from .internal import GeneratorVisualizer
from ..FileLoader import FileLoader
@flowws.add_stage_arguments
class BondDenoisingVisualizer(flowws.Stage, GeneratorVisualizer):
"""Visualize the results of a bond denoising regressor"""
ARGS = [
Arg(
'color_scale',
None,
float,
1,
valid_values=flowws.Range(0, 10, True),
help='Factor to scale color RGB intensities by',
),
Arg(
'reverse',
'-r',
bool,
False,
help='If True, reverse classification colormap',
),
Arg('mode', '-m', str, help='Colormap mode'),
Arg('color_min', None, float, 0.0, help='Minimum colormap value'),
Arg('color_max', None, float, 1.0, help='Maximum colormap value'),
Arg('contrast', None, float, 1.0, help='Contrast scale'),
Arg(
'width',
None,
float,
0.25,
valid_values=flowws.Range(0, 2.0, True),
help='Bond rendering width',
),
Arg(
'cosine_similarity',
'-c',
bool,
False,
help='Use cosine similarity rather than euclidean distance',
),
]
Frame = FileLoader.Frame
def run(self, scope, storage):
self.scope = scope
(positions, starts, ends, theta) = self.get_predictions(scope['cache_key'])
theta = self.remap_theta(theta)
colors = plato.cmap.cubehelix(theta)
colors[:, :3] *= self.arguments['color_scale']
prim = draw.Lines(
start_points=starts,
end_points=ends,
widths=np.full(len(theta), self.arguments['width']),
colors=colors,
)
scope.setdefault('plato_primitives', []).append(prim)
def theta_prediction(self, prediction):
delta = prediction.ends - prediction.starts
if self.arguments['cosine_similarity']:
left, right = delta, prediction.theta
numerator = np.sum(left * right, axis=-1)
denominator = np.linalg.norm(left, axis=-1) * np.linalg.norm(right, axis=-1)
theta = numerator / denominator
else:
delta -= prediction.theta * self.scope['x_scale']
theta = np.linalg.norm(delta, axis=-1)
result = prediction._replace(theta=theta)
return result
| StarcoderdataPython |
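theta_prediction() above scores each bond either by cosine similarity or by euclidean distance between the bond vector and the predicted vector; a standalone numpy sketch of the same two scores, on arbitrary arrays:

# Sketch: the two scoring modes from theta_prediction(), with random inputs.
import numpy as np

delta = np.random.rand(5, 3)   # bond vectors (ends - starts)
theta = np.random.rand(5, 3)   # predicted vectors

cosine = np.sum(delta * theta, axis=-1) / (
    np.linalg.norm(delta, axis=-1) * np.linalg.norm(theta, axis=-1))
euclid = np.linalg.norm(delta - theta, axis=-1)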
3362860 | <filename>action_tracker/test/TestActionTracker3.py
'''
Created on Feb 14, 2021
@author: jeff
This test generates addAction threads for each line in the input file.
The averages are matched to the output file.
'''
import simplejson as json
import unittest
import concurrent.futures
from action_tracker.Tracker import ActionTracker
class TestActionTracker3(unittest.TestCase):
action_tracker = ActionTracker()
def testAddActionWithThreads(self):
'''
Open the test input file for reading
Create a thread pool executor for handling threads and their output
Starting threads for each input line in the input file
Verify that each addAction() was successful using future.result()
'''
with open("test3input.txt") as f:
with concurrent.futures.ThreadPoolExecutor() as executor:
future_add_actions = {executor.submit(self.action_tracker.addAction,line): line for line in f}
for future in concurrent.futures.as_completed(future_add_actions):
self.assertEqual(future.result(), None, "addAction() Failed")
def testGetStatsWithThreads(self):
'''
Open the test input file for reading
Only one thread is needed, however ThreadPoolExecutor allows for returned value in .result()
Starting threads for each input line in the input file
Verify that the getStats() call was successful
'''
with open("test3output.txt") as f:
with concurrent.futures.ThreadPoolExecutor() as executor:
future_add_actions = {executor.submit(self.action_tracker.getStats)}
for future in concurrent.futures.as_completed(future_add_actions):
formatted_line = json.dumps(json.loads(f.readline()))
self.assertEqual(future.result(), formatted_line, "getStats() Failed")
| StarcoderdataPython |
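The test above only exercises addAction() and getStats(); a hypothetical sketch of the interface it assumes (Tracker.py is not shown in this dump, so the input format and field names are guesses):

# Hypothetical sketch -- not the real action_tracker.Tracker implementation.
import json
import threading

class ActionTrackerSketch:
    def __init__(self):
        self._lock = threading.Lock()
        self._totals = {}  # action -> (time_sum, count)

    def addAction(self, line):
        record = json.loads(line)  # e.g. {"action": "jump", "time": 100}
        with self._lock:
            total, count = self._totals.get(record["action"], (0, 0))
            self._totals[record["action"]] = (total + record["time"], count + 1)

    def getStats(self):
        with self._lock:
            return json.dumps([{"action": a, "avg": t / c}
                               for a, (t, c) in self._totals.items()])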
114234 | <filename>test/test_files/pylops/pylops/basicoperators/LinearRegression.py
import logging
from pylops.basicoperators import Regression
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARNING)
def LinearRegression(taxis, dtype='float64'):
r"""Linear regression.
Creates an operator that applies linear regression to a set of points.
Values along the t-axis must be provided while initializing the operator.
Intercept and gradient form the model vector to be provided in forward
mode, while the values of the regression line curve shall be provided
in adjoint mode.
Parameters
----------
taxis : :obj:`numpy.ndarray`
Elements along the t-axis.
dtype : :obj:`str`, optional
Type of elements in input array.
Attributes
----------
shape : :obj:`tuple`
Operator shape
explicit : :obj:`bool`
Operator contains a matrix that can be solved explicitly
(``True``) or not (``False``)
Raises
------
TypeError
If ``t`` is not :obj:`numpy.ndarray`.
See Also
--------
Regression: Polynomial regression
Notes
-----
The LinearRegression operator solves the following problem:
.. math::
y_i = x_0 + x_1 t_i \qquad \forall i=1,2,...,N
We can express this problem in a matrix form
.. math::
\mathbf{y}= \mathbf{A} \mathbf{x}
where
.. math::
\mathbf{y}= [y_1, y_2,...,y_N]^T, \qquad \mathbf{x}= [x_0, x_1]^T
and
.. math::
\mathbf{A}
= \begin{bmatrix}
1 & t_{1} \\
1 & t_{2} \\
.. & .. \\
1 & t_{N}
\end{bmatrix}
Note that this is a particular case of the :py:class:`pylops.Regression`
operator and it is in fact just a lazy call of that operator with
``order=1``.
"""
return Regression(taxis, order=1, dtype=dtype)
| StarcoderdataPython |
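A short usage sketch matching the docstring above; the time axis and model values are arbitrary.

# Sketch: forward and adjoint application of LinearRegression.
import numpy as np
from pylops.basicoperators import LinearRegression

taxis = np.arange(10, dtype='float64')
LRop = LinearRegression(taxis, dtype='float64')
x = np.array([1.0, 2.0])  # [intercept, gradient]
y = LRop @ x              # points on the regression line
xadj = LRop.H @ y         # adjoint application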
185724 | from pydantic import BaseModel
from vo.SpiderBaseGetVideoInfoBatchResponseVO import SpiderBaseGetVideoInfoBatchResponseVO
from vo.douyin.SpiderDouyinUserInfoVO import SpiderDouyinUserInfoVO
class SpiderDouyinVideoUrlByUserResponseVO(BaseModel):
user_info: SpiderDouyinUserInfoVO = None
video_list: SpiderBaseGetVideoInfoBatchResponseVO = None
| StarcoderdataPython |
92818 | <reponame>charithmadhuranga/core<filename>tests/components/webostv/test_media_player.py
"""The tests for the LG webOS media player platform."""
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_VOLUME_MUTED,
SERVICE_SELECT_SOURCE,
)
from homeassistant.components.webostv.const import (
ATTR_BUTTON,
ATTR_PAYLOAD,
DOMAIN,
SERVICE_BUTTON,
SERVICE_COMMAND,
)
from homeassistant.const import ATTR_COMMAND, ATTR_ENTITY_ID, SERVICE_VOLUME_MUTE
from . import ENTITY_ID, setup_webostv
async def test_mute(hass, client):
"""Test simple service call."""
await setup_webostv(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_MEDIA_VOLUME_MUTED: True,
}
assert await hass.services.async_call(MP_DOMAIN, SERVICE_VOLUME_MUTE, data, True)
await hass.async_block_till_done()
client.set_mute.assert_called_once()
async def test_select_source_with_empty_source_list(hass, client):
"""Ensure we don't call client methods when we don't have sources."""
await setup_webostv(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_INPUT_SOURCE: "nonexistent",
}
await hass.services.async_call(MP_DOMAIN, SERVICE_SELECT_SOURCE, data)
await hass.async_block_till_done()
client.launch_app.assert_not_called()
client.set_input.assert_not_called()
async def test_button(hass, client):
"""Test generic button functionality."""
await setup_webostv(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_BUTTON: "test",
}
await hass.services.async_call(DOMAIN, SERVICE_BUTTON, data)
await hass.async_block_till_done()
client.button.assert_called_once()
client.button.assert_called_with("test")
async def test_command(hass, client):
"""Test generic command functionality."""
await setup_webostv(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_COMMAND: "test",
}
await hass.services.async_call(DOMAIN, SERVICE_COMMAND, data)
await hass.async_block_till_done()
client.request.assert_called_with("test", payload=None)
async def test_command_with_optional_arg(hass, client):
"""Test generic command functionality."""
await setup_webostv(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_COMMAND: "test",
ATTR_PAYLOAD: {"target": "https://www.google.com"},
}
await hass.services.async_call(DOMAIN, SERVICE_COMMAND, data)
await hass.async_block_till_done()
client.request.assert_called_with(
"test", payload={"target": "https://www.google.com"}
)
| StarcoderdataPython |
100859 | # ===============================================================================
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pychron.core.ui import set_toolkit
set_toolkit('qt4')
import unittest
from pychron.processing.isotope import Isotope
# ============= standard library imports ========================
# ============= local library imports ==========================
class FitBlockTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
        cls.fits = ('Ar41:(,10,average), (10,,cubic)',
'Ar40:parabolic',
'Ar39AX:parabolic',
'Ar39CDD:parabolic',
'Ar38:linear',
'Ar37:linear',
'Ar36:parabolic')
    def testAr41Fit(self):
        iso = Isotope()
fits = dict([f.split(':') for f in self.fits])
iso.set_fit_blocks(fits['Ar41'])
self.assertEqual(iso.get_fit(0), 'average')
self.assertEqual(iso.get_fit(-1), 'cubic')
self.assertEqual(iso.get_fit(100), 'cubic')
#
# class AutomatedRunTest(unittest.TestCase):
# def setUp(self):
# self.arun = AutomatedRun()
#
# # db = isotope_manager_factory().db
# # db.connect()
# # self.arun.db = db
#
# def testFits1(self):
# fits = 'linear'
# dets = ['H2', 'H1', 'AX']
#
# self.arun.py_activate_detectors(('H2', 'H1', 'AX'))
# self.arun.py_set_fits(fits)
#
# self.assertEqual('linear', self.arun.arar_age.isotopes['Ar40'])
# self.arun._active_detectors = dets
# self.arun.py_set_regress_fits(fits)
# self.assertListEqual(self.arun.fits, [(None, ['linear', 'linear', 'linear'])])
# def testFits2(self):
# fits = ('linear', 'linear', 'parabolic')
# dets = ['H2', 'H1', 'AX']
# # self.arun.py_activate_detectors(('H2', 'H1', 'AX'))
# self.arun._active_detectors = dets
# self.arun.py_set_regress_fits(fits)
#
# self.assertListEqual(self.arun.fits, [(None, ['linear', 'linear', 'parabolic'])])
#
# def testFits3(self):
# fits = (
# ((0, 100), ('linear', 'linear', 'parabolic')),
# )
# dets = ['H2', 'H1', 'AX']
# # self.arun.py_activate_detectors(('H2', 'H1', 'AX'))
# self.arun._active_detectors = dets
# self.arun.py_set_regress_fits(fits)
# self.assertListEqual(self.arun.fits,
# [((0, 100), ['linear', 'linear', 'parabolic'])])
#
# def testGetFitBlock1(self):
# fits = ('linear', 'linear', 'parabolic')
# dets = ['H2', 'H1', 'AX']
# # self.arun.py_activate_detectors(('H2', 'H1', 'AX'))
# self.arun._active_detectors = dets
# self.arun.py_set_regress_fits(fits)
# fits = self.arun._get_fit_block(10, self.arun.fits)
# self.assertListEqual(fits, ['linear', 'linear', 'parabolic'])
#
# def testGetFitBlock2(self):
# fits = (
# ((0, 100), ('linear', 'linear', 'parabolic')),
# )
# dets = ['H2', 'H1', 'AX']
# # self.arun.py_activate_detectors(('H2', 'H1', 'AX'))
# self.arun._active_detectors = dets
# self.arun.py_set_regress_fits(fits)
# fits = self.arun._get_fit_block(150, self.arun.fits)
# self.assertListEqual(fits, ['linear', 'linear', 'parabolic'])
#
# def testGetFitBlock3(self):
# fits = (
# ((0, 100), ('linear', 'linear', 'parabolic')),
# ((100,), ('linear', 'linear', 'linear')),
# )
# dets = ['H2', 'H1', 'AX']
# # self.arun.py_activate_detectors(('H2', 'H1', 'AX'))
# self.arun._active_detectors = dets
# self.arun.py_set_regress_fits(fits)
# fits = self.arun._get_fit_block(10, self.arun.fits)
# self.assertListEqual(fits, ['linear', 'linear', 'parabolic'])
#
# def testGetFitBlock4(self):
# fits = (
# ((0, 100), ('linear', 'linear', 'linear')),
# ((100, None), ('linear', 'linear', 'parabolic')),
# )
# dets = ['H2', 'H1', 'AX']
# # self.arun.py_activate_detectors(('H2', 'H1', 'AX'))
# self.arun._active_detectors = dets
# self.arun.py_set_regress_fits(fits)
# # print 'fffff', self.arun.fits
# fits = self.arun._get_fit_block(10, self.arun.fits)
# self.assertListEqual(fits, ['linear', 'linear', 'linear'])
#
#
# @unittest.skip('check iteration')
# def testCheckIteration(self):
# arun = self.arun
# attr = 'age'
# comp = '>'
# value = 10
# start_count = 0
# frequency = 1
#
# conditionals = [
# TruncationCondition(attr, comp, value,
# start_count,
# frequency)
# ]
#
# cnt = 1
# arun.labnumber = '61311'
# arun.analysis_type = 'unknown'
# arun.start()
#
# result = arun._check_conditions(conditionals, cnt)
# self.assertEqual(result, True)
# def testTermination(self):
# grpname = 'signal'
# ncounts = 10
# starttime = 0
# starttime_offset = 0
# series = 0
# fits = ('linear',)
# check_conditions = True
# def data_write_hook(*args):
# pass
#
# self.arun._measure_iteration(grpname, data_write_hook, ncounts,
# starttime, starttime_offset, series,
# fits, check_conditions, refresh)
# ============= EOF =============================================
| StarcoderdataPython |
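The fit specification in FitBlockTest encodes (start, end, fit) blocks as a comma-separated string; a stdlib sketch of one way such a spec could be parsed (the grammar is inferred from the test, not taken from pychron):

# Sketch: parse "(,10,average), (10,,cubic)" into (start, end, fit) tuples.
import re

def parse_fit_blocks(spec):
    blocks = []
    for start, end, fit in re.findall(r'\(([^,]*),([^,]*),([^)]*)\)', spec):
        blocks.append((int(start) if start else None,
                       int(end) if end else None,
                       fit.strip()))
    return blocks

# parse_fit_blocks('(,10,average), (10,,cubic)')
# -> [(None, 10, 'average'), (10, None, 'cubic')]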
1624155 | <reponame>DaveMcEwan/dmppl
#!/usr/bin/env python3
# lineFilter
# <NAME> 2020-10-03
#
# Take lines from STDIN, filter out lines, and print remaining lines on STDOUT.
# Run like:
# cat foo.txt | python lineFilter.py fileOfRegexs > bar.txt
#
# mypy --ignore-missing-imports lineFilter.py
# Standard library
import argparse
import re
import sys
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, \
Tuple, Union, cast
# git clone https://github.com/DaveMcEwan/dmppl.git && pip install -e ./dmppl
from dmppl.base import run, verb, dbg, \
rdLines
__version__ = "0.1.0"
# {{{ argparser
argparser = argparse.ArgumentParser(
description = \
("Take lines from STDIN,"
" filter out lines according to a set of regexs,"
" then print remaining lines on STDOUT."),
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
argparser.add_argument("-t", "--expand-tabs",
action="store_true",
help="Expand tab characters in input to 1..8 spaces each.")
argparser.add_argument("-s", "--deduplicate-spaces",
action="store_true",
help="Deduplicate spaces in input. Applied after optional tab expansion.")
argparser.add_argument("-l", "--left-strip",
action="store_true",
help="Remove leading whitespace from input lines.")
argparser.add_argument("-r", "--right-strip",
action="store_true",
help="Remove trailing whitespace from input lines.")
argparser.add_argument("-c", "--case-fold",
action="store_true",
help="Convert input lines to lower case.")
argparser.add_argument("-i", "--invert-match",
action="store_true",
help="Print lines which *do* match a filter.")
argparser.add_argument("filterFile",
type=str,
nargs='?',
default="lineFilter.regex",
help="Text file containing one regex per line."
" Input lines matching any given regex are filtered from output."
" Lines beginning with '#' are ignored.")
# }}} argparser
def main(args) -> int: # {{{
'''
1. Read in all regexs and precompile filters into memory.
2. Read STDIN line by line.
3. If line does not match any regex then print on STDOUT.
'''
verb("Reading and compiling regex filters ...", end='')
regexLines:Iterable = \
rdLines(args.filterFile,
commentLines=True,
commentMark='#',
expandTabs=True,
deduplicateSpaces=True,
leftStrip=True,
rightStrip=True,
caseFold=False,
raiseIOError=True)
regexes:List = [re.compile(line) for line in regexLines if len(line) > 0]
verb("Done")
verb("Opening STDIN with optional whitespace preprocessing ...", end='')
inputLines:Iterable = \
rdLines(None, # STDIN
commentLines=False,
expandTabs=args.expand_tabs,
deduplicateSpaces=args.deduplicate_spaces,
leftStrip=args.left_strip,
rightStrip=args.right_strip,
caseFold=args.case_fold)
verb("Done")
verb("Filtering ...", end='')
for line in inputLines:
reMatch:bool = any(r.search(line) for r in regexes)
if reMatch == args.invert_match:
print(line, end='')
verb("Done")
return 0
# }}} def main
def entryPoint(argv=sys.argv):
return run(__name__, argv=argv)
if __name__ == "__main__":
sys.exit(entryPoint())
| StarcoderdataPython |
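A usage sketch, assuming a filter file with one pattern per line (the file name matches the argparse default above):

# Example lineFilter.regex (one regex per line; '#' lines are ignored):
#     ^DEBUG
#     heartbeat
#
# Keep only the non-matching lines from a log:
#     cat app.log | python lineFilter.py --right-strip lineFilter.regex > filtered.log
# Or print only the matching lines instead:
#     cat app.log | python lineFilter.py --invert-match lineFilter.regex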
1666542 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains resolvers for different attributes of Action in aquery output."""
import copy
class DepSetResolver(object):
"""Utility class to resolve the dependency nested set."""
def __init__(self, dep_set_of_files, artifact_id_to_path):
self.dep_set_to_artifact_ids = {}
self.id_to_dep_set = {dep_set.id: dep_set for dep_set in dep_set_of_files}
self.artifact_id_to_path = artifact_id_to_path
def resolve(self, dep_set):
"""Given a dep set, return the flattened list of input artifact ids.
Args:
dep_set: the dep set object to be resolved.
Returns:
The flattened list of input artifact ids.
"""
if dep_set.id in self.dep_set_to_artifact_ids:
return self.dep_set_to_artifact_ids[dep_set.id]
artifact_ids = copy.copy([
self.artifact_id_to_path[artifact_id]
for artifact_id in dep_set.direct_artifact_ids
])
for transitive_dep_set_id in dep_set.transitive_dep_set_ids:
artifact_ids.extend(
self.resolve(self.id_to_dep_set[transitive_dep_set_id]))
self.dep_set_to_artifact_ids[dep_set.id] = artifact_ids
return self.dep_set_to_artifact_ids[dep_set.id]
| StarcoderdataPython |
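A minimal sketch of driving DepSetResolver with stand-in aquery objects; the attribute names mirror those used in resolve() above, built with namedtuples purely for illustration.

# Sketch: resolve a two-level dep set with stand-in objects.
from collections import namedtuple

DepSet = namedtuple('DepSet', 'id direct_artifact_ids transitive_dep_set_ids')

leaf = DepSet(id=1, direct_artifact_ids=[10], transitive_dep_set_ids=[])
root = DepSet(id=2, direct_artifact_ids=[11], transitive_dep_set_ids=[1])
paths = {10: 'lib/a.h', 11: 'src/a.cc'}

resolver = DepSetResolver([leaf, root], paths)
# resolver.resolve(root) -> ['src/a.cc', 'lib/a.h']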
41457 | <reponame>joseangelooliveira-br/Python3
from time import sleep
n1 = int(input('First value: '))
n2 = int(input('Second value: '))
opcao = 0
while opcao != 5:
    print('''
    [1] Add
    [2] Multiply
    [3] Largest
    [4] New numbers
    [5] Exit the program''')
    opcao = int(input('What is your option? '))
    if opcao == 1:
        soma = n1 + n2
        print('The sum of {} and {} is {}'.format(n1, n2, soma))
    elif opcao == 2:
        multi = n1 * n2
        print('The product of {} and {} is {}'.format(n1, n2, multi))
    elif opcao == 3:
        if n1 > n2:
            maior = n1
        else:
            maior = n2
        print('Between {} and {} the largest value is {}'.format(n1, n2, maior))
    elif opcao == 4:
        print('Enter the numbers again:')
        n1 = int(input('First value: '))
        n2 = int(input('Second value: '))
    elif opcao == 5:
        print('Finishing.....')
    else:
        print('Invalid option. Try again!')
    print('=-=' * 10)
    sleep(2)
print('End of program! Come back soon.') | StarcoderdataPython |
1619470 | <filename>tests/test_core.py
import os
import rail
import pytest
import pickle
import numpy as np
from types import GeneratorType
from rail.core.stage import RailStage
from rail.core.data import DataStore, DataHandle, TableHandle, Hdf5Handle, PqHandle, QPHandle, ModelHandle, FlowHandle
from rail.core.utilStages import ColumnMapper, RowSelector, TableConverter
#def test_data_file():
# with pytest.raises(ValueError) as errinfo:
# df = DataFile('dummy', 'x')
def test_util_stages():
DS = RailStage.data_store
raildir = os.path.dirname(rail.__file__)
datapath = os.path.join(raildir, '..', 'tests', 'data', 'test_dc2_training_9816.pq')
data = DS.read_file('data', TableHandle, datapath)
table_conv = TableConverter.make_stage(name='conv', output_format='numpyDict')
col_map = ColumnMapper.make_stage(name='col_map', columns={})
row_sel = RowSelector.make_stage(name='row_sel', start=1, stop=15)
with pytest.raises(KeyError) as errinfo:
table_conv.get_handle('nope', allow_missing=False)
conv_data = table_conv(data)
mapped_data = col_map(data)
sel_data = row_sel(mapped_data)
row_sel_2 = RowSelector.make_stage(name='row_sel_2', start=1, stop=15)
row_sel_2.set_data('input', mapped_data.data)
handle = row_sel_2.get_handle('input')
row_sel_3 = RowSelector.make_stage(name='row_sel_3', input=handle.path, start=1, stop=15)
row_sel_3.set_data('input', None, do_read=True)
def do_data_handle(datapath, handle_class):
DS = RailStage.data_store
raildir = os.path.dirname(rail.__file__)
th = handle_class('data', path=datapath)
with pytest.raises(ValueError) as errinfo:
th.write()
assert not th.has_data
with pytest.raises(ValueError) as errinfo:
th.write_chunk(0, 1)
assert th.has_path
assert th.is_written
data = th.read()
data2 = th.read()
assert data is data2
assert th.has_data
assert th.make_name('data') == f'data.{handle_class.suffix}'
th2 = handle_class('data2', data=data)
assert th2.has_data
assert not th2.has_path
assert not th2.is_written
with pytest.raises(ValueError) as errinfo:
th2.open()
with pytest.raises(ValueError) as errinfo:
th2.write()
with pytest.raises(ValueError) as errinfo:
th2.write_chunk(0, 1)
assert th2.make_name('data2') == f'data2.{handle_class.suffix}'
assert str(th)
assert str(th2)
return th
def test_pq_handle():
raildir = os.path.dirname(rail.__file__)
datapath = os.path.join(raildir, '..', 'tests', 'data', 'test_dc2_training_9816.pq')
handle = do_data_handle(datapath, PqHandle)
pqfile = handle.open()
assert pqfile
assert handle.fileObj is not None
handle.close()
assert handle.fileObj is None
def test_qp_handle():
raildir = os.path.dirname(rail.__file__)
datapath = os.path.join(raildir, '..', 'tests', 'data', 'output_BPZ_lite.fits')
handle = do_data_handle(datapath, QPHandle)
qpfile = handle.open()
assert qpfile
assert handle.fileObj is not None
handle.close()
assert handle.fileObj is None
def test_hdf5_handle():
raildir = os.path.dirname(rail.__file__)
datapath = os.path.join(raildir, '..', 'tests', 'data', 'test_dc2_training_9816.hdf5')
handle = do_data_handle(datapath, Hdf5Handle)
with handle.open(mode='r') as f:
assert f
assert handle.fileObj is not None
datapath_chunked = os.path.join(raildir, '..', 'tests', 'data', 'test_dc2_training_9816_chunked.hdf5')
handle_chunked = Hdf5Handle("chunked", handle.data, path=datapath_chunked)
from tables_io.arrayUtils import getGroupInputDataLength, sliceDict, getInitializationForODict
num_rows = len(handle.data['photometry']['id'])
chunk_size = 1000
data = handle.data['photometry']
init_dict = getInitializationForODict(data)
with handle_chunked.open(mode='w') as fout:
for k, v in init_dict.items():
fout.create_dataset(k, v[0], v[1])
for i in range(0, num_rows, chunk_size):
start = i
end = i+chunk_size
if end > num_rows:
end = num_rows
handle_chunked.data = sliceDict(handle.data['photometry'], slice(start, end))
handle_chunked.write_chunk(start, end)
read_chunked = Hdf5Handle("read_chunked", None, path=datapath_chunked)
data_check = read_chunked.read()
assert np.allclose(data['id'], data_check['id'])
os.remove(datapath_chunked)
def test_model_handle():
DS = RailStage.data_store
DS.clear()
raildir = os.path.dirname(rail.__file__)
model_path = os.path.join(raildir, '..', 'examples', 'estimation', 'demo_snn.pkl')
model_path_copy = os.path.join(raildir, '..', 'examples', 'estimation', 'demo_snn_copy.pkl')
mh = ModelHandle("model", path=model_path)
mh2 = ModelHandle("model2", path=model_path)
model1 = mh.read()
model2 = mh2.read()
model3 = mh.open()
assert model1 is model2
assert model2 is model3
mh3 = ModelHandle("model3", path=model_path_copy, data=model1)
with mh3.open(mode='w') as fout:
pickle.dump(obj=mh3.data, file=fout, protocol=pickle.HIGHEST_PROTOCOL)
os.remove(model_path_copy)
def test_flow_handle():
DS = RailStage.data_store
DS.clear()
raildir = os.path.dirname(rail.__file__)
flow_path = os.path.join(raildir, '..', 'examples', 'goldenspike', 'data', 'pretrained_flow.pkl')
flow_path_copy = os.path.join(raildir, '..', 'examples', 'goldenspike', 'data', 'pretrained_flow_copy.pkl')
fh = FlowHandle("flow", path=flow_path)
fh2 = FlowHandle("flow2", path=flow_path)
flow1 = fh.read()
flow2 = fh2.read()
flow3 = fh.open()
assert flow1 is flow2
assert flow2 is flow3
fh3 = FlowHandle("flo3", path=flow_path_copy, data=flow1)
with pytest.raises(NotImplementedError) as errinfo:
fh3.open(mode='w')
fh3.write()
os.remove(flow_path_copy)
def test_data_hdf5_iter():
DS = RailStage.data_store
DS.clear()
raildir = os.path.dirname(rail.__file__)
datapath = os.path.join(raildir, '..', 'tests', 'data', 'test_dc2_training_9816.hdf5')
#data = DS.read_file('data', TableHandle, datapath)
th = Hdf5Handle('data', path=datapath)
x = th.iterator(groupname='photometry', chunk_size=1000)
assert isinstance(x, GeneratorType)
for i, xx in enumerate(x):
assert xx[0] == i*1000
assert xx[1] - xx[0] <= 1000
data = DS.read_file('input', TableHandle, datapath)
cm = ColumnMapper.make_stage(input=datapath, chunk_size=1000,
hdf5_groupname='photometry', columns=dict(id='bob'))
x = cm.input_iterator('input')
assert isinstance(x, GeneratorType)
for i, xx in enumerate(x):
assert xx[0] == i*1000
assert xx[1] - xx[0] <= 1000
def test_data_store():
DS = RailStage.data_store
DS.clear()
DS.__class__.allow_overwrite = False
raildir = os.path.dirname(rail.__file__)
datapath_hdf5 = os.path.join(raildir, '..', 'tests', 'data', 'test_dc2_training_9816.hdf5')
datapath_pq = os.path.join(raildir, '..', 'tests', 'data', 'test_dc2_training_9816.pq')
datapath_hdf5_copy = os.path.join(raildir, '..', 'tests', 'data', 'test_dc2_training_9816_copy.hdf5')
datapath_pq_copy = os.path.join(raildir, '..', 'tests', 'data', 'test_dc2_training_9816_copy.pq')
DS.add_data('hdf5', None, Hdf5Handle, path=datapath_hdf5)
DS.add_data('pq', None, PqHandle, path=datapath_pq)
with DS.open('hdf5') as f:
assert f
data_pq = DS.read('pq')
data_hdf5 = DS.read('hdf5')
DS.add_data('pq_copy', data_pq, PqHandle, path=datapath_pq_copy)
DS.add_data('hdf5_copy', data_hdf5, Hdf5Handle, path=datapath_hdf5_copy)
DS.write('pq_copy')
DS.write('hdf5_copy')
with pytest.raises(KeyError) as errinfo:
DS.read('nope')
with pytest.raises(KeyError) as errinfo:
DS.open('nope')
with pytest.raises(KeyError) as errinfo:
DS.write('nope')
with pytest.raises(TypeError) as errinfo:
DS['nope'] = None
with pytest.raises(ValueError) as errinfo:
DS['pq'] = DS['pq']
with pytest.raises(ValueError) as errinfo:
DS.pq = DS['pq']
assert repr(DS)
DS2 = DataStore(pq=DS.pq)
assert isinstance(DS2.pq, DataHandle)
# pop the 'pq' data item to avoid overwriting file under git control
DS.pop('pq')
DS.write_all()
DS.write_all(force=True)
os.remove(datapath_hdf5_copy)
os.remove(datapath_pq_copy)
| StarcoderdataPython |
1726583 | # filename: scripts/test_microscope.py (gh stars: 10-100)
#!/usr/bin/env python3
from time import sleep
from argparse import ArgumentParser
from temscript import Microscope, NullMicroscope, RemoteMicroscope
parser = ArgumentParser()
parser.add_argument("--null", action='store_true', default=False, help="Use NullMicroscope")
parser.add_argument("--remote", type=str, default='', help="Use RemoteMicroscope with given hostname:port")
parser.add_argument("--stage", action='store_true', default=False, help="Test stage movement (defaults to false)")
parser.add_argument("--ccd", action='store_true', default=False, help="Test CCD acquisition (defaults to false)")
parser.add_argument("--all", action='store_true', default=False, help="Perform all optional tests (defaults to false)")
parser.add_argument("--noshow", action='store_true', default=False, help="Don't show anything on UI (only stdout)")
args = parser.parse_args()
if args.null:
print("Starting test of NullMicroscope()")
microscope = NullMicroscope()
elif args.remote:
address = args.remote.split(":", 2)
if len(address) > 1:
address = (address[0], int(address[1]))
else:
address = (address[0], 8080)
print("Starting test of RemoteMicroscope(%s:%d)" % address)
microscope = RemoteMicroscope(address)
else:
print("Starting test of local Microscope()")
microscope = Microscope()
print("Microscope.get_family():", microscope.get_family())
print("Microscope.get_microscope_id():", microscope.get_microscope_id())
print("Microscope.get_version():", microscope.get_version())
print("Microscope.get_voltage():", microscope.get_voltage())
print("Microscope.get_vacuum():", microscope.get_vacuum())
print("Microscope.get_stage_holder():", microscope.get_stage_holder())
print("Microscope.get_stage_status():", microscope.get_stage_status())
print("Microscope.get_stage_limits():", microscope.get_stage_limits())
print("Microscope.get_stage_position():", microscope.get_stage_position())
cameras = microscope.get_cameras()
print("Microscope.get_cameras():", cameras)
for name in cameras.keys():
print("Microscope.get_camera_param(%s):" % name, microscope.get_camera_param(name))
detectors = microscope.get_stem_detectors()
print("Microscope.get_stem_detectors():", detectors)
for name in detectors.keys():
print("Microscope.get_stem_detector_param(%s):" % name, microscope.get_stem_detector_param(name))
# TODO: fails if not STEM
#print("Microscope.get_stem_acquisition_param():", microscope.get_stem_acquisition_param())
# Test invalid camera
try:
microscope.get_camera_param('ThereIsNoCameraWithThisName')
except KeyError:
print("Microscope.get_camera_param() fails with KeyError: YES")
else:
print("Microscope.get_camera_param() fails with KeyError: NO")
print("Microscope.get_image_shift():", microscope.get_image_shift())
print("Microscope.get_beam_shift():", microscope.get_beam_shift())
print("Microscope.get_beam_tilt():", microscope.get_beam_tilt())
print("Microscope.get_projection_sub_mode():", microscope.get_projection_sub_mode())
print("Microscope.get_projection_mode():", microscope.get_projection_mode())
print("Microscope.get_projection_mode_string():", microscope.get_projection_mode_string())
print("Microscope.get_magnification_index():", microscope.get_magnification_index())
print("Microscope.get_indicated_camera_length():", microscope.get_indicated_camera_length())
print("Microscope.get_indicated_magnification():", microscope.get_indicated_magnification())
print("Microscope.get_defocus():", microscope.get_defocus())
print("Microscope.get_objective_excitation():", microscope.get_objective_excitation())
print("Microscope.get_intensity():", microscope.get_intensity())
print("Microscope.get_objective_stigmator():", microscope.get_objective_stigmator())
print("Microscope.get_condenser_stigmator():", microscope.get_condenser_stigmator())
print("Microscope.get_diffraction_shift():", microscope.get_diffraction_shift())
print("Microscope.get_intensity():", microscope.get_intensity())
print("Microscope.get_screen_current():", microscope.get_screen_current())
print("Microscope.get_screen_position():", microscope.get_screen_position())
print("Microscope.get_state():", microscope.get_state())
if args.stage or args.all:
print("Testing stage movement:")
pos = microscope.get_stage_position()
new_x = 10e-6 if pos['x'] < 0 else -10e-6
microscope.set_stage_position(x=new_x)
for n in range(5):
print("\tstatus=%s, position=%s" % (microscope.get_stage_status(), microscope.get_stage_position()))
sleep(0.1)
pos = microscope.get_stage_position()
new_y = 10e-6 if pos['y'] < 0 else -10e-6
new_x = 10e-6 if pos['x'] < 0 else -10e-6
microscope.set_stage_position({'y': new_y}, x=new_x)
for n in range(5):
print("\tstatus=%s, position=%s" % (microscope.get_stage_status(), microscope.get_stage_position()))
sleep(0.1)
pos = microscope.get_stage_position()
new_y = 10e-6 if pos['y'] < 0 else -10e-6
microscope.set_stage_position(y=new_y, speed=0.5)
for n in range(5):
print("\tstatus=%s, position=%s" % (microscope.get_stage_status(), microscope.get_stage_position()))
sleep(0.1)
if cameras and (args.ccd or args.all):
ccd = list(cameras.keys())[0]
print("Testing camera '%s'" % ccd)
param = microscope.get_camera_param(ccd)
print("\tinitial camera_param(%s):" % ccd, param)
exposure = 1.0 if param["exposure(s)"] != 1.0 else 2.0
microscope.set_camera_param(ccd, {"exposure(s)": exposure})
param = microscope.get_camera_param(ccd)
print("\tupdated camera_param(%s):" % ccd, param)
print("\tacquiring image...")
images = microscope.acquire(ccd)
print("\t\tshape:", images[ccd].shape)
print("\t\tdtype:", images[ccd].dtype)
if not args.noshow:
import matplotlib.pyplot as plt
plt.imshow(images[ccd], cmap="gray")
plt.show()
| StarcoderdataPython |
3319024 | import numpy as np
from qubitLib import *
class Oracle:
def __init__(self):
self.qstrings=list()
self.score=np.inf
def setScore(self,score):
self.score=score
def initialization(self,n,m):
self.stringNum=n
self.stringSize=m
# create n qubit strings with m qubits each
for i in range(0,n):
self.qstrings.append(qubitString(m))
def collapse(self):
# make observation and collapse the state of all qubits of all strings
for i in range(0,self.stringNum):
self.qstrings[i].collapse()
def getFloatArrays(self):
# returns 1-dim numpy.array with all string values in row as float
res=list()
for i,j in enumerate(self.qstrings):
res.append(j.floatVal)
return np.array(res)
def getIntArrays(self):
# returns 1-dim numpy.array with all string values in row as integer
res=list()
for i,j in enumerate(self.qstrings):
res.append(j.decVal)
return np.array(res)
def setIntArrays(self,intArray):
# receives 1-dim numpy.array with all string values in row
if type(intArray) is not type(np.ndarray([])):
raise Exception("Input not of type numpy.array.")
intArray=np.around(intArray).astype(int)
for i,j in enumerate(self.qstrings):
j.updateDec(intArray[i])
def QuantumGateStep(self,otherOracle):
if type(otherOracle) != type(Oracle()):
raise Exception("otherOracle not of Oracle type.")
# for each qubit string
for qs in range(0,self.stringNum):
# for each qubit
for qb in range(0,self.stringSize):
# check if self qubit's value is equal to other qubit's value
if self.qstrings[qs].quString[qb].value == otherOracle.qstrings[qs].quString[qb].value:
continue
else: # if it's not, apply the Variable Angle Distance Quantum Rotation
                    self.qstrings[qs].quString[qb].varAngleDist(otherOracle.qstrings[qs].quString[qb])
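# --- Usage sketch (added for illustration; assumes qubitString.collapse()
# populates decVal, as the accessors above suggest) ---
if __name__ == '__main__':
    oracle = Oracle()
    oracle.initialization(3, 8)   # 3 qubit strings of 8 qubits each
    oracle.collapse()             # observe and collapse every qubit
    print(oracle.getIntArrays())  # collapsed values as integers
 | StarcoderdataPython |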
136089 | # filename: lib.py
from requests import get, post
from datetime import datetime
from pprint import pprint
from time import time
from os import environ
KEY = environ["MOODLE_API_KEY"]
URL = environ["MOODLE_URL"]
ENDPOINT = "/webservice/rest/server.php"
def rest_api_parameters(in_args, prefix='', out_dict=None):
"""Transform dictionary/array structure to a flat dictionary, with key names
defining the structure.
Adapted from https://github.com/mrcinv/moodle_api.py
Example usage:
>>> rest_api_parameters({'courses':[{'id':1,'name': 'course1'}]})
{'courses[0][id]':1,
'courses[0][name]':'course1'}
"""
if out_dict == None:
out_dict = {}
if not type(in_args) in (list,dict):
out_dict[prefix] = in_args
return out_dict
if prefix == "":
prefix = prefix + '{0}'
else:
prefix = prefix + '[{0}]'
if type(in_args) == list:
for idx, item in enumerate(in_args):
rest_api_parameters(item, prefix.format(idx), out_dict)
elif type(in_args) == dict:
for key, item in in_args.items():
rest_api_parameters(item, prefix.format(key), out_dict)
return out_dict
def call(fname, **kwargs):
"""Calls moodle API function with function name fname and keyword arguments.
Adapted from https://github.com/mrcinv/moodle_api.py
Example:
>>> call_mdl_function('core_course_update_courses',
courses = [{'id': 1, 'fullname': 'My favorite course'}])
"""
parameters = rest_api_parameters(kwargs)
parameters.update({"wstoken": KEY, "moodlewsrestformat": "json", "wsfunction": fname})
response = post(URL+ENDPOINT, parameters)
response = response.json()
if type(response) == dict and response.get("exception"):
raise SystemError("Error calling Moodle API\n", response)
return response
def get_data():
calendar_data = call("core_calendar_get_calendar_upcoming_view")["events"]
return {"timestamp": time(), "values": [
{
"name": x["name"],
"timestart": x["timestart"],
"timemodified": x["timemodified"],
"islastday": x["islastday"]
} for x in calendar_data if x["course"]["id"] == 6000000053]}
def get_message(data):
result = ""
for x in data["values"]:
timestart = datetime.fromtimestamp(x["timestart"]).strftime("%d/%m/%Y, %H:%M:%S")
timemodified = datetime.fromtimestamp(x["timemodified"]).strftime("%d/%m/%Y, %H:%M:%S")
result += "*" + x["name"] + "*:\n " + timestart + ", שונה לאחרונה ב " + timemodified + "\n"
return result
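# --- Usage sketch (added for illustration): requires MOODLE_API_KEY and
# MOODLE_URL in the environment; get_data filters on a hard-coded course id.
if __name__ == '__main__':
    data = get_data()
    print(get_message(data))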
| StarcoderdataPython |
145358 | from app.sensors.sensor_factory import build_sensors, provide_sensors
from app.sensors.sensor_manager import SensorManager
from app.sensors.sensor import Sensor
| StarcoderdataPython |
127941 | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
long_description = readme.read()
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f:
requirements = f.read().splitlines()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
def package_version():
from mxio.version import get_version
return get_version()
setup(
name='mxio',
version=package_version(),
packages=find_packages(),
url='https://github.com/michel4j/mxio',
include_package_data=True,
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='A Simple MX Diffraction Image Library',
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=requirements,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
],
)
| StarcoderdataPython |
1791805 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import codecs
from logging import FileHandler
import logging.config
basedir = os.path.abspath(os.path.dirname(__file__))
logdir = os.path.join(basedir, 'logs')
logini_path = os.path.join(basedir, 'log.ini')
if not os.path.exists(logdir):
os.mkdir(logdir)
class SafeFileHandler(FileHandler):
def __init__(self, filename, mode, encoding=None, delay=0):
"""
Use the specified filename for streamed logging
"""
if codecs is None:
encoding = None
FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.suffix = "%Y-%m-%d"
self.suffix_time = ""
self.delay = delay
def emit(self, record):
"""
Emit a record.
Always check time
"""
try:
if self.check_baseFilename(record):
self.build_baseFilename()
FileHandler.emit(self, record)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def check_baseFilename(self, record):
"""
Determine if builder should occur.
record is not used, as we are just comparing times,
but it is needed so the method signatures are the same
"""
timeTuple = time.localtime()
if (self.suffix_time != time.strftime(self.suffix, timeTuple) or not
os.path.exists(self.baseFilename+'.'+self.suffix_time)):
return 1
else:
return 0
def build_baseFilename(self):
"""
do builder; in this case,
old time stamp is removed from filename and
a new time stamp is append to the filename
"""
if self.stream:
self.stream.close()
self.stream = None
# remove old suffix
if self.suffix_time != "":
index = self.baseFilename.find("."+self.suffix_time)
if index == -1:
index = self.baseFilename.rfind(".")
self.baseFilename = self.baseFilename[:index]
# add new suffix
currentTimeTuple = time.localtime()
self.suffix_time = time.strftime(self.suffix, currentTimeTuple)
self.baseFilename = self.baseFilename + "." + self.suffix_time
self.mode = 'a'
if not self.delay:
self.stream = self._open()
logging.config.fileConfig(logini_path)
logger = logging.getLogger('metadataserver')
# taken from https://stackoverflow.com/questions/6234405/logging-uncaught-exceptions-in-python
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
else:
logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
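# --- Usage sketch (added for illustration): attaching SafeFileHandler
# programmatically; 'demo.log' is a hypothetical file name.
if __name__ == '__main__':
    handler = SafeFileHandler(os.path.join(logdir, 'demo.log'), mode='a', encoding='utf-8')
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.info('records roll to demo.log.<YYYY-mm-dd> daily')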
| StarcoderdataPython |
97806 | """Helper function to setup the run from the command line.
Adapted from https://github.com/atomistic-machine-learning/schnetpack/blob/dev/src/schnetpack/utils/script_utils/setup.py
"""
import os
import logging
from shutil import rmtree
from nff.utils.tools import to_json, set_random_seed, read_from_json
__all__ = ["setup_run"]
def setup_run(args):
argparse_dict = vars(args)
jsonpath = os.path.join(args.model_path, "args.json")
# absolute paths
argparse_dict['data_path'] = os.path.abspath(argparse_dict['data_path'])
argparse_dict['model_path'] = os.path.abspath(argparse_dict['model_path'])
if args.mode == "train":
if args.overwrite and os.path.exists(args.model_path):
logging.info("existing model will be overwritten...")
rmtree(args.model_path)
if not os.path.exists(args.model_path):
os.makedirs(args.model_path)
to_json(jsonpath, argparse_dict)
set_random_seed(args.seed)
train_args = args
else:
train_args = read_from_json(jsonpath)
return train_args
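# --- Usage sketch (added for illustration): setup_run expects an argparse-style
# namespace; the attribute values below are hypothetical placeholders.
# from argparse import Namespace
# args = Namespace(mode='train', data_path='dataset.pth.tar',
#                  model_path='./model', overwrite=False, seed=0)
# train_args = setup_run(args)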
| StarcoderdataPython |
151922 | # filename: sublimetext/FSharp/project.py
import sublime
import sublime_plugin
from queue import Queue
import queue
from threading import Thread
from zipfile import ZipFile
import os
from FSharp.lib import const
from FSharp.lib.fsac import get_server
from FSharp.lib import fs
tasks = Queue()
task_results = Queue()
SIG_QUIT = '<<QUIT>>'
def plugin_loaded():
"""
Initializes plugin.
"""
# Install binaries if needed.
if not installation.check_binaries():
installation.install_binaries()
print('FSharp: Binaries installed. Everything ok.')
else:
print('FSharp: Binaries found. Everything ok.')
# Start the pipe server.
AsyncPipe()
def plugin_unloaded():
tasks.put((SIG_QUIT, ()))
class AsyncPipe(object):
"""
Wraps the fsac server to make it asynchronous.
"""
def __init__(self):
self.server = get_server()
self.tasks = tasks
writer = Thread(target=self.write, daemon=True)
reader = Thread(target=self.read, daemon=True)
writer.start()
reader.start()
def write(self):
while True:
action, args = self.tasks.get()
method = getattr(self.server, action, None)
if self.server.proc.poll() is not None:
print("FSharp: Server process unavailable. "
"Exiting writer thread.")
break
if not method:
process_output({'Kind': 'ERROR', 'Data': 'Not a valid call.'})
continue
if action == SIG_QUIT:
# Give the other thread a chance to exit.
self.tasks.put((action, args))
break
# Write to server's stdin.
method(*args)
def read(self):
while True:
output = self.server.read_line()
if output['Kind'] == 'completion':
task_results.put(output)
else:
process_output(output)
if self.server.proc.poll() is not None:
print("FSharp: Server process unavailable. "
"Exiting reader thread.")
break
try:
# Don't block here so we can read all the remaining output.
action, args = self.tasks.get(timeout=0.01)
except:
continue
if action == SIG_QUIT:
# Give the other thread a chance to exit.
self.tasks.put((action, args))
break
self.tasks.put((action, args))
class actions:
"""
Groups methods that process data received from the autocomplete server.
"""
@staticmethod
def generic_action(data=None):
sublime.status_message("RECEIVED: " + str(data))
print("RECEIVED: " + str(data))
@staticmethod
def show_info(data):
print(data)
@staticmethod
def find_declaration(data):
data = data['Data']
fname = data['File']
row = data['Line'] + 1
col = data['Column'] + 1
encoded = "{0}:{1}:{2}".format(fname, row, col)
sublime.active_window().open_file(encoded, sublime.ENCODED_POSITION)
@staticmethod
def declarations(data):
decls = data['Data']
print(decls)
@staticmethod
def show_completions(data):
v = sublime.active_window().active_view()
v.show_popup_menu(data['Data'], None)
@staticmethod
def show_tooltip(data):
v = sublime.active_window().active_view()
v.show_popup_menu([line for line in data['Data'].split('\n') if line],
None)
class requests:
@staticmethod
def parse(view):
tasks.put(('parse', (view.file_name(), True)))
@staticmethod
def completions(view):
requests.parse(view)
row, col = view.rowcol(view.sel()[0].b)
tasks.put(('completions', (view.file_name(), row, col)))
@staticmethod
def declarations(view):
requests.parse(view)
tasks.put(('declarations', (view.file_name(),)))
@staticmethod
def tooltip(view):
requests.parse(view)
row, col = view.rowcol(view.sel()[0].b)
tasks.put(('tooltip', (view.file_name(), row, col)))
def process_output(data):
action = None
if data['Kind'] == 'completion':
raise ValueError('completion results should be handled in a different way')
elif data['Kind'] == 'tooltip':
action = actions.show_tooltip
elif data['Kind'] == 'INFO':
action = actions.show_info
elif data['Kind'] == 'finddecl':
action = actions.find_declaration
elif data['Kind'] == 'declarations':
action = actions.declarations
elif data['Kind'] == 'project':
for fname in data['Data']:
tasks.put(('parse', (fname, True)))
else:
action = actions.generic_action
if action:
# Run action on the main UI thread to make ST happy.
sublime.set_timeout(lambda: action(data), 0)
class installation:
@staticmethod
def check_binaries():
print('FSharp: Checking installed files')
return os.path.exists(const.path_to_fs_ac_binary())
@staticmethod
def install_binaries():
print('FSharp: Installing files to Packages/FSharp_Binaries...')
sublime.status_message('FSharp: Installing files to Packages/FSharp_Binaries...')
try:
os.mkdir(const.path_to_fs_binaries())
except IOError:
pass
zipped_bytes = sublime.load_binary_resource('Packages/FSharp/bundled/fsautocomplete.zip')
target = os.path.join(const.path_to_fs_binaries(), 'fsautocomplete.zip')
with open(target, 'wb') as f:
f.write(zipped_bytes)
with open(target, 'rb') as f:
ZipFile(f).extractall(path=const.path_to_fs_binaries())
os.unlink(target)
class FsSetProjectFile(sublime_plugin.WindowCommand):
def is_enabled(self):
v = self.window.active_view()
if v and fs.is_fsharp_project(v.file_name()):
return True
msg = 'FSharp: Not a project file.'
print(msg)
sublime.status_message(msg)
return False
def run(self):
v = self.window.active_view()
sublime.status_message('FSharp: Loading project...')
tasks.put(('project', (v.file_name(),)))
class FsGetTooltip(sublime_plugin.WindowCommand):
def is_enabled(self):
v = self.window.active_view()
if v and fs.is_fsharp_code(v.file_name()):
return True
msg = 'FSharp: Not an F# code file.'
print(msg)
sublime.status_message(msg)
return False
def run(self):
v = self.window.active_view()
requests.tooltip(v)
class FsDeclarations(sublime_plugin.WindowCommand):
def is_enabled(self):
v = self.window.active_view()
if v and fs.is_fsharp_code(v.file_name()):
return True
msg = 'FSharp: Not an F# code file.'
print(msg)
sublime.status_message(msg)
return False
def run(self):
v = self.window.active_view()
requests.declarations(v)
class FsStEvents(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
if not fs.is_fsharp_code(view.file_name()):
return []
while not task_results.empty():
task_results.get()
# A request for completions is treated especially: the result will
# be published to a queue.
requests.completions(view)
completions = []
try:
completions = task_results.get(timeout=0.2)
completions = completions['Data']
except queue.Empty:
# Too bad. The daemon was too slow.
pass
# TODO: Necessary? (It seems so.)
flags = (sublime.INHIBIT_EXPLICIT_COMPLETIONS |
sublime.INHIBIT_WORD_COMPLETIONS)
return [[c, c] for c in completions], flags
| StarcoderdataPython |
3269636 | import sys
import re
with open(sys.argv[1],'r') as test_cases:
for test in test_cases:
a,b = test.split(':')
        nums = a.split()
        elements = re.findall(r'\d+', b)
        for i in range(0, len(elements), 2):
            nums[int(elements[i])], nums[int(elements[i+1])] = nums[int(elements[i+1])], nums[int(elements[i])]
        print(' '.join(nums))
| StarcoderdataPython |
1647108 | # filename: src/compas/viewers/core/__init__.py
from .drawing import *
from .arrow import *
from .axes import *
from .camera import *
from .grid import *
from .mouse import *
from .slider import *
from .colorbutton import *
from .glwidget import *
from .controller import *
from .textedit import *
from .buffers import *
from .app import *
from .drawing import __all__ as a
from .arrow import __all__ as b
from .axes import __all__ as c
from .camera import __all__ as d
from .grid import __all__ as e
from .mouse import __all__ as f
from .slider import __all__ as g
from .colorbutton import __all__ as h
from .glwidget import __all__ as i
from .controller import __all__ as j
from .app import __all__ as k
from .buffers import __all__ as l
from .textedit import __all__ as m
__all__ = a + b + c + d + e + f + g + h + i + j + l + m
__all__ += k
| StarcoderdataPython |
3251808 | # Standard Imports
from fastapi import APIRouter
from fastapi import HTTPException
from fastapi import Depends
from fastapi import Path, Query
# Database Import
from app.db.engine import get_db
# Typing Imports
from sqlalchemy.orm import Session
from typing import Optional
# Exception Imports
from sqlalchemy.exc import IntegrityError
from sqlalchemy_filters.exceptions import InvalidPage
from ...utils.exceptions import ItensNotFound
from ...utils.exceptions import InvalidPageItemsNumber
# Authentication Imports
from ..users.models import User
from app.core.auth import manager
# User Schemas
from .services import ProviderService
from .schemas import ProviderCreate
from .schemas import ProviderUpdate
from .schemas import ProviderResponse
from .schemas import ProvidersResponse
route = APIRouter()
provider_service = ProviderService()
@route.get("/providers/", response_model_exclude_unset=True, response_model=ProvidersResponse)
def get_all_providers(db: Session = Depends(get_db), auth_user: User=Depends(manager), name: Optional[str] = ''):
"""
## Retrieve all providers.
### Args:
> name (str): Provider name to filter.
### Returns:
> ProvidersResponse: A dict with providers records.
"""
try:
providers = provider_service.fetch_all(db, name)
return providers
except ItensNotFound:
raise HTTPException(status_code=404, detail="Nenhum fornecedor foi encontrado.")
@route.get("/providers/page/{page}", response_model=ProvidersResponse)
def get_all_providers_in_current_page(page: int = Path(..., gt=0), per_page: int = Query(default=20, gt=0),
name: Optional[str] = '', db: Session = Depends(get_db), auth_user: User=Depends(manager)):
"""
## Retrieve all providers in current page.
### Args:
> page (int): Page to fetch.
> per_page (int): Amount of providers per page.
> name (str): Provider name to filter.
### Returns:
> ProvidersResponse: A dict with providers records and pagination metadata.
"""
try:
providers = provider_service.fetch_all_with_pagination(db, page, per_page, name)
return providers
except InvalidPage:
raise HTTPException(status_code=400, detail="Não foi possivel recuperar os itens na página informada.")
except InvalidPageItemsNumber:
raise HTTPException(status_code=400, detail="Quantidade de itens por pagina precisa ser maior que zero.")
except ItensNotFound:
raise HTTPException(status_code=404, detail="Nenhum fornecedor foi encontrado.")
@route.get("/providers/{id}", response_model=ProviderResponse)
def get_one_provider(id: int, db: Session = Depends(get_db), auth_user: User=Depends(manager)):
"""
## Retrieve one provider.
### Args:
> id (int): The provider ID.
### Raises:
> HTTPException: Raises 404 if provider was not found.
### Returns:
> ProviderResponse: The provider response model.
"""
provider = provider_service.fetch(db, id)
if not provider:
raise HTTPException(status_code=404, detail=f"Fornecedor de id {id} não foi encontrado.")
return provider
@route.post("/providers/", status_code=201, response_model=ProviderResponse)
def create_provider(provider: ProviderCreate, db: Session = Depends(get_db), auth_user: User=Depends(manager)):
"""
## Creates a provider.
### Args:
> provider (ProviderCreate): The provider create model.
### Returns:
> ProviderResponse: The provider response model.
"""
try:
provider = provider_service.create(db, auth_user, provider)
return provider
except IntegrityError as err:
if "cnpj" in repr(err):
raise HTTPException(status_code=422, detail="Já existe um fornecedor com o CNPJ informado cadastrado.")
@route.patch("/providers/{id}", response_model=ProviderResponse)
def update_provider(id: int, provider: ProviderUpdate, db: Session = Depends(get_db), auth_user: User=Depends(manager)):
"""
## Edits a provider by id.
### Args:
> id (int): The provider ID.
> provider (ProviderUpdate): The provider update model.
### Raises:
> HTTPException: Raises 404 if provider was not found.
### Returns:
> ProviderResponse: The provider response model.
"""
provider = provider_service.update(db, id, provider)
if not provider:
raise HTTPException(status_code=404, detail=f"Fornecedor de id {id} não foi encontrado.")
return provider
@route.delete("/providers/{id}", response_model=ProviderResponse)
def delete_provider(id: int, db: Session = Depends(get_db), auth_user: User=Depends(manager)):
"""
## Deletes a provider by id.
### Args:
> id (int): The provider ID.
### Raises:
> HTTPException: Raises 404 if provider was not found.
### Returns:
    > ProviderResponse: The provider response model.
"""
provider = provider_service.delete(db, id)
if not provider:
raise HTTPException(status_code=404, detail=f"Fornecedor de id {id} não foi encontrado.")
    return provider
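# --- Usage sketch (added for illustration): exercising the endpoints with an
# HTTP client; the base URL, token, and payload fields are hypothetical (the
# real create schema lives in .schemas and may require more fields).
# import httpx
# headers = {"Authorization": "Bearer <token>"}
# httpx.get("http://localhost:8000/providers/", headers=headers)
# httpx.post("http://localhost:8000/providers/", headers=headers,
#            json={"name": "ACME", "cnpj": "00000000000000"})
 | StarcoderdataPython |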
1794839 | """
Code illustration: 9.06
Weather reporter
Tkinter GUI Application Development Blueprints
"""
import sys
import json
import datetime
from tkinter import Tk, Canvas, Entry, Button, Frame, Label, StringVar, PhotoImage, ALL
from tkinter import ttk
from tkinter import messagebox
import urllib.request
import urllib.parse
class WeatherReporter:
weather_data = None
APIKEY = 'ENTER_YOUR_API_KEY_HERE'
def __init__(self, root):
self.root = root
self.create_top_frame()
self.create_weather_display_frame()
def create_top_frame(self):
frame = Frame(self.root)
frame.pack(side="top")
Label(frame, text='Enter Location').pack(side="left")
self.location = StringVar()
Entry(frame, textvariable=self.location).pack(side="left")
ttk.Button(frame, text='Go', command=self.on_show_weather_button_clicked).pack(
side="left")
def create_weather_display_frame(self):
self.canvas = Canvas(
self.root, height='425', width='340', background='black')
self.canvas.create_rectangle(10, 10, 330, 415, fill='#F6AF06')
self.canvas.pack(side="bottom")
def on_show_weather_button_clicked(self):
if not self.location.get():
return
self.clear_canvas()
self.get_weather_data()
self.format_data()
self.display_data()
def get_weather_data(self):
self.weather_data = self.get_data_from_url()
self.weather_data = self.json_to_dict(self.weather_data)
def clear_canvas(self):
self.canvas.delete(ALL)
self.canvas.create_rectangle(10, 10, 330, 415, fill='#F6AF06')
def format_data(self):
data = self.weather_data
self.name = data['name']
self.latitude = self.str2num(data['lat'], 3)
self.longitude = self.str2num(data['lon'], 3)
self.country = data['country']
        self.time_now = self.time_stamp_to_date(data['dt'])
self.description = data['description']
self.icon_name = "weatherimages/{}.png".format(data['icon'].lower())
self.clouds = data['all'] + ' %'
self.sunrise_time = self.time_stamp_to_time(data['sunrise'])
self.sunset_time = self.time_stamp_to_time(data['sunset'])
self.temp_now_in_celcius = self.str2num(
self.kelvin_to_celsius(float(data['temp'])), 2) + u' \u2103'
self.temp_now_in_fahrenheit = self.str2num(
self.kelvin_to_fahrenheit(float(data['temp'])), 2) + u' \u2109'
self.temp_min_in_celcius = self.str2num(
self.kelvin_to_celsius(float(data['temp_min'])), 2) + u' \u2103'
self.temp_max_in_celcius = self.str2num(
self.kelvin_to_celsius(float(data['temp_max'])), 2) + u' \u2103'
def kelvin_to_celsius(self, k):
return k - 273.15
def kelvin_to_fahrenheit(self, k):
return (k * 9 / 5 - 459.67)
def str2num(self, string, precision):
return "%0.*f" % (precision, float(string))
def display_data(self):
if not self.weather_data:
messagebox.showerror(
'Name not found', 'Unable to fetch record - Name not found')
return
data = self.weather_data
opts = {'fill': 'white', 'font': 'Helvetica 12'}
self.canvas.create_text(52, 30, text=self.name, **opts)
self.canvas.create_text(
245, 35, text='Latitude :' + self.latitude, **opts)
self.canvas.create_text(
245, 53, text='Longitude: ' + self.longitude, **opts)
self.canvas.create_text(
55, 50, text='Country : ' + self.country, **opts)
self.canvas.create_text(155, 80, text=self.time_now, **opts)
self.canvas.create_text(85, 105, text='NOW', **opts)
self.img = PhotoImage(file=self.icon_name)
self.canvas.create_image(140, 105, image=self.img)
self.canvas.create_text(240, 105, text=self.description, **opts)
self.canvas.create_text(85, 155, text='Temperature', **opts)
self.canvas.create_text(
87, 175, text=self.temp_min_in_celcius + ' ~ ' + self.temp_max_in_celcius, **opts)
self.canvas.create_text(
225, 140, text=self.temp_now_in_celcius, **opts)
self.canvas.create_text(
225, 180, text=self.temp_now_in_fahrenheit, **opts)
self.canvas.create_text(95, 215, text='Relative Humidity', **opts)
self.canvas.create_text(198, 215, text=data['humidity'] + ' %', **opts)
self.canvas.create_text(77, 235, text='Wind Speed', **opts)
self.canvas.create_text(205, 235, text=data['speed'] + ' m/s ', **opts)
self.canvas.create_text(80, 255, text='Wind Degree', **opts)
self.canvas.create_text(
223, 255, text=data['deg'] + ' degrees', **opts)
self.canvas.create_text(80, 275, text='Pressure(at.)', **opts)
self.canvas.create_text(
225, 275, text=data['pressure'] + ' millibars', **opts)
if '3h' in data:
self.canvas.create_text(83, 293, text='Rain (Last 3h)', **opts)
self.canvas.create_text(
200, 293, text=data['3h'] + ' mm', **opts) # rain
self.canvas.create_text(58, 310, text='Clouds', **opts)
self.canvas.create_text(200, 310, text=self.clouds, **opts) # clouds
self.canvas.create_text(60, 328, text='Sunrise', **opts)
self.canvas.create_text(200, 328, text=self.sunrise_time, **opts)
self.canvas.create_text(59, 343, text='Sunset', **opts)
self.canvas.create_text(200, 343, text=self.sunset_time, **opts)
self.canvas.create_text(159, 378, text='Powered by:', **opts)
self.canvas.create_text(
159, 398, text='www.openweathermap.org', **opts)
def time_stamp_to_time(self, ts):
return (datetime.datetime.fromtimestamp(int(ts)).strftime('%H:%M:%S'))
    def time_stamp_to_date(self, ts):
return (datetime.datetime.fromtimestamp(int(ts)).strftime('%Y-%m-%d %H:%M:%S'))
def get_data_from_url(self):
try:
params = urllib.parse.urlencode(
{'q': self.location.get(), 'APPID': self.APIKEY}, encoding="utf-8")
api_url = (
'http://api.openweathermap.org/data/2.5/weather?{}'
.format(params)
)
with urllib.request.urlopen(api_url) as f:
json_data = f.read()
return json_data
except IOError as e:
messagebox.showerror(
'Unable to connect', 'Unable to connect %s' % e)
sys.exit(1)
def json_to_dict(self, json_data):
decoder = json.JSONDecoder()
decoded_json_data = decoder.decode(json_data.decode("utf-8"))
flattened_dict = {}
for key, value in decoded_json_data.items():
if key == 'weather':
for ke, va in value[0].items():
flattened_dict[str(ke)] = str(va).upper()
continue
try:
for k, v in value.items():
flattened_dict[str(k)] = str(v).upper()
except:
flattened_dict[str(key)] = str(value).upper()
return flattened_dict
def main():
root = Tk()
WeatherReporter(root)
root.mainloop()
if __name__ == '__main__':
main()
| StarcoderdataPython |
146677 | # repo: dials-src/dials
from __future__ import annotations
from math import pi, sqrt
from dials.array_family import flex # noqa: F401;
from dials_algorithms_profile_model_ellipsoid_ext import * # noqa: F401, F403;
def mosaicity_from_eigen_decomposition(eigen_values):
return (
sqrt(eigen_values[0]) * 180.0 / pi,
sqrt(eigen_values[1]) * 180.0 / pi,
sqrt(eigen_values[2]) * 180.0 / pi,
)
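# --- Worked example (added for illustration): the eigenvalues are variances in
# rad^2, so sqrt gives radians and 180/pi converts to degrees, e.g.
# mosaicity_from_eigen_decomposition((1e-6, 4e-6, 9e-6))
# -> (~0.0573, ~0.1146, ~0.1719) degrees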
| StarcoderdataPython |
3311013 | # filename: facebook/alienDict.py
import collections
from collections import defaultdict, deque
class Solution(object):
def alienOrder(self, words):
map = {}
letters = [0 for i in range(26)]
for i in range(len(words)):
for j in range(len(words[i])):
key=ord(words[i][j])-ord('a')
letters[key]=0
map[key]=set()
for i in range(len(words)-1):
word1 = words[i]
word2 = words[i+1]
idx = 0
for j in range(min(len(word1),len(word2))):
if(word1[j]!=word2[j]):
key1 = ord(word1[j])-ord('a')
key2 = ord(word2[j])-ord('a')
count = letters[key2]
if(key2 not in map[key1]):
letters[key2] =count+1
map[key1].add(key2)
break
dictionary = collections.deque()
res = ''
for i in range(26):
if(letters[i]==0 and i in map):
dictionary.appendleft(i)
while(len(dictionary)!=0):
nextup = dictionary.pop()
res+=(chr(nextup+ord('a')))
greaterSet = map[nextup]
for greater in greaterSet:
letters[greater]-=1
if(letters[greater]==0):
dictionary.appendleft(greater)
if(len(map)!=len(res)):
return ""
return res
def alienOrder1(self, words): # topo sort BFS
# a -> b
adj = defaultdict(set)
# in-degree
deg = {c: 0 for w in words for c in w}
for i, w1 in enumerate(words[:-1]):
w2 = words[i + 1]
for c1, c2 in zip(w1, w2):
if c1 == c2: continue
if c2 not in adj[c1]: deg[c2] += 1
adj[c1].add(c2)
break
res = ''
# start w 0 indegree nodes
q = deque([c for c in deg if not deg[c]])
while q:
c = q.popleft()
res += c
for n in adj[c]:
deg[n] -= 1
if not deg[n]: q.append(n)
        return res
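# --- Usage sketch (added for illustration): the classic example, where
# ["wrt", "wrf", "er", "ett", "rftt"] yields the alien order "wertf".
if __name__ == '__main__':
    print(Solution().alienOrder1(["wrt", "wrf", "er", "ett", "rftt"]))  # -> "wertf"
 | StarcoderdataPython |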
3216717 | # coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
OpenAPI spec version: 2.4.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class EmployeeLeaveBalance(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"name": "str",
"leave_type_id": "str",
"balance": "float",
"type_of_units": "str",
}
attribute_map = {
"name": "name",
"leave_type_id": "leaveTypeID",
"balance": "balance",
"type_of_units": "typeOfUnits",
}
def __init__(
self, name=None, leave_type_id=None, balance=None, type_of_units=None
): # noqa: E501
"""EmployeeLeaveBalance - a model defined in OpenAPI""" # noqa: E501
self._name = None
self._leave_type_id = None
self._balance = None
self._type_of_units = None
self.discriminator = None
if name is not None:
self.name = name
if leave_type_id is not None:
self.leave_type_id = leave_type_id
if balance is not None:
self.balance = balance
if type_of_units is not None:
self.type_of_units = type_of_units
@property
def name(self):
"""Gets the name of this EmployeeLeaveBalance. # noqa: E501
Name of the leave type. # noqa: E501
:return: The name of this EmployeeLeaveBalance. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EmployeeLeaveBalance.
Name of the leave type. # noqa: E501
:param name: The name of this EmployeeLeaveBalance. # noqa: E501
:type: str
"""
self._name = name
@property
def leave_type_id(self):
"""Gets the leave_type_id of this EmployeeLeaveBalance. # noqa: E501
The Xero identifier for leave type # noqa: E501
:return: The leave_type_id of this EmployeeLeaveBalance. # noqa: E501
:rtype: str
"""
return self._leave_type_id
@leave_type_id.setter
def leave_type_id(self, leave_type_id):
"""Sets the leave_type_id of this EmployeeLeaveBalance.
The Xero identifier for leave type # noqa: E501
:param leave_type_id: The leave_type_id of this EmployeeLeaveBalance. # noqa: E501
:type: str
"""
self._leave_type_id = leave_type_id
@property
def balance(self):
"""Gets the balance of this EmployeeLeaveBalance. # noqa: E501
The employees current balance for the corresponding leave type. # noqa: E501
:return: The balance of this EmployeeLeaveBalance. # noqa: E501
:rtype: float
"""
return self._balance
@balance.setter
def balance(self, balance):
"""Sets the balance of this EmployeeLeaveBalance.
The employees current balance for the corresponding leave type. # noqa: E501
:param balance: The balance of this EmployeeLeaveBalance. # noqa: E501
:type: float
"""
self._balance = balance
@property
def type_of_units(self):
"""Gets the type_of_units of this EmployeeLeaveBalance. # noqa: E501
The type of the units of the leave. # noqa: E501
:return: The type_of_units of this EmployeeLeaveBalance. # noqa: E501
:rtype: str
"""
return self._type_of_units
@type_of_units.setter
def type_of_units(self, type_of_units):
"""Sets the type_of_units of this EmployeeLeaveBalance.
The type of the units of the leave. # noqa: E501
:param type_of_units: The type_of_units of this EmployeeLeaveBalance. # noqa: E501
:type: str
"""
self._type_of_units = type_of_units
| StarcoderdataPython |
1791246 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .aspp import ASPP_Module
up_kwargs = {'mode': 'bilinear', 'align_corners': False}
norm_layer = nn.BatchNorm2d
class _ConvBNReLU(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
dilation=1, groups=1, relu6=False, norm_layer=norm_layer):
super(_ConvBNReLU, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
self.bn = norm_layer(out_channels)
self.relu = nn.ReLU6(True) if relu6 else nn.ReLU(True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class ASPPPlusHead(nn.Module):
def __init__(self, num_classes, in_channels, norm_layer=norm_layer, up_kwargs=up_kwargs, in_index=[0, 3]):
super(ASPPPlusHead, self).__init__()
self._up_kwargs = up_kwargs
self.in_index = in_index
self.aspp = ASPP_Module(in_channels, [12, 24, 36], norm_layer=norm_layer, up_kwargs=up_kwargs)
self.c1_block = _ConvBNReLU(in_channels // 8, in_channels // 8, 3, padding=1, norm_layer=norm_layer)
self.block = nn.Sequential(
_ConvBNReLU(in_channels // 4, in_channels // 4, 3, padding=1, norm_layer=norm_layer),
nn.Dropout(0.5),
_ConvBNReLU(in_channels // 4, in_channels // 4, 3, padding=1, norm_layer=norm_layer),
nn.Dropout(0.1),
nn.Conv2d(in_channels // 4, num_classes, 1))
def _transform_inputs(self, inputs):
if isinstance(self.in_index, (list, tuple)):
inputs = [inputs[i] for i in self.in_index]
elif isinstance(self.in_index, int):
inputs = inputs[self.in_index]
return inputs
def forward(self, inputs):
inputs = self._transform_inputs(inputs)
c1, x = inputs
size = c1.size()[2:]
c1 = self.c1_block(c1)
x = self.aspp(x)
x = F.interpolate(x, size, **self._up_kwargs)
return self.block(torch.cat([x, c1], dim=1))
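# --- Usage sketch (added for illustration): channel sizes assume a
# ResNet-style backbone where the low-level stage carries in_channels // 8
# channels; the four-element list matches the default in_index=[0, 3].
# head = ASPPPlusHead(num_classes=19, in_channels=2048)
# c1 = torch.randn(2, 256, 128, 128)   # low-level features (index 0)
# x = torch.randn(2, 2048, 32, 32)     # high-level features (index 3)
# logits = head([c1, None, None, x])   # -> (2, 19, 128, 128)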
| StarcoderdataPython |
104449 | # repo: kuraakhilesh8230/aries-cloudagent-python
from asynctest import TestCase as AsyncTestCase
from ..indy import V20CredExRecordIndy
class TestV20CredExRecordIndy(AsyncTestCase):
async def test_record(self):
same = [
V20CredExRecordIndy(
cred_ex_indy_id="dummy-0",
cred_ex_id="abc",
cred_request_metadata={"a": 1, "b": 2},
rev_reg_id=None,
cred_rev_id=None,
)
] * 2
diff = [
V20CredExRecordIndy(
cred_ex_indy_id="dummy-1",
cred_ex_id="def",
cred_request_metadata={"a": 1, "b": 2},
rev_reg_id=None,
cred_rev_id=None,
),
V20CredExRecordIndy(
cred_ex_indy_id="dummy-1",
cred_ex_id="ghi",
cred_request_metadata={"a": 1, "b": 2},
rev_reg_id=None,
cred_rev_id=None,
),
V20CredExRecordIndy(
cred_ex_indy_id="dummy-1",
cred_ex_id="def",
cred_request_metadata={"a": 1, "b": 2},
rev_reg_id="rev-reg-id",
cred_rev_id="cred-rev-id",
),
]
for i in range(len(same) - 1):
for j in range(i, len(same)):
assert same[i] == same[j]
for i in range(len(diff) - 1):
for j in range(i, len(diff)):
assert diff[i] == diff[j] if i == j else diff[i] != diff[j]
assert same[0].cred_ex_indy_id == "dummy-0"
| StarcoderdataPython |
3236426 | # A VAE implementation using PyTorch
# Usage differs from ae_torch; we would like to unify the two eventually
# The VAE class has methods that may look redundant; they are written with inheritance in mind
import numpy as np
import os
import matplotlib.pyplot as plt
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from ignite.engine import Engine, Events
from ignite.utils import convert_tensor
from ignite.contrib.handlers.tensorboard_logger import *
log2pi = float(np.log(2*np.pi))
#Variational AutoEncoder model
class Encoder_Decoder(nn.Module):
def __init__(self, layers, act_func=torch.tanh, out_func=torch.sigmoid,
use_BN=False, init_method=nn.init.xavier_uniform_,
device='cuda'):
super(Encoder_Decoder, self).__init__()
self.use_BN = use_BN
self.act_func = act_func
self.out_func = out_func
self.device = device
self._makeLayers(layers,init_method)
def _makeLayers(self,hidden, init_method):
encode_layer = []
decode_layer = []
e = nn.Linear(hidden[0], hidden[1])
init_method(e.weight)
encode_layer.append(e)
d = nn.Linear(hidden[1], hidden[0])
init_method(d.weight)
decode_layer.append(d)
self.encode_layer = nn.ModuleList(encode_layer)
self.decode_layer = nn.ModuleList(decode_layer)
"""
encode_layer = []
decode_layer = []
for i in range(len(hidden)-2):
# set encoder layer
e = nn.Linear(hidden[i],hidden[i+1])
init_method(e.weight)
encode_layer.append( e )
if self.use_BN:
e = nn.BatchNorm1d(hidden[i+1])
encode_layer.append( e )
# set decoder layer
j = len(hidden)-i-1
d = nn.Linear(hidden[j],hidden[j-1])
init_method(d.weight)
decode_layer.append( d )
if self.use_BN and j>1:
d = nn.BatchNorm1d(hidden[j-1])
decode_layer.append( d )
        # In PyTorch, registering the layers in an nn.ModuleList like this makes their weights trainable
self.encode_layer = nn.ModuleList(encode_layer)
self.decode_layer = nn.ModuleList(decode_layer)
        # Opinions differ on whether to keep the encoder and decoder layers separate
        # self.layer = nn.ModuleList(encode_layer+decode_layer)
        # The layers that output mu and sigma are defined here
self.enc_mu = nn.Linear(hidden[-2],hidden[-1])
init_method(self.enc_mu.weight)
self.enc_var = nn.Linear(hidden[-2],hidden[-1])
init_method(self.enc_var.weight)
        # The output layer is defined here. It is named dec_mu, but in the Bernoulli version it does not output a mean; dec_out1 might be a better name
self.dec_mu = nn.Linear(hidden[1],hidden[0])
init_method(self.dec_mu.weight)
if self.is_gauss_dist:
self.dec_var = nn.Linear(hidden[1],hidden[0])
init_method(self.dec_var.weight)
"""
def __call__(self, x):
h = self.encode(x)
d_out = self.decode(h)
return h, d_out
"""
mu,var = self.encode(x)
e = self.sample_z(mu, var)
d_out = self.decode(e)
return e,d_out
"""
def encode(self, x):
e = x
for i in range(len(self.encode_layer)):
            # Do not apply the activation right before a BatchNorm layer (non-BN layers sit at even indices, counting from 0)
e = self.encode_layer[i](e) if self.use_BN and not (i&1) else self.act_func(self.encode_layer[i](e))
return e
"""
e = x
for i in range(len(self.encode_layer)):
            # Do not apply the activation right before a BatchNorm layer (non-BN layers sit at even indices, counting from 0)
e = self.encode_layer[i](e) if self.use_BN and not (i&1) else self.act_func(self.encode_layer[i](e))
mu = self.enc_mu(e)
var = self.enc_var(e)
        # var = F.softplus(var) # some implementations apply softplus to var, kept here just in case
return mu,var
"""
def decode(self, z):
d = z
for i in range(len(self.decode_layer)):
d = self.decode_layer[i](d) if self.use_BN and not (i&1) else self.out_func(self.decode_layer[i](d))
#d = self.decode_layer[i](d)
return d
"""
d = z
for i in range(len(self.decode_layer)):
d = self.decode_layer[i](d) if self.use_BN and not (i&1) else self.act_func(self.decode_layer[i](d))
        # The Gaussian version produces two outputs, the Bernoulli version one
# d_out = ( self.out_func(self.dec_mu(d)) , self.out_func(self.dec_var(d)) ) if self.is_gauss_dist else self.out_func(self.dec_mu(d))
d_out = ( self.dec_mu(d), self.out_func(self.dec_var(d)) ) if self.is_gauss_dist else self.out_func(self.dec_mu(d))
return d_out
"""
"""
    # Generate a Gaussian sample from the mean and variance (the encoder outputs ln(var) instead of var); implementation borrowed, so the details are not fully understood
def sample_z(self, mu, ln_var):
epsilon = torch.randn(mu.shape).to(self.device)
return mu + torch.exp(ln_var*0.5) * epsilon
# return mu + torch.sqrt( torch.exp(ln_var) ) * epsilon
"""
"""
def latent_loss(self, enc_mu, enc_var):
return -0.5 * torch.mean(torch.sum( enc_var +1 -enc_mu*enc_mu -torch.exp(enc_var), dim=1))
"""
def reconst_loss(self, x, dec_out):
reconst = F.mse_loss(x, dec_out)
return reconst
"""
if self.is_gauss_dist:
dec_mu, dec_var = dec_out
m_vae = 0.5* (x - dec_mu)**2 * torch.exp(-dec_var)
a_vae = 0.5* (log2pi+dec_var)
reconst = torch.mean( torch.sum(m_vae + a_vae, dim=1) )
else:
reconst = -torch.mean(torch.sum(x * torch.log(dec_out) + (1 - x) * torch.log(1 - dec_out), dim=1))
return reconst
"""
class AE():
def __init__(self, input_shape, hidden, act_func=torch.tanh, out_func=torch.sigmoid, use_BN=False, init_method='xavier', folder='./model', is_gauss_dist=False, device='cuda'):
activations = {
"sigmoid" : torch.sigmoid, \
"tanh" : torch.tanh, \
"softplus" : F.softplus, \
"relu" : torch.relu, \
"leaky" : F.leaky_relu, \
"elu" : F.elu, \
"identity" : lambda x:x \
}
# gpu setting
self.device = device
        # Activation function: convert to a function when specified as a string
if isinstance(act_func, str):
if act_func in activations.keys():
act_func = activations[act_func]
else:
                print('arg act_func is ', act_func, '. This value does not exist. This model uses the identity function as the activation function.')
act_func = lambda x:x
if isinstance(out_func, str):
if out_func in activations.keys():
out_func = activations[out_func]
else:
                print('arg out_func is ', out_func, '. This value does not exist. This model uses the identity function as the activation function of the output.')
out_func = lambda x:x
if out_func != torch.sigmoid:
            print('Note: out_func should be sigmoid.')
        # Weight initialization method: convert to a function when specified as a string
if isinstance(init_method, str):
inits = {
"xavier" : nn.init.xavier_uniform_, \
"henormal" : nn.init.kaiming_uniform_ \
}
if init_method in inits.keys():
init_method = inits[init_method]
else:
init_method = nn.init.xavier_uniform_
print('init_method is xavier initializer')
if not isinstance(hidden, list):
hidden = [hidden]
hidden = [input_shape] + hidden
print('layer' + str(hidden))
        # Path for saving the model
self.save_dir = os.path.join(folder, '{}'.format(hidden))
os.makedirs(self.save_dir, exist_ok=True)
os.makedirs(self.save_dir+'/weight_plot', exist_ok=True)
self.set_model(hidden, act_func, out_func, use_BN, init_method, device=self.device)
self.set_optimizer()
        # Number of encoder weights; used by weight_plot.
self.weight_num = len(hidden) + 1
    # Probably rarely used
def __call__(self, x):
return self.model(x)
# train
def train(self,train,epoch,batch,C=1.0, k=1, valid=None, is_plot_weight=False):
if valid is None:
print('epoch\tloss\t\treconst\t\tMSE')
else:
print('epoch\tloss\t\treconst\t\tMSE\t\tvalid_MSE')
# conversion data
train_data = torch.Tensor(train)
dataset = torch.utils.data.TensorDataset(train_data, train_data)
train_loader = DataLoader(dataset, batch_size=batch, shuffle=True)
# trainer
trainer = self.trainer(C=C,k=k, device=self.device)
# log variables init.
log = []
loss_iter = []
lat_loss_iter = []
rec_loss_iter = []
# executed function per iter
@trainer.on(Events.ITERATION_COMPLETED)
def add_loss(engine):
loss_iter.append(engine.state.output[0])
rec_loss_iter.append(engine.state.output[1])
"""
loss_iter.append(engine.state.output[0])
lat_loss_iter.append(engine.state.output[1])
rec_loss_iter.append(engine.state.output[2])
"""
# executed function per epoch
@trainer.on(Events.EPOCH_COMPLETED)
def log_report(engine):
epoch = engine.state.epoch
loss = np.mean(loss_iter)
rec_loss = np.mean(rec_loss_iter)
log.append({'epoch':epoch,'loss':loss, 'reconst':rec_loss})
if epoch % 10 == 0 or epoch==1:
perm = np.random.permutation(len(train))[:batch]
mse = self.MSE(train[perm]).mean()
if valid is None:
print(f'{epoch}\t{loss:.6f}\t{rec_loss:.6f}\t{mse:.6f}')
else:
val_mse = self.MSE(valid).mean()
print(f'{epoch}\t{loss:.6f}\t{rec_loss:.6f}\t{mse:.6f}\t{val_mse:.6f}')
if is_plot_weight: # output layer weight.
self.plot_weight(epoch)
loss_iter.clear()
rec_loss_iter.clear()
"""
epoch = engine.state.epoch
loss = np.mean(loss_iter)
lat_loss = np.mean(lat_loss_iter)
rec_loss = np.mean(rec_loss_iter)
log.append({'epoch':epoch,'loss':loss, 'latent':lat_loss, 'reconst':rec_loss})
if epoch % 10 == 0 or epoch==1:
perm = np.random.permutation(len(train))[:batch]
mse = self.MSE(train[perm]).mean()
if valid is None:
print(f'{epoch}\t{loss:.6f}\t{lat_loss:.6f}\t{rec_loss:.6f}\t{mse:.6f}')
else:
val_mse = self.MSE(valid).mean()
print(f'{epoch}\t{loss:.6f}\t{lat_loss:.6f}\t{rec_loss:.6f}\t{mse:.6f}\t{val_mse:.6f}')
if is_plot_weight: # output layer weight.
self.plot_weight(epoch)
loss_iter.clear()
rec_loss_iter.clear()
lat_loss_iter.clear()
"""
# start training
trainer.run(train_loader, max_epochs=epoch)
# save model weight
self.save_model()
# log output
file_path = os.path.join(self.save_dir, 'log')
file_ = open(file_path, 'w')
json.dump(log, file_, indent=4)
def trainer(self, C=1.0, k=1, device=None):
self.model_to(device)
def prepare_batch(batch, device=None):
x, y = batch
return (convert_tensor(x, device=device),
convert_tensor(y, device=device))
def _update(engine, batch):
self.zero_grad()
x,y = prepare_batch(batch, device=device)
h = self.encode(x)
reconst_loss = 0
for l in range(k):
d_out = self.decode(h)
reconst_loss += self.reconst_loss(y, d_out) / float(k)
loss = reconst_loss
loss.backward()
self.grad_clip()
self.step()
return loss.item(), reconst_loss.item()
"""
self.zero_grad()
x,y = prepare_batch(batch, device=device)
e_mu, e_lnvar = self.encode(x)
latent_loss = self.latent_loss(e_mu, e_lnvar)
reconst_loss = 0
for l in range(k):
z = self.sample_z(e_mu, e_lnvar)
d_out = self.decode(z)
reconst_loss += self.reconst_loss(y, d_out) / float(k)
loss = latent_loss + reconst_loss
loss.backward()
self.grad_clip()
self.step()
return loss.item(), latent_loss.item(), reconst_loss.item()
"""
return Engine(_update)
    # Thin wrappers so subclasses can override these without extra boilerplate
def encode(self, x):
return self.model.encode(x)
def decode(self, z):
return self.model.decode(z)
"""
def latent_loss(self, mu, var):
return self.model.latent_loss(mu, var)
"""
def reconst_loss(self, x, d_out):
return self.model.reconst_loss(x, d_out)
def set_model(self, hidden, act_func, out_func, use_BN, init_method, device):
self.model = Encoder_Decoder(hidden, act_func, out_func, use_BN, init_method, device=device)
def set_optimizer(self, learning_rate=0.001, beta1=0.9, beta2=0.999, weight_decay=0, gradient_clipping=None):
betas=(beta1, beta2)
self.opt = optim.Adam(self.model.parameters(), lr=learning_rate, betas=betas, eps=1e-08, weight_decay=weight_decay, amsgrad=False)
self.gradient_clipping = gradient_clipping
    # Moves the model to the CPU or to CUDA
def model_to(self, device):
self.model.to(device)
"""
    # Kept so the train method does not have to write model.sample_z
def sample_z(self, mu, var):
return self.model.sample_z(mu, var)
"""
def zero_grad(self):
self.opt.zero_grad()
def step(self):
self.opt.step()
def grad_clip(self):
if self.gradient_clipping is not None:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.gradient_clipping)
    # Plot the weights of each layer to check that weight updates are working.
def plot_weight(self, epoch):
dic = self.model.state_dict()
fig = plt.figure(figsize=(16,8))
plot_num = 0
for k in dic.keys():
if 'weight' in k:
plot_num += 1
plot_data = self.tensor_to_np(dic[k]).reshape(-1)
plt.subplot(2,self.weight_num,plot_num)
plt.plot(plot_data, label=k)
plt.legend()
plt.tight_layout()
plt.savefig( self.save_dir + '/weight_plot/epoch_{}.png'.format(epoch) )
plt.close()
    # Save the model; called at the end of the train method.
def save_model(self, path=None):
path = self.save_dir if path is None else path
torch.save(self.model.state_dict(), path+'/model.pth')
    # Load the model; call this to use a trained model.
def load_model(self, path=None):
path = self.save_dir if path is None else path
param = torch.load( path + '/model.pth')
self.model.load_state_dict(param)
self.model.to(self.device)
    ###### Evaluation utilities #########
    # Switch to eval mode
def model_to_eval(self):
self.model.eval()
    # Return the latent features, reconstruction, and error for the input data.
def reconst(self, data, unregular=False):
if data.ndim == 1:
data = data.reshape(1,-1)
if not isinstance(data, torch.Tensor):
data = self.np_to_tensor(data)
feat = self.encode(data)
d_out = self.decode(feat)
"""
e_mu,e_var = self.encode(data)
feat = self.sample_z(e_mu, e_var)
d_out = self.decode(feat)
"""
"""
rec = d_out[0] if self.is_gauss_dist else d_out
"""
rec = d_out
mse = torch.mean( (rec-data)**2, dim=1 )
# lat_loss = self.latent_loss( e_mu, e_var)
# rec_loss = self.reconst_loss( data, d_out )
# vae_err = (lat_loss+rec_loss)
feat = self.tensor_to_np(feat)
rec = self.tensor_to_np(rec)
mse = self.tensor_to_np(mse)
return feat, rec, mse
    # Compute the reconstruction error as a mean squared error; mainly for monitoring during training.
def MSE(self, data):
if data.ndim == 1:
data = data.reshape(1,-1)
if not isinstance(data, torch.Tensor):
data = self.np_to_tensor(data)
e, d_out = self(data)
rec = d_out
mse = torch.mean( (rec-data)**2, dim=1 )
return self.tensor_to_np(mse)
"""
    # Return the decoder output for a given latent feature
def featuremap_to_image(self, feat):
if feat.ndim == 1:
feat = feat.reshape(1,-1)
if not isinstance(feat, torch.Tensor):
feat = self.np_to_tensor(feat)
d_out = d_out[0] if self.is_gauss_dist else d_out
return self.tensor_to_np(d_out)
"""
# ndarray -> torch.Tensor
def np_to_tensor(self, data):
return torch.Tensor(data).to(self.device)
def tensor_to_np(self, data):
return data.detach().to('cpu').numpy()
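# --- Usage sketch (added for illustration): train a tiny autoencoder on random
# data; the shapes and hyperparameters below are arbitrary placeholders.
if __name__ == '__main__':
    x = np.random.rand(256, 20).astype('float32')
    ae = AE(input_shape=20, hidden=[8], act_func='tanh', out_func='sigmoid',
            folder='./model', device='cpu')
    ae.train(x, epoch=10, batch=32)
    ae.model_to_eval()
    feat, rec, mse = ae.reconst(x[:5])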
| StarcoderdataPython |
3322510 |
"""Celery tasks to be spawned."""
import time
from flask import current_app
from brewpi.extensions import celery
from .. import models
from ..drivers import save_temp_to_file
@celery.task
def add(x, y, kettle_id):
"""Example task."""
# db_sess = db.session
current_app.logger.info(f"add {x} and {y}")
kettle = models.Kettle.get_by_id(kettle_id)
current_app.logger.info(f"kettle name is {kettle.name}")
while True:
time.sleep(5)
current_app.logger.info(f"kettle loop state:{kettle.is_running}")
kettle = models.Kettle.get_by_id(kettle_id)
current_app.logger.info(f"kettle loop state:{kettle.is_running}")
# return x + y
@celery.task
def hysteresis_loop(kettle_id):
"""Hysterises loop to turn hold the kettle as a set temperature."""
kettle = models.Kettle.get_by_id(kettle_id)
heater = models.Heater.get_by_id(kettle.heater_id)
while True:
temp_c = kettle.current_temp # Current temperature
current_app.logger.info(f"kettle current temp:{temp_c}")
current_app.logger.info(f"kettle target temp:{kettle.target_temp}")
current_app.logger.info(f"kettle hyst window:{kettle.hyst_window}")
current_app.logger.info(f"kettle heater state:{kettle.heater.current_state}")
heater_state = kettle.heater.current_state
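        # Hysteresis deadband: only toggle the heater once the temperature leaves
        # the window [target - hyst_window, target + hyst_window], so the relay does not chatter.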
if heater_state:
if temp_c > (kettle.target_temp + kettle.hyst_window):
heater.turn_off()
heater_state = False
current_app.logger.info("Turning OFF")
else:
if temp_c < (kettle.target_temp - kettle.hyst_window):
heater.turn_on()
heater_state = True
current_app.logger.info("Turning ON")
time.sleep(5)
# def pid_loop(self):
# """PID Loop. Values are from craftbeerpi which are roughly the same as ours. hopefully ok?."""
# p = 44
# i = 165
# d = 4
# sample_time = 5
# pid = simple_pid.PID(
# p, i, d, setpoint=self.target_temp
# ) # dont think this can be changed once started.
# pid.output_limits = (0, 100)
# pid.sample_time = sample_time
# while kettle.is_loop_running:
# heat_percent = pid(self.current_temp)
# heating_time = pid.sample_time * (heat_percent / 100)
# wait_time = pid.sample_time - heating_time
# self.heater_enable(True)
# time.sleep(heating_time)
# self.heater_enable(False)
# time.sleep(wait_time)
# self.heater_enable(False)
@celery.task
def pwm_loop(kettle_id):
"""PWM loop to keep the kettle at at boiling temperature with reduced power."""
kettle = models.Kettle.get_by_id(kettle_id)
heater = models.Heater.get_by_id(kettle.heater_id)
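    # Fixed duty cycle: 1 s off, 8 s on per 9 s period (~89% average power).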
while True:
heater.turn_off()
current_app.logger.info("Turning OFF")
time.sleep(1)
heater.turn_on()
current_app.logger.info("Turning ON")
time.sleep(8)
@celery.task
def update_temperature():
save_temp_to_file()
| StarcoderdataPython |
162502 | <reponame>to314as/gym-pool<filename>gym_pool/envs/utils.py
import functools
import operator
def prod(l):
return functools.reduce(operator.mul, l, 1) | StarcoderdataPython |
1604516 | <filename>twitter_elections/sentiment_analysis/labeliseurs/labeliser_v2.py
# coding: utf-8
import pymongo as pym
import re
# raw_input only exists in Python 2. In Python 3, the equivalent function is input().
def retrait_doublons(collection):
    print('Removing any duplicate documents...')
textCleanPipeline = [{"$group":{"_id":"$text", "dups":{"$push":"$_id"},"count":{"$sum":1}}},{"$match":{"count":{"$gt":1}}}]
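    # The aggregation groups documents by identical text and keeps only groups
    # with more than one copy, i.e. the duplicates.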
duplicates = []
count = 0
try:
for doc in collection.aggregate(textCleanPipeline) :
it = iter(doc['dups'])
next(it)
for id in it:
count += 1
duplicates.append(pym.DeleteOne({'_id':id}))
if duplicates:
collection.bulk_write(duplicates)
except:
pass
    print('{} duplicates removed.'.format(count))
client.close()
print(20*'-')
print('Instructions:')
print('- Do not label tweets that are not in French.')
print("- If a tweet concerns several candidates at once, with mixed sentiments (for example positive towards one candidate and negative towards another), it is better not to label it (key r).")
print(20*'-')
client = pym.MongoClient('localhost',27017)
collection = client.tweet.tweet
print('Fetching tweets picked at random from the database...')
#corpus = collection.aggregate([{'$match':{'t_text':{'$not':re.compile('rt @')}}},{'$sample':{'size':200}},{'$project':{'t_text':1, 't_id':1}}])
corpus = collection.aggregate([{'$sample':{'size':400}},{'$project':{'t_text':1, 't_id':1}}])
client.close()
sentimentmap = {'a':1,'z':0,'e':-1}
phrase = 'Sentiment? Pos: a, Neg: e, Neutral: z, Skip: r, Quit: X\n'
compte = 0
collection = client.tweet.train
for tweet in corpus:
if 'rt @' not in tweet['t_text']:
print(20*'-')
print(tweet['t_text'])
print(20*'-')
try:
sentiment = raw_input(phrase)
except NameError:
sentiment = input(phrase)
        while sentiment not in ['a', 'z', 'e', 'r', 'X']:
            print("Invalid key. Try again.\n")
print(20*'-')
print(tweet['t_text'])
print(20*'-')
try:
sentiment = raw_input(phrase)
except NameError:
sentiment = input(phrase)
if sentiment == 'r':
continue
elif sentiment == 'X':
break
else:
collection.insert_one({'t_id':tweet['t_id'], 'text':tweet['t_text'], 'sentiment':sentimentmap[sentiment]})
compte += 1
else:
continue
n_tweets = collection.count()
client.close()
print('\nInserted {0} tweets into the "train" collection, which now holds {1} tweets.'.format(compte, n_tweets))
retrait_doublons(collection)
print('\nThank you for your help! Run the script again to continue.') | StarcoderdataPython
106609 | <gh_stars>0
import requests, sys, time, os, argparse
import pandas as pd
import re
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag, word_tokenize
from textblob import TextBlob
from collections import Counter
country_codes ={'US':'USA', 'IN':'India', 'BR':'Brazil', 'GB':'UK', 'TH':'Thailand', 'RU':'Russia', 'KR':'South Korea','ES':'Spain','JP':'Japan','CA':'Canada','AU':'Australia'}
#country_codes={'US':'USA', 'IN':'India','BR':'Brazil', 'GB':'UK'}
snippet_features = ["title",
"publishedAt",
"channelId",
"channelTitle",
"categoryId"]
unsafe_characters = ['\n', '"']
header = ["video_id"] + snippet_features + ["trending_date", "tags", "view_count", "likes", "dislikes",
"comment_count", "thumbnail_link", "comments_disabled",
"ratings_disabled", "description", "duration"] + ["country_code"]
def setup():
yt_api_key = 'your dev key'
yt_country_codes = country_codes
return yt_api_key, yt_country_codes
def prepare_feature(feature):
for ch in unsafe_characters:
feature = str(feature).replace(ch, "")
return f'"{feature}"'
def api_request(page_token, country_code):
request_url = f"https://www.googleapis.com/youtube/v3/videos?part=id,statistics,contentDetails,snippet{page_token}chart=mostPopular®ionCode={country_code}&maxResults=10&key={api_key}"
request = requests.get(request_url)
if request.status_code == 429:
print("Temp-Banned due to excess requests, please wait and continue later")
sys.exit()
return request.json()
def get_tags(tags_list):
return prepare_feature("|".join(tags_list))
def get_videos(items,df, country_code):
line = []
for video in items:
comments_disabled = False
ratings_disabled = False
if "statistics" not in video:
continue
video_id = prepare_feature(video['id'])
snippet = video['snippet']
statistics = video['statistics']
contentDetails = video['contentDetails']
features = [prepare_feature(snippet.get(feature, "")) for feature in snippet_features]
description = snippet.get("description", "")
thumbnail_link = snippet.get("thumbnails", dict()).get("default", dict()).get("url", "")
trending_date = time.strftime("%y.%d.%m")
tags = get_tags(snippet.get("tags", ["[none]"]))
view_count = statistics.get("viewCount", 0)
duration = contentDetails.get("duration", "")
if 'likeCount' in statistics and 'dislikeCount' in statistics:
likes = statistics['likeCount']
dislikes = statistics['dislikeCount']
else:
ratings_disabled = True
likes = 0
dislikes = 0
if 'commentCount' in statistics:
comment_count = statistics['commentCount']
else:
comments_disabled = True
comment_count = 0
line = [video_id] + features + [prepare_feature(x) for x in [trending_date, tags, view_count, likes, dislikes,
comment_count, thumbnail_link, comments_disabled,
ratings_disabled, description, duration]]
line.append(country_code)
df = df.append({'video_id': line[0], 'title': line[1], 'publishedAt': line[2], 'channelId': line[3], 'channelTitle': line[4], 'categoryId': line[5],
'trending_date': line[6], 'tags': line[7], 'view_count': line[8], 'likes': line[9], 'dislikes': line[10],
"comment_count": line[11], "thumbnail_link": line[12], "comments_disabled": line[13],
"ratings_disabled": line[14], "description": line[15], "duration": line[16], "country_code": line[17]
}, ignore_index=True)
return df
def get_pages(country_code, df, next_page_token="&"):
country_data = []
i = 1
while next_page_token is not None:
video_data_page = api_request(next_page_token, country_code)
next_page_token = video_data_page.get("nextPageToken", None)
next_page_token = f"&pageToken={next_page_token}&" if next_page_token is not None else next_page_token
items = video_data_page.get('items', [])
df = get_videos(items,df,country_code)
df.reset_index()
i+=1
return df
def get_data():
df = pd.DataFrame(columns=header)
for country_code in country_codes.keys():
df = df.append(get_pages(country_code,df), ignore_index=True)
df.drop_duplicates(inplace=True)
return df
# Global analysis: sum the given column per country and return the top 5 as [{'x': country, 'y': value}, ...]
def global_analysis(column, data):
glo_dict = []
df = data.groupby(['country_code'])[[column]].sum()
    x = df.sort_values(column, ascending=False).head()
for index, rows in x.iterrows():
glo_dict.append({"x":country_codes[index],"y":int(rows[column])})
return glo_dict
# Country-wise analysis: sum the given column per category within one country and return the top 5 categories.
def country_analysis(country_code, column, category_dict, data):
con_dict =[]
df = data[data['country_code']==country_code]
df = df.groupby(['categoryId'])[[column]].sum()
x = df.sort_values(column, ascending=False).head()
for index, rows in x.iterrows():
key = '%s'%index
con_dict.append({'x':category_dict[key],'y':int(rows[column])})
return con_dict
# Collect the hashtags of one country's trending videos.
def country_hashtags(df):
words =[]
for i in df['hashtags']:
for j in i:
j=j.replace('"','')
words.append(j)
return words
def youtube_analysis():
data = get_data()
wnl = WordNetLemmatizer()
stop_words = stopwords.words('english')
lemmatized_tokens =[]
for index, row in data.iterrows():
#read_only_ascii
text = str(row["description"]).encode("ascii", "ignore").decode("utf-8")
#lowerCase_the_text
text = text.lower()
text = re.sub(r'[&)(://?.-]','',text)
text = re.sub(r'http?[\w]+','',text)
#remove mentions
text = re.sub(r'@[\w]+','',text)
        # tokenize and drop stopwords
tokens = word_tokenize(str(text))
tokens = [word for word in tokens if word not in stop_words]
lems=[]
        # remove one-letter words
tokens = [word for word in tokens if len(word)>1]
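        # Map Penn Treebank POS tags to WordNet tags (a/r/n/v) so the lemmatizer
        # uses the correct word form; fall back to the raw token otherwise.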
for word, tag in pos_tag(tokens):
wntag = tag[0].lower()
wntag = wntag if wntag in ['a','r','n','v'] else None
if not wntag:
lems.append(word)
else:
lems.append(wnl.lemmatize(word, wntag))
lemmatized_tokens.append(lems)
data["tokens"] = lemmatized_tokens
lis = []
for i in data['tags']:
lis.append(i.split('|'))
data['hashtags']= lis
#category-wise
category_dict = {
'"2"': "Autos & Vehicles",
'"1"': "Film & Animation",
'"10"' : "Music",
'"15"' : "Pets & Animals",
'"17"' : "Sports",
'"18"' : "Short Movies",
'"19"' : "Travel & Events",
'"20"': "Gaming",
'"21"' : "Videoblogging",
'"22"': "People & Blogs",
'"23"' : "Comedy",
'"24"' : "Entertainment",
'"25"' : "News & Politics",
'"26"' : "Howto & Style",
'"27"': "Education",
'"28"' : "Science & Technology",
'"29"': "Nonprofits & Activism",
'"30"' : "Movies",
'"31"': "Anime/Animation",
'"32"' : "Action/Adventure",
'"33"': "Classics",
'"34"' : "Comedy",
'"35"': "Documentary",
'"36"' : "Drama",
'"37"': "Family",
'"38"' : "Foreign",
'"39"': "Horror",
'"40"' : "SciFi/Fantasy",
'"41"': "Thriller",
'"42"' : "Shorts",
'"43"': "Shows",
'"44"': "Trailers"
}
general ={"global":{}}
like_lis=[]
dislike_lis=[]
view_lis=[]
comment_lis =[]
for i, r in data.iterrows():
like_lis.append(int(r['likes'].replace('"', "")))
dislike_lis.append(int(r['dislikes'].replace('"', "")))
view_lis.append(int(r['view_count'].replace('"', "")))
comment_lis.append(int(r['comment_count'].replace('"', "")))
data['likes_count'] = like_lis
data['dislikes_count'] = dislike_lis
data['views']=view_lis
data['comments']=comment_lis
columns_list = ['likes_count','views','comments','dislikes_count']
#global
for i in columns_list:
glo_dict = global_analysis(i, data)
general["global"][i] = glo_dict
#country_wise
for country in country_codes.keys():
country_output ={}
for column in columns_list:
con_dict = country_analysis(country, column, category_dict, data)
country_output[column]=con_dict
general.update({country_codes[country]:country_output})
#global hashtags
words =[]
for i in data['hashtags']:
for j in i:
j=j.replace('"','')
words.append(j)
word_freq = Counter(words)
words_json = [{'text': word, 'value':int(count)} for word, count in word_freq.items() if word!='[none]']
f = sorted(words_json, key =lambda x:x['value'], reverse=True)
f = f[:50]
general['global'].update({'keywords':f})
#country_wise hashtags
for i in country_codes.keys():
df = data[data['country_code']==i]
words = country_hashtags(df)
word_freq = Counter(words)
words_json = [{'text': word, 'value':int(count)} for word, count in word_freq.items() if word!='[none]']
f = sorted(words_json, key =lambda x:x['value'], reverse=True)
f = f[:50]
general[country_codes[i]].update({'keywords':f})
#time duration analysis -
#changing likes_count and dislikes_count into likes, dislikes
general['global']['likes'] = general['global']['likes_count']
general['global']['dislikes'] = general['global']['dislikes_count']
del general['global']['likes_count']
del general['global']['dislikes_count']
for i in country_codes.keys():
general[country_codes[i]]['likes'] = general[country_codes[i]]['likes_count']
general[country_codes[i]]['dislikes'] = general[country_codes[i]]['dislikes_count']
del general[country_codes[i]]['likes_count']
del general[country_codes[i]]['dislikes_count']
print(general)
return general
api_key, country_codes = setup()
| StarcoderdataPython |
3387948 | import os
from algorithms.RNN import RNN
from algorithms.LSTM import LSTM
from algorithms.GRU import GRU
from algorithms.Transformer import Transformer
from algorithms.RTransformer import RT
from HDF5Dataset import HDF5Dataset
import plotting
import torch
from torch.utils import data
from sklearn.preprocessing import MaxAbsScaler
from sklearn.model_selection import train_test_split
import random
import numpy as np
import pandas as pd
import importlib
from experiment_config import experiment_path, chosen_experiment
spec = importlib.util.spec_from_file_location(chosen_experiment, experiment_path)
config = importlib.util.module_from_spec(spec)
spec.loader.exec_module(config)
learning_config = config.learning_config
def model_exists(full_path):
if learning_config["do grid search"] and learning_config["mode"] == "train":
return False
else:
return os.path.exists(os.path.join(full_path, learning_config["dataset"] + "_" + learning_config["type"] + "_" + 'model.pth'))
def load_model(learning_config):
path = os.path.join(config.models_folder, learning_config['classifier'])
if learning_config['classifier'] == 'RNN':
load_saved = model_exists(path)
model = RNN(learning_config['RNN model settings'][0], learning_config['RNN model settings'][1],
learning_config['RNN model settings'][2], learning_config['RNN model settings'][3])
elif learning_config['classifier'] == 'LSTM':
load_saved = model_exists(path)
model = LSTM(learning_config['LSTM model settings'][0], learning_config['LSTM model settings'][1],
learning_config['LSTM model settings'][2], learning_config['LSTM model settings'][3])
elif learning_config['classifier'] == 'GRU':
load_saved = model_exists(path)
model = GRU(learning_config['GRU model settings'][0], learning_config['GRU model settings'][1],
learning_config['GRU model settings'][2], learning_config['GRU model settings'][3])
elif learning_config['classifier'] == 'Transformer':
load_saved = model_exists(path)
model = Transformer(learning_config['Transformer model settings'][0],
learning_config['Transformer model settings'][1],
learning_config['Transformer model settings'][2],
learning_config['Transformer model settings'][3],
learning_config['Transformer model settings'][4],
learning_config['Transformer model settings'][5])
elif learning_config['classifier'] == 'RTransformer':
load_saved = model_exists(path)
model = RT(learning_config['R-Transformer model settings'][0],
learning_config['R-Transformer model settings'][1],
learning_config['R-Transformer model settings'][2],
learning_config['R-Transformer model settings'][3],
learning_config['R-Transformer model settings'][4],
learning_config['R-Transformer model settings'][5],
learning_config['R-Transformer model settings'][6],
learning_config['R-Transformer model settings'][7],
learning_config['R-Transformer model settings'][8],
learning_config['R-Transformer model settings'][9])
else:
print('Invalid model type entered!')
return None
device = model.choose_device()
if load_saved:
print('Loading model ..')
try:
checkpoint = torch.load(os.path.join(path, learning_config["dataset"] + "_" + learning_config["type"] + "_" + 'model.pth'))
model.load_state_dict(checkpoint['model_state_dict'])
model.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
model.to(device)
print('Model successfully loaded!')
return model, epoch, loss
except RuntimeError:
print('Improper model loaded (different architecture), fresh model used')
pass
else:
print('No saved Model found. Fresh model used')
model.to(device)
return model, None, None
def export_model(model, learning_config, grid_search_run):
dummy_input = torch.randn(1, 672, 1)
out = model(dummy_input)
input_names = ["input"] # + ["learned_%d" % i for i in range(3)]
output_names = ["output"]
name = learning_config['dataset'] + '.onnx'
model.eval()
torch.onnx.export(torch.jit.trace_module(model, {'forward': dummy_input}), dummy_input, name, example_outputs=out,
export_params=True, verbose=True,
input_names=input_names, output_names=output_names)
def save_model(model, epoch, loss, grid_search_run):
path = os.path.join(config.models_folder, learning_config['classifier'])
if not os.path.exists(path):
os.makedirs(path)
if learning_config["do grid search"]:
name = os.path.join(path, learning_config["dataset"] + "_" + learning_config["type"] + "_gridsearch_on_" + learning_config["grid search"][0] +
"_value_" + str(learning_config["grid search"][1][grid_search_run]) + "_" + 'model.pth')
else:
name = os.path.join(path, learning_config["dataset"] + "_" + learning_config["type"] + "_" + 'model.pth')
try:
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': model.optimizer.state_dict(),
'loss': loss,
}, name)
except TypeError:
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict,
'optimizer_state_dict': model.optimizer.state_dict(),
'loss': loss,
}, name)
# torch.save(model.state_dict(), path + ".model")
# torch.save(model.optimizer.state_dict(), path + ".optimizer")
def save_result(score, grid_search_run):
path = os.path.join(config.models_folder, learning_config['classifier'])
if not os.path.exists(path):
os.makedirs(path)
if learning_config["do grid search"]:
name = os.path.join(path, learning_config["dataset"] + "_" + learning_config["type"] + "_gridsearch_on_" + learning_config["grid search"][0]
+ "_" + 'result.txt')
else:
name = os.path.join(path, learning_config["dataset"] + "_" + learning_config["type"] + "_" + 'result.txt')
f = open(name, "a")
if grid_search_run == 0:
f.write("Configuration:\n")
for key, value in learning_config.items():
f.write("\n" + key + ' : ' + str(value))
if learning_config["do grid search"]:
f.write("\nGrid search on: " + learning_config["grid search"][0] +
"; value:" + str(learning_config["grid search"][1][grid_search_run]))
f.write("\n########## Metrics ##########")
f.write(
"Accuracy: {0}\nPrecision: {1}\nRecall: {2}\nFScore: {3}\n".format(score[0],
score[1][
0],
score[1][
1],
score[1][
2], ))
f.close()
def plot_samples(X, y, X_pre=None):
if type(X) == torch.Tensor:
X = np.array(X)
y = np.array(y)
X_zeromean = np.array([x - x.mean() for x in X]) # deduct it's own mean from every sample
if X_pre is None:
X_train, X_test, y_train, y_test = train_test_split(X_zeromean, y, random_state=0, test_size=100)
maxabs_scaler = MaxAbsScaler().fit(X_train) # fit scaler as to scale training data between -1 and 1
# samples = [y.index(0), y.index(1)]
samples = [random.sample([i for i, x in enumerate(y) if x == 0], 1)[0],
random.sample([i for i, x in enumerate(y) if x == 1], 1)[0]]
print("Samples shown: #{0} of class 0; #{1} of class 1".format(samples[0], samples[1]))
plotting.plot_sample(X[samples], label=[y[i] for i in samples], title='Raw samples')
plotting.plot_sample(X_zeromean[samples], label=[y[i] for i in samples], title='Zeromean samples')
if X_pre is None:
X_maxabs = maxabs_scaler.transform(X_zeromean[samples])
plotting.plot_sample(X_maxabs, label=[y[i] for i in samples], title='Samples scaled to -1 to 1')
else:
plotting.plot_sample(np.array(X_pre[samples]), label=[y[i] for i in samples], title='Samples scaled to -1 to 1')
def load_data(type):
path = os.path.join(config.datasets_folder, learning_config['dataset'], type)
file = learning_config['dataset'] + '_' + learning_config['type'] + '_' + type + '.hdf5'
# dataset = HDF5Dataset(path, recursive=True, load_data=False,
# data_cache_size=4, transform=None)
dataset = HDF5Dataset(os.path.join(path, file), type)
# pd.read_hdf(config.results_folder + learning_config['dataset'] + '_' + 'train' + '.h5', key = 'train/data', mode='a')
# pd.read_hdf(config.results_folder + learning_config['dataset'] + '_' + 'train' + '.h5', key = 'train/label', mode='a')
if type == 'test':
# data_info = dataset.get_data_infos('data')
# loader_params = {'batch_size': data_info[0]['shape'][0], 'shuffle': True, 'num_workers': 1} #load all test data at once?
# loader_params = {'batch_size': dataset.__len__(), 'shuffle': False, 'num_workers': 1} #load all test data at once?
if dataset.__len__() < 1000:
loader_params = {'batch_size': dataset.__len__(), 'shuffle': False,
'num_workers': 1} # load all test data at once
else:
loader_params = {'batch_size': 100, 'shuffle': False, 'num_workers': 1} # load 100 test samples at once
else:
loader_params = {'batch_size': learning_config['mini batch size'], 'shuffle': True, 'num_workers': 1}
data_loader = data.DataLoader(dataset, **loader_params)
return data_loader
def load_dataset(dataset=None):
'''
deprecated
'''
if not dataset:
if learning_config['dataset'][:7] == 'PV_noPV':
dataset = PVnoPVdataset(config.raw_data_folder + learning_config["dataset"] + '.csv')
elif learning_config['dataset'][:31] == 'malfunctions_in_LV_grid_dataset':
dataset = MlfctinLVdataset(config.raw_data_folder + learning_config["dataset"] + '.csv')
else:
dataset = Dummydataset(config.raw_data_folder + learning_config["dataset"] + '.csv')
else:
if learning_config['dataset'][:7] == 'PV_noPV':
dataset = PVnoPVdataset(config.test_data_folder + 'PV_noPV.csv')
else:
dataset = MlfctinLVdataset(config.test_data_folder + 'malfunctions_in_LV_grid_dataset.csv')
X = dataset.get_x()
y = dataset.get_y()
return dataset, X, y
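# Fit the scaler on the training split only, then apply it to both splits,
# to avoid leaking test-set statistics into preprocessing.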
def preprocess(X_train, X_test):
scaler = fit_scaler(X_train)
X_train = preprocessing(X_train, scaler)
X_test = preprocessing(X_test, scaler)
return X_train, X_test
def fit_scaler(X):
X_zeromean = np.array(X - X.mean()) # deduct it's own mean from every sample
maxabs_scaler = MaxAbsScaler().fit(X_zeromean.reshape(X_zeromean.shape[1], X_zeromean.shape[
0])) # fit scaler as to scale training data between -1 and 1
return maxabs_scaler
def preprocessing(X, scaler):
X_zeromean = np.array(X - X.mean())
X = scaler.transform(X_zeromean.reshape(X_zeromean.shape[1], X_zeromean.shape[0]))
return pd.DataFrame(data=X)
def choose_best(models_and_losses):
index_best = [i[1] for i in models_and_losses].index(min([i[1] for i in models_and_losses]))
epoch = index_best + 1
return models_and_losses[index_best], epoch
def get_weights_copy(model):
weights_path = 'weights_temp.pt'
torch.save(model.state_dict, weights_path)
return torch.load(weights_path)
'''def load_test_data(config):
pd.read_hdf(config.results_folder + learning_config['dataset'] + '_' + 'test' + '.h5', key = 'test/data', mode='a')
pd.read_hdf(config.results_folder + learning_config['dataset'] + '_' + 'test' + '.h5', key = 'test/label', mode='a')
test_loader, is_clean = load_gtsrb(config, test=True)
x_test, y_test = next(iter(test_loader))
x_test, y_test = x_test.numpy(), y_test.numpy()
return x_test, y_test, is_clean'''
| StarcoderdataPython |
3394772 | <filename>genrl/classical/bandit/contextual_policies/__init__.py
from genrl.classical.bandit.contextual_policies.bayesian import ( # noqa
BayesianUCBCBPolicy,
)
from genrl.classical.bandit.contextual_policies.epsgreedy import ( # noqa
EpsGreedyCBPolicy,
)
from genrl.classical.bandit.contextual_policies.gradient import GradientCBPolicy # noqa
from genrl.classical.bandit.contextual_policies.thompson import ( # noqa
ThompsonSamplingCBPolicy,
)
from genrl.classical.bandit.contextual_policies.ucb import UCBCBPolicy # noqa
from genrl.classical.bandit.contextual_policies.base import CBPolicy  # noqa
| StarcoderdataPython |
3224436 | <reponame>cmbasnett/fake-bpy-module
ClothSolverResult.status = None
| StarcoderdataPython |
67071 |
if __name__ == "__main__":
import argparse
from ocr.ocr_document import OCRProcessor
parser = argparse.ArgumentParser(
description="Python script to detect and extract documents."
)
parser.add_argument(
"-i",
"--input-image",
help="Image containing the document",
required=True,
dest="input_image",
)
args = parser.parse_args()
image_path = args.input_image
ocr_processor = OCRProcessor()
ocr = ocr_processor(image_path)
print(ocr)
| StarcoderdataPython |
4806523 | <filename>jwtest/spider/pa.py
# -*- coding:utf-8 -*-
import urllib
import urllib2
import cookielib
import re
import string
import types
class Term:
time = ""
id = ""
courses_list = []
def __init__(self, id, time, courses_list):
self.id = id
self.time = time
self.courses_list = courses_list
def __str__(self):
        return self.id + ' ' + self.time + ' ' + str(self.courses_list)
class Course:
id = ""
time = ""
name = ""
weight = 0
grade = 0
def __init__(self, id, time, name, weight, grade):
self.id = id
self.time = time
self.name = name
self.weight = weight
self.grade = grade
def __str__(self):
return self.id + ' ' + self.time + ' ' + self.name + ' ' + str(self.weight) + ' ' + str(self.grade)
class NPU:
def __init__(self, name, passwd):
        # login URL
self.loginUrl = 'http://us.nwpu.edu.cn/eams/login.action'
        # grades URL
self.gradeUrl = 'http://us.nwpu.edu.cn/eams/teach/grade/course/person!historyCourseGrade.action?projectType=MAJOR'
self.cookies = cookielib.MozillaCookieJar('cookie.txt')
self.postdata = urllib.urlencode({
'username': name,
            'password': passwd,
'encodedPassword': '',
'session_locale': 'zh_CN',
})
        # list of grade objects
        # build an opener with cookie handling
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookies))
    # fetch the grades page for this semester
def getPage(self):
try:
request = urllib2.Request(url=self.loginUrl, data=self.postdata)
            # connect and simulate login
result = self.opener.open(request)
self.cookies.save(ignore_discard=True, ignore_expires=True)
# 打印登录内容
# print 'asdf'
# print result.read()
# 获得成绩界面的html
result = self.opener.open(self.gradeUrl)
return result.read().decode('utf-8')
except urllib2.URLError, e:
            print 'Connection failed'
if hasattr(e, "reason"):
print "error", e.reason
return None
def getGrades(self, page):
# print page
reg = 'not find#$$'
tablelen11 = '<tr>\s*?<th w.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?</tr>'
tablelen12 = '<tr>\s*?<th w.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?</tr>'
tablelen13 = '<tr>\s*?<th w.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?</tr>'
tablelen14 = '<tr>\s*?<th w.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?<th.*?</th>\s*?</tr>'
if re.search(u'补考成绩', page) and re.search(u'实验成绩', page) and re.findall(tablelen14, page, re.S):
print '14'
reg = '<tr>\s*?<td>(.*?)</td>.*?<td.*?<td.*?<td><a href=".*?>(.*?)</a>.*?<t.*?' + '<td>(.*?)</td.*?<td.*?<td.*?<td.*?<td.*?<td.*?<td.*?<t.*?>\s*(\w*).*?<.*?<t.*?</tr>'
elif (re.search(u'补考成绩', page) or re.search(u'实验成绩', page)) and re.search(tablelen13, page, re.S):
print '13'
reg = '<tr>\s*?<td>(.*?)</td>.*?<td.*?<td.*?<td><a href=".*?>(.*?)</a>.*?<t.*?' + '<td>(.*?)</td.*?<td.*?<td.*?<td.*?<td.*?<td.*?<t.*?>\s*(\w*).*?<.*?<t.*?</tr>'
elif re.search(tablelen12, page, re.S):
print '12'
reg = '<tr>\s*?<td>(.*?)</td>.*?<td.*?<td.*?<td><a href=".*?>(.*?)</a>.*?<t.*?' + '<td>(.*?)</td.*?<td.*?<td.*?<td.*?<td.*?<t.*?>\s*(\w*).*?<.*?<t.*?</tr>'
elif re.search(tablelen11, page, re.S):
print '11'
reg = '<tr>\s*?<td>(.*?)</td>.*?<td.*?<td.*?<td><a href=".*?>(.*?)</a>.*?<t.*?' + '<td>(.*?)</td.*?<td.*?<td.*?<td.*?<t.*?>\s*(\w*).*?<.*?<t.*?</tr>'
# if re.findall(u'补考成绩', page):
        # print 'contains makeup-exam grades'
# reg = '<tr>\s*?<td>(.*?)</td>.*?<td.*?<td.*?<td><a href=".*?>(.*?)</a>.*?<t.*?'+'<td>(.*?)</td.*?<td.*?<td.*?<td.*?<td.*?<td.*?<td.*?<t.*?>\s*(\w*).*?<.*?<t.*?</tr>'
# else:
# reg = '<tr>\s*?<td>(.*?)</td>.*?<td.*?<td.*?<td><a href=".*?>(.*?)</a>.*?<t.*?'+'<td>(.*?)</td.*?<td.*?<td.*?<td.*?<td.*?<td.*?<t.*?>\s*(\w*).*?<.*?<t.*?</tr>'
myItems = re.findall(reg, page, re.S)
if myItems:
            print 'Query succeeded'
else:
            print 'Query failed'
grade_dict = {}
terms = []
term_list = []
cnt = 1
for item in myItems:
print item[0], item[1], item[2], item[3]
# print item[0].encode('utf-8'),item[1].encode('utf-8'),item[2].encode('utf-8'),item[3].encode('utf-8')
# print type(item[0]), type(item[1]), type(item[2]), type(item[3])
if re.match('^\d+\.?\d*$', item[2]) and re.match('^\d+\.?\d*$', item[3]):
courseid = 'course_' + str(cnt)
cnt = cnt + 1
if not grade_dict.has_key(item[0].encode('utf-8')):
grade_dict[item[0].encode('utf-8')] = []
terms.append(item[0].encode('utf-8'))
grade_dict[item[0].encode('utf-8')].append(
Course(courseid, item[0].encode('utf-8'), item[1].encode('utf-8'), string.atof(item[2]),
string.atof(item[3])))
termcnt = 1
for k in terms:
termid = 'term_' + str(termcnt)
termcnt = termcnt + 1
list = grade_dict[k]
term_list.append(Term(termid, list[0].time, list))
for i in list:
print i
return term_list | StarcoderdataPython |
150004 | # Copyright (c) 2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
import json
from StringIO import StringIO
from twisted.application import service
from twisted.internet import defer, address
from twisted.python import filepath, failure
from twisted.trial import unittest
from twisted.web import resource, server, http_headers
from twisted.web.test import test_web
from piped import exceptions, util, processing, dependencies
from piped.providers import web_provider
class DummyRequest(test_web.DummyRequest, server.Request):
channel = Ellipsis
def __init__(self, *a, **kw):
test_web.DummyRequest.__init__(self, *a, **kw)
self.requestHeaders = http_headers.Headers()
self.content = StringIO()
def getHeader(self, key):
return server.Request.getHeader(self, key)
def setHeader(self, name, value):
return server.Request.setHeader(self, name, value)
def set_content(self, content):
if not hasattr(content, 'read'):
self.content = StringIO(content)
else:
self.content = content
def setResponseCode(self, code, message=None):
server.Request.setResponseCode(self, code, message)
@property
def written_as_string(self):
return ''.join(self.written)
class WebProviderTest(unittest.TestCase):
def setUp(self):
self.runtime_environment = processing.RuntimeEnvironment()
self.service = service.IService(self.runtime_environment.application)
self.dependency_manager = self.runtime_environment.dependency_manager
self.configuration_manager = self.runtime_environment.configuration_manager
self.resource_manager = self.runtime_environment.resource_manager
self.dependency_manager.configure(self.runtime_environment)
def tearDown(self):
if self.service.running:
self.service.stopService()
def _create_configured_web_resource(self, routing, site_configuration=None):
site_configuration = site_configuration or dict()
web_site = web_provider.WebSite('site_name', site_configuration)
web_resource = web_provider.WebResource(web_site, routing)
web_resource.configure(self.runtime_environment)
return web_resource
def assertConfiguredWithProcessor(self, web_resource, processor=None, no_resource_processor=None):
if processor:
self.assertNotEquals(web_resource.processor_dependency, None)
self.assertEquals(web_resource.processor_dependency.provider, processor)
else:
self.assertEquals(web_resource.processor_dependency, None)
if no_resource_processor:
self.assertNotEquals(web_resource.no_resource_processor_dependency, None)
self.assertEquals(web_resource.no_resource_processor_dependency.provider, no_resource_processor)
else:
self.assertEquals(web_resource.no_resource_processor_dependency, None)
def getResourceForFakeRequest(self, site, post_path=None, request=None):
if not request:
request = DummyRequest(post_path)
return site.factory.getResourceFor(request)
def getConfiguredWebSite(self, config):
web_site = web_provider.WebSite('site_name', config)
web_site.configure(self.runtime_environment)
return web_site
def test_enabled_web_sites_provided(self):
provider = web_provider.WebResourceProvider()
self.configuration_manager.set('web.my_site.routing',
dict(__config__=dict(processor='a_processor'))
)
self.configuration_manager.set('web.another_site.enabled', False)
self.configuration_manager.set('web.another_site.routing',
dict(__config__=dict(processor='a_processor'))
)
provider.configure(self.runtime_environment)
self.assertEquals(len(provider.services), 1)
def test_simple_processor_routing(self):
config = dict(
routing = dict(
__config__ = dict(processor='pipeline.a_pipeline')
)
)
web_site = self.getConfiguredWebSite(config)
web_resource = self.getResourceForFakeRequest(web_site, [''])
self.assertConfiguredWithProcessor(web_resource, 'pipeline.a_pipeline')
def test_no_resource_processor_routing(self):
config = dict(
routing = dict(
__config__ = dict(processor='pipeline.root_pipeline', no_resource_processor='pipeline.root_no_resource_pipeline'),
foo = dict(
__config__ = dict(processor = 'pipeline.foo_pipeline')
),
bar = dict(
baz = dict(
__config__ = dict(no_resource_processor = 'pipeline.baz_pipeline')
)
)
)
)
web_site = self.getConfiguredWebSite(config)
root_resource = self.getResourceForFakeRequest(web_site, [''])
self.assertConfiguredWithProcessor(root_resource, processor='pipeline.root_pipeline', no_resource_processor='pipeline.root_no_resource_pipeline')
        # nonexistent resources should be rendered by the closest matching no_resource_processor
self.assertEquals(self.getResourceForFakeRequest(web_site, ['nonexistent']), root_resource)
self.assertEquals(self.getResourceForFakeRequest(web_site, ['nonexistent', 'nested']), root_resource)
# since foo does not have a no_resource_processor, its no_resources should be rendered by the root_resource
self.assertEquals(self.getResourceForFakeRequest(web_site, ['foo', 'nonexistent']), root_resource)
self.assertEquals(self.getResourceForFakeRequest(web_site, ['foo', 'nonexistent', 'nested']), root_resource)
# since bar does not have a processor/no_resource_processor, it should be rendered by the root_resource
self.assertEquals(self.getResourceForFakeRequest(web_site, ['bar']), root_resource)
self.assertConfiguredWithProcessor(self.getResourceForFakeRequest(web_site, ['foo']), processor='pipeline.foo_pipeline')
self.assertConfiguredWithProcessor(self.getResourceForFakeRequest(web_site, ['foo', '']), processor='pipeline.foo_pipeline')
baz_resource = self.getResourceForFakeRequest(web_site, ['bar', 'baz'])
self.assertConfiguredWithProcessor(baz_resource, no_resource_processor='pipeline.baz_pipeline')
# since baz has a no_resource_processor, it is capable of rendering that itself doesn't have a "proper" resource/processor
self.assertEquals(self.getResourceForFakeRequest(web_site, ['bar', 'baz', '']), baz_resource)
self.assertEquals(self.getResourceForFakeRequest(web_site, ['bar', 'baz', 'nonexistent']), baz_resource)
self.assertEquals(self.getResourceForFakeRequest(web_site, ['bar', 'baz', 'nonexistent', 'nested']), baz_resource)
def test_web_resource_no_resource_request_processing(self):
""" Test that various web resources are being rendered with a request instance that
has its "postpath" instance variable set to the remaining / unhandled path segments.
"""
config = dict(
routing = dict(
__config__ = dict(processor='pipeline.root_pipeline', no_resource_processor='pipeline.root_no_resource_pipeline'),
foo = dict(
__config__ = dict(processor='pipeline.foo_pipeline')
),
bar = dict(
baz = dict(
__config__ = dict(no_resource_processor='pipeline.baz_pipeline')
)
)
)
)
web_site = self.getConfiguredWebSite(config)
batons = list()
# fake the pipelines being ready:
root_resource = self.getResourceForFakeRequest(web_site, [''])
foo_resource = self.getResourceForFakeRequest(web_site, ['foo'])
baz_resource = self.getResourceForFakeRequest(web_site, ['bar', 'baz'])
for resource in (root_resource, foo_resource, baz_resource):
if resource.processor_dependency:
resource.processor_dependency.on_resource_ready(batons.append)
if resource.no_resource_processor_dependency:
resource.no_resource_processor_dependency.on_resource_ready(batons.append)
def assertRequestRenderedWithPostPath(web_site, batons, request, post_path):
self.getResourceForFakeRequest(web_site, request=request).render(request)
self.assertEquals(batons, [dict(request=request)])
request = batons.pop()['request']
self.assertEquals(request.postpath, post_path)
for request_path, expected_postpath in (
# paths under the root resource, which has both a regular processor and a no resource processor
([''], []),
(['nonexistent'], ['nonexistent']),
(['nonexistent', 'nested'], ['nonexistent', 'nested']),
# paths under the foo/bar resource, which only has a regular processor
(['foo', 'bar'], ['foo', 'bar']),
(['foo', 'bar', ''], ['foo', 'bar', '']),
(['foo', 'bar', 'nested'], ['foo', 'bar', 'nested']),
# paths under the bar resource, which has a nested resource, but no processors at all
(['bar'], ['bar']),
(['bar', ''], ['bar', '']),
(['bar', 'nested'], ['bar', 'nested']),
# paths under the bar/baz resource, which only has a no resource processor
(['bar', 'baz'], []),
(['bar', 'baz', ''], ['']),
(['bar', 'baz', 'nested'], ['nested']),
(['bar', 'baz', 'nested', ''], ['nested', '']),
(['bar', 'baz', 'nested', 'deeply'], ['nested', 'deeply'])):
assertRequestRenderedWithPostPath(web_site, batons, DummyRequest(request_path), expected_postpath)
def test_static_preprocessors(self):
current_file = filepath.FilePath(__file__)
config = dict(
routing = dict(
__config__ = dict(
static = dict(
path = current_file.dirname(),
preprocessors = dict(
foo = "request: request.setHeader('foo', 'bar')"
)
)
)
)
)
web_site = self.getConfiguredWebSite(config)
# send a request for this file:
request = DummyRequest([current_file.basename()])
resource = web_site.factory.getResourceFor(request)
resource.render(request)
self.assertEquals(request.responseHeaders.getRawHeaders('foo'), ['bar'])
def test_processor_routing_with_nested_resources(self):
config = dict(
routing = dict(
__config__ = dict(
processor = 'pipeline.a_pipeline',
static = filepath.FilePath(__file__).dirname(),
),
nested = dict(
deeply = dict(
__config__ = dict(
processor = 'pipeline.another_pipeline'
)
)
)
)
)
web_site = self.getConfiguredWebSite(config)
web_resource = self.getResourceForFakeRequest(web_site, [''])
self.assertConfiguredWithProcessor(web_resource, 'pipeline.a_pipeline')
# if we request an existing file, a static file resource will be returned
filename = filepath.FilePath(__file__).basename()
static_resource = self.getResourceForFakeRequest(web_site, [filename])
self.assertIsInstance(static_resource, web_provider.StaticFile)
web_resource = self.getResourceForFakeRequest(web_site, ['nested'])
self.assertConfiguredWithProcessor(web_resource)
no_resource = self.getResourceForFakeRequest(web_site, ['nested', 'nonexistent'])
self.assertIsInstance(no_resource, resource.NoResource)
deeply_resource = self.getResourceForFakeRequest(web_site, ['nested', 'deeply'])
self.assertConfiguredWithProcessor(deeply_resource, 'pipeline.another_pipeline')
def test_web_resource_simple_request_processing(self):
web_resource = self._create_configured_web_resource(dict(__config__=dict(processor='pipeline.a_pipeline')))
request = DummyRequest([''])
batons = list()
web_resource.processor_dependency.on_resource_ready(batons.append)
# rendering the request should result in a baton being processed by the processor
web_resource.render(request)
self.assertEquals(batons, [dict(request=request)])
def test_web_resource_processing_handles_exceptions(self):
web_resource = self._create_configured_web_resource(dict(__config__=dict(processor='pipeline.a_pipeline')))
request = DummyRequest([''])
def raiser(baton):
raise Exception()
web_resource.processor_dependency.on_resource_ready(raiser)
# rendering the request should result in an exception response
web_resource.render(request)
self.assertIn('Processing Failed', ''.join(request.written))
self.assertEquals(request.code, 500)
def test_web_resource_processing_raises_with_debugging(self):
routing = dict(__config__=dict(processor='pipeline.a_pipeline'))
site_config = dict(debug=dict(allow=['localhost']))
web_resource = self._create_configured_web_resource(routing, site_config)
request = DummyRequest([''])
request.client = address.IPv4Address('TCP', 'localhost', 1234)
def raiser(baton):
raise Exception()
web_resource.processor_dependency.on_resource_ready(raiser)
# rendering the request should result in an exception response
web_resource.render(request)
self.assertIn('web.Server Traceback (most recent call last)', ''.join(request.written))
self.assertEquals(request.code, 500)
@defer.inlineCallbacks
def test_debug_handler_reaping(self):
# reap all debuggers every reactor iteration:
site_config = dict(routing=dict())
web_site = web_provider.WebSite('site_name', site_config)
debug_handler = web_provider.WebDebugHandler(web_site, reap_interval=0, max_inactive_time=0)
debug_handler.setServiceParent(self.service)
self.service.startService()
f = failure.Failure(Exception())
debug_handler.register_failure(f)
self.assertEquals(len(debug_handler.children), 1)
yield util.wait(0) # give the reaper one reactor iteration to reap the debugger
self.assertEquals(len(debug_handler.children), 0)
def test_debug_handler_allow(self):
site_config = dict(routing=dict())
web_site = self.getConfiguredWebSite(site_config)
debug_handler = web_provider.WebDebugHandler(web_site, allow=['some_host'])
debug_handler.setServiceParent(self.service)
f = failure.Failure(Exception())
path = debug_handler.register_failure(f)
request = DummyRequest([path])
# localhost is not allowed to debug:
request.client = address.IPv4Address('TCP', 'localhost', 1234)
forbidden = debug_handler.getChildWithDefault(path, request)
self.assertIsInstance(forbidden, resource.ForbiddenResource)
# but some_host is:
request.client = address.IPv4Address('TCP', 'some_host', 1234)
web_debugger = debug_handler.getChildWithDefault(path, request)
self.assertIsInstance(web_debugger, web_provider.WebDebugger)
def test_web_debugger(self):
# create a failure instance with an actual traceback:
        foo = 42  # this will become part of the debugger's namespace
try:
raise Exception()
except Exception as e:
f = util.NonCleaningFailure()
web_debugger = web_provider.WebDebugger(f)
request = DummyRequest([])
request.addArg('expr', 'foo')
result = web_debugger.render(request)
# the result should be json-encoded
self.assertEquals(result, json.dumps('42\n'))
def test_fails_if_both_static_and_concatenated_are_specified(self):
for invalid_routing in (dict(__config__=dict(static='', concatenated='')),
dict(nested=dict(__config__=dict(static='', concatenated='')))):
site = web_provider.WebSite('site_name', dict(routing=invalid_routing))
self.assertRaises(exceptions.ConfigurationError, site.configure, self.runtime_environment)
def test_request_finished_when_garbage_collected(self):
web_site = web_provider.WebSite('site_name', dict(routing=dict(__config__=dict(processor='pipeline.test_pipeline'))))
web_site.configure(self.runtime_environment)
batons = list()
web_resource = self.getResourceForFakeRequest(web_site, [])
web_resource.processor_dependency = dependencies.InstanceDependency(batons.append)
web_resource.processor_dependency.is_ready = True
request = DummyRequest([])
web_resource.render(request)
# the processor should have been asked to process a baton
self.assertEquals(len(batons), 1)
self.assertEquals(batons[0]['request'], request)
# the processor didn't finish the request:
self.assertEquals(request.finished, False)
# .. however, when the processor loses the reference to the request, it should be
# automatically finished:
batons.pop()
self.assertEquals(request.finished, True)
class TestConcatenatedFile(unittest.TestCase):
def test_concatenating_files(self):
test_data_path = filepath.FilePath(__file__).sibling('data')
file_paths = [test_data_path.child('foo'), test_data_path.child('bar')]
cf = web_provider.ConcatenatedFile('text/plain', file_paths)
request = DummyRequest([''])
text = cf.render_GET(request)
self.assertEquals(text, 'foo\nbar\n')
def test_concatenating_files_in_different_order(self):
test_data_path = filepath.FilePath(__file__).sibling('data')
file_paths = [test_data_path.child('bar'), test_data_path.child('foo')]
cf = web_provider.ConcatenatedFile('text/plain', file_paths)
request = DummyRequest([''])
text = cf.render_GET(request)
self.assertEquals(text, 'bar\nfoo\n')
def test_just_a_single_file(self):
test_data_path = filepath.FilePath(__file__).sibling('data')
file_paths = [test_data_path.child('foo')]
cf = web_provider.ConcatenatedFile('text/plain', file_paths)
request = DummyRequest([''])
text = cf.render_GET(request)
self.assertEquals(text, 'foo\n')
def test_no_files(self):
file_paths = []
cf = web_provider.ConcatenatedFile('text/plain', file_paths)
request = DummyRequest([''])
text = cf.render_GET(request)
self.assertEquals(text, '')
def test_ensure_the_right_content_type_is_set(self):
file_paths = []
cf = web_provider.ConcatenatedFile('text/plain', file_paths)
request = DummyRequest([''])
cf.render_GET(request)
self.assertEquals(request.responseHeaders.getRawHeaders('content-type'), ['text/plain'])
| StarcoderdataPython |
1707626 | import cv2
import numpy
import pyopencl
from proc_tex.OpenCLCellNoise2D import OpenCLCellNoise2D
import proc_tex.texture_transforms
from proc_tex.texture_transforms import tex_scale_to_region, tex_to_dtype
if __name__ == '__main__':
cl_context = pyopencl.create_some_context()
texture = OpenCLCellNoise2D(cl_context, 4, 1)
texture = tex_to_dtype(tex_scale_to_region(texture), numpy.uint16,
scale=65535)
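    # Evaluate the noise on a 1024x1024 grid spanning the unit square [0,1]x[0,1].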
eval_pts = texture.gen_eval_pts((1024, 1024), numpy.array([[0,1], [0,1]]))
image = texture.to_image(None, None, eval_pts=eval_pts)
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
texture.to_video(None, None, 120, 30, './example.webm', pix_fmt='gray16le',
codec_params=['-lossless', '0'], eval_pts=eval_pts)
| StarcoderdataPython |
3354664 | # O(n) time | O(n) space
def runLengthEncoding(string):
encodedString = []
currentRun = 1
for i in range(1, len(string)):
prevChar = string[i-1]
currChar = string[i]
if prevChar != currChar or currentRun == 9:
encodedString.append(str(currentRun))
encodedString.append(prevChar)
currentRun = 0
currentRun += 1
encodedString.append(str(currentRun))
encodedString.append(string[len(string)-1])
return ''.join(encodedString)
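
# Example: runs longer than 9 are split so every count stays a single digit,
# e.g. runLengthEncoding("AAAAAAAAAAAB") == "9A2A1B"  (11 A's followed by one B)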
| StarcoderdataPython |
3397873 | <filename>Python/Christmas -tree.py
def input_value(bar):
foo = input(bar)
foo = check_num(foo, bar)
return foo
def check_num(foo, bar):
    try:
        foo = int(foo)
        if foo > 0:
            return foo
        else:
            error_msg(foo)
            # re-prompt and validate the new value as well
            return check_num(input(bar), bar)
    except ValueError:
        error_msg(foo)
        return check_num(input(bar), bar)
def error_msg(foo):
print(f"{foo} is invalid.")
def tree(foo):
bar = "*"
for i in range(1, foo + 1):
print("{f:^80}".format(f=bar))
bar += "**"
if foo > 0 and foo < 10:
print("{f:^80}".format(f="#"))
print("{f:^80}".format(f="#"))
    elif foo < 20:
print("{f:^80}".format(f="###"))
print("{f:^80}".format(f="###"))
print("{f:^80}".format(f="###"))
print("{f:^80}".format(f="###"))
    else:
print("{f:^80}".format(f="#####"))
print("{f:^80}".format(f="#####"))
print("{f:^80}".format(f="#####"))
print("{f:^80}".format(f="#####"))
print("{f:^80}".format(f="#####"))
print("{f:^80}".format(f="#####"))
print("{f:^80}".format(f="#####"))
def main():
tree(input_value("How tall shoud i grow the tree? : "))
main() | StarcoderdataPython |
4808371 | from pylab import *
from PIL import Image
import numpy as np
from scipy.ndimage import filters, measurements, morphology
# The larger standard_deviation is, the blurrier the result
def gaussian_filter(img_path, standard_deviation):
"""
高斯模糊
:param img_path: 原图像路径
:param standard_deviation: 越大越模糊
:return:
"""
origin_img = Image.open(img_path)
print(np.shape(origin_img))
n_chan = len(origin_img.getbands())
origin_img = np.array(origin_img)
for i in range(n_chan):
origin_img[:, :, i] = filters.gaussian_filter(origin_img[:, :, i], standard_deviation)
result_img = Image.fromarray(np.uint8(origin_img))
# result_img.show()
return result_img
def sobel_example(img_path):
im = np.array(Image.open(img_path).convert('L'))
imx = np.zeros(im.shape)
    # The second argument of sobel() selects the x or y direction derivative,
    # and the third argument receives the output.
filters.sobel(im, 1, imx)
imy = np.zeros(im.shape)
filters.sobel(im, 0, imy)
magnitude = np.sqrt(imx ** 2 + imy ** 2)
    # Positive derivatives show as bright pixels and negative ones as dark pixels; gray areas mean the derivative is close to zero.
plt.imshow(magnitude)
plt.show()
# print(magnitude)
    # Use Gaussian derivative filters
    sigma = 5  # standard deviation
imx = zeros(im.shape)
    # The third argument specifies which derivative to compute in each direction; the second is the standard deviation used.
filters.gaussian_filter(im, (sigma, sigma), (0, 1), imx)
imy = zeros(im.shape)
filters.gaussian_filter(im, (sigma, sigma), (1, 0), imy)
magnitude = np.sqrt(imx ** 2 + imy ** 2)
imshow(magnitude)
show()
def morphology_example(img_path):
    # Morphology (mathematical morphology) is a framework and a collection of image
    # processing methods for measuring and analyzing basic shapes.
    # Morphology is usually applied to binary images, but it can also be used on grayscale images.
    # Load the image, then threshold it to make sure we are working with a binary image.
im = np.array(Image.open(img_path).convert('L'))
    # Multiplying by 1 converts the boolean array into a binary (0/1) representation.
im = 1 * (im < 240)
    # label() finds the individual objects and assigns integer labels to pixels
    # according to which object they belong to.
labels, nbr_objects = measurements.label(im)
imshow(labels)
    # nbr_objects is the number of objects found
print("Number of objects:", nbr_objects)
    # A morphological binary opening separates the individual objects more cleanly.
    # The second argument of binary_opening() specifies an array structuring element.
    # The array describes which neighboring pixels to use, centered on each pixel.
    # In this case we use 9 pixels in the y direction (4 pixels above, the pixel
    # itself, and 4 pixels below) and 5 pixels in the x direction. You can pass any
    # array as the structuring element; its nonzero elements determine which
    # neighboring pixels are used.
    # The iterations parameter determines how many times the operation is applied.
im_open = morphology.binary_opening(im, ones((9, 5)), iterations=2)
labels_open, nbr_objects_open = measurements.label(im_open)
figure()
imshow(labels)
print("Number of objects:", nbr_objects_open)
show()
def denoise(im, U_init, tolerance=0.1, tau=0.125, tv_weight=100):
""" 使用<NAME>(2005)在公式(11)中的计算步骤实现Rudin-Osher-Fatemi(ROF)去噪模型
输入:含有噪声的输入图像(灰度图像)、U 的初始值、TV 正则项权值、步长、停业条件
输出:去噪和去除纹理后的图像、纹理残留"""
    m, n = im.shape  # size of the noisy image
    # initialization
    U = U_init
    Px = im  # x component of the dual field
    Py = im  # y component of the dual field
    error = 1
    while error > tolerance:
        Uold = U
        # gradient of the primal variable
        # roll(), as its name suggests, cyclically "rolls" the values of an array along an axis.
        # It is very convenient for computing differences between neighbors, such as the derivatives here.
        GradUx = roll(U, -1, axis=1) - U  # x component of U's gradient
        GradUy = roll(U, -1, axis=0) - U  # y component of U's gradient
        # update the dual variables
        PxNew = Px + (tau / tv_weight) * GradUx
        PyNew = Py + (tau / tv_weight) * GradUy
        NormNew = maximum(1, sqrt(PxNew ** 2 + PyNew ** 2))
        Px = PxNew / NormNew  # update the x component (dual)
        Py = PyNew / NormNew  # update the y component (dual)
        # update the primal variable
        RxPx = roll(Px, 1, axis=1)  # right-shift the x component along the x axis
        RyPy = roll(Py, 1, axis=0)  # down-shift the y component along the y axis
        DivP = (Px - RxPx) + (Py - RyPy)  # divergence of the dual field
        U = im + tv_weight * DivP  # update the primal variable
        # update the error
        # linalg.norm() measures the difference between two arrays
        # (here the image matrices U and Uold)
        error = linalg.norm(U - Uold) / sqrt(n * m)
    return U, im - U  # denoised image and texture residual
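
# Minimal usage sketch (hypothetical file name; assumes a grayscale image as a float array):
# im = np.array(Image.open('noisy.jpg').convert('L'), dtype=float)
# U, T = denoise(im, im)  # initializing U with the noisy image itself works fine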
if __name__ == '__main__':
path = '/home/straw/下载/dataset/my_dataset/test.jpg'
re = gaussian_filter(path, 2)
re.show()
| StarcoderdataPython |
3290448 | <reponame>aspose-slides/Aspose.Slides-for-Python-via-.NET
import aspose.slides as slides
def charts_showing_display_unit_label():
#ExStart:ShowingDisplayUnitLabel
outDir = "./examples/out/"
with slides.Presentation() as pres:
chart = pres.slides[0].shapes.add_chart(slides.charts.ChartType.CLUSTERED_COLUMN, 50, 50, 450, 300)
chart.axes.vertical_axis.display_unit = slides.charts.DisplayUnitType.MILLIONS
pres.save(outDir + "charts_showing_display_unit_label_out.pptx", slides.export.SaveFormat.PPTX)
#ExEnd:ShowingDisplayUnitLabel | StarcoderdataPython |
1670336 | import asyncio
import json
import logging
import signal as signals
from hashlib import sha256
import yaml
from hbmqtt.client import ClientException, MQTTClient
from hbmqtt.mqtt.constants import QOS_1
from jinja2 import Template
from .control import Controller
_LOG = logging.getLogger(__name__)
class Server:
def __init__(self, config):
self.config = config
self.mqtt = None
self.loop = asyncio.get_event_loop()
self.tasks = []
self.unawaited_tasks = []
self.controller = None
def _init_controller(self):
controller_config = self.config["controller"]
setpoint = controller_config["setpoint"]
kp = controller_config.get("kp", 1)
ki = controller_config.get("ki", 0.1)
kd = controller_config.get("kd", 0.05)
power_mult = controller_config.get("power_mult", 30)
self.controller = Controller(setpoint, kp, ki, kd, power_mult)
async def _init_mqtt(self):
mqtt_config = self.config["mqtt"]
topic_prefix = mqtt_config["topic_prefix"]
wort_temp_topic = mqtt_config["wort_temp_topic"]
client_id = mqtt_config.get(
"client_id",
"beer-temp-ctrl-%s" % sha256(wort_temp_topic.encode("utf8")).hexdigest()[:8],
)
uri = "mqtt://"
username = mqtt_config.get("username")
        password = mqtt_config.get("password")
if username and password:
uri += f"{username}:{password}@"
uri += f"{mqtt_config['host']}:{mqtt_config['port']}"
client_config = {}
status_topic = mqtt_config.get("status_topic")
if status_topic is not None:
client_config["will"] = dict(
retain=True,
topic=f"{topic_prefix}/{status_topic}",
message=mqtt_config.get("status_payload_dead", "dead").encode("utf8"),
qos=1,
)
self.mqtt = MQTTClient(client_id=client_id, config=client_config, loop=self.loop)
await self.mqtt.connect(uri)
if status_topic is not None:
await self.mqtt.publish(
f"{topic_prefix}/{status_topic}",
mqtt_config.get("status_payload_running", "running").encode("utf8"),
qos=1,
retain=True,
)
await self.mqtt.subscribe([(mqtt_config["wort_temp_topic"], QOS_1)])
def _handle_mqtt_msg(self, topic, payload):
_LOG.info("Message received on topic %s: %s", topic, payload)
mqtt_config = self.config["mqtt"]
if topic == mqtt_config["wort_temp_topic"]:
template = Template(
mqtt_config.get("wort_temp_value_template", "{{ value }}")
)
try:
value_json = json.loads(payload)
except ValueError:
value_json = None
val = template.render(value_json=value_json, value=payload)
self.controller.current_temp = float(val)
_LOG.info("Wort temp set to %s", val)
else:
_LOG.debug("Topic didn't match anything we want to handle")
# Tasks
async def _controller_loop(self):
try:
while True:
_LOG.info(
"Controller wants %s at %s power",
self.controller.direction,
self.controller.power_level,
)
await asyncio.sleep(10)
except Exception as e:
_LOG.exception("Exception in _controller_loop")
raise
async def _mqtt_rx_loop(self):
try:
while True:
msg = await self.mqtt.deliver_message()
topic = msg.publish_packet.variable_header.topic_name
payload = msg.publish_packet.payload.data.decode("utf8")
_LOG.info("Received message on topic %r: %r", topic, payload)
try:
self._handle_mqtt_msg(topic, payload)
except Exception:
_LOG.exception("Exception when handling MQTT message:")
finally:
await self.mqtt.publish(
"%s/%s"
% (
self.config["mqtt"]["topic_prefix"],
self.config["mqtt"]["status_topic"],
),
self.config["mqtt"]
.get("status_payload_stopped", "stopped")
.encode("utf8"),
qos=1,
retain=True,
)
_LOG.info("Disconnecting from MQTT...")
await self.mqtt.disconnect()
_LOG.info("MQTT disconnected")
async def _remove_finished_tasks(self):
while True:
await asyncio.sleep(1)
finished_tasks = [x for x in self.unawaited_tasks if x.done()]
if not finished_tasks:
continue
for task in finished_tasks:
try:
await task
except Exception as e:
_LOG.exception("Exception in task: %r:", task)
self.unawaited_tasks = list(
filter(lambda x: not x.done(), self.unawaited_tasks)
)
def run(self):
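# Register signal handlers so SIGHUP/SIGTERM/SIGINT trigger a clean async shutdown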
for s in (signals.SIGHUP, signals.SIGTERM, signals.SIGINT):
self.loop.add_signal_handler(
s, lambda s=s: self.loop.create_task(self.shutdown(s))
)
_LOG.debug("Controller init")
self._init_controller()
# Get connected to the MQTT server
_LOG.info("Connecting to MQTT...")
self.loop.run_until_complete(self._init_mqtt())
_LOG.info("MQTT connected")
# This is where we add any other async tasks that we want to run, such as polling
# inputs, sensor loops etc.
self.tasks = [
self.loop.create_task(coro)
for coro in (
self._controller_loop(),
self._mqtt_rx_loop(),
self._remove_finished_tasks(),
)
]
try:
self.loop.run_forever()
finally:
self.loop.close()
_LOG.debug("Loop closed")
_LOG.debug("run() complete")
async def shutdown(self, signal):
_LOG.warning("Received exit signal %s", signal.name)
# Cancel our main tasks first so we don't disrupt the MQTT library's connection
for t in self.tasks:
t.cancel()
_LOG.info("Waiting for main task to complete...")
all_done = False
while not all_done:
all_done = all(t.done() for t in self.tasks)
await asyncio.sleep(0.1)
current_task = asyncio.Task.current_task()
tasks = [
t
for t in asyncio.Task.all_tasks(loop=self.loop)
if not t.done() and t is not current_task
]
_LOG.info("Cancelling %s remaining tasks", len(tasks))
for t in tasks:
t.cancel()
_LOG.info("Waiting for %s remaining tasks to complete...", len(tasks))
all_done = False
while not all_done:
all_done = all(t.done() for t in tasks)
await asyncio.sleep(0.1)
_LOG.debug("Tasks all finished. Stopping loop...")
self.loop.stop()
_LOG.debug("Loop stopped")
| StarcoderdataPython |
1631358 | #!/usr/bin/env python3
# <http://dbpedia.org/resource/Aristotle> <http://xmlns.com/foaf/0.1/name> "Aristotle"@en .
import sys
for line in sys.stdin:
if '/name' in line:
first = line.find('"')
end = line.find('"', first+1)
name = line[first+1:end]
if not name:
continue
if ' ' not in name:
# throw out single names if there is ( or _ in the string
if '(' in line or '_' in line:
continue
# throw out names if /resource/foo> does not match name
first = line.find('/resource/')
end = line.find('>', first+10)
article_name = line[first+10:end]
# unicode escapes cause spurious mismatches due to an encoding mismatch, so give the name a pass if it contains any
# give it a pass if there's a dash in the name
if '-' not in name and '\\u' not in name and name != article_name:
continue
print(name)
| StarcoderdataPython |
187233 |
import os
import zipfile
CBZ = '{}_ch{:0>3}.cbz'
def make_cbz(imgs, manga, chapter):
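# Pack the downloaded page images into a CBZ archive, deleting each image after it is added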
print(f'-> Creating CBZ of {manga} chapter {chapter}...', flush=True)
with zipfile.ZipFile(CBZ.format(manga, chapter), 'w') as archive:
for img in imgs:
archive.write(img)
os.remove(img)
| StarcoderdataPython |
1630808 |
prog = 'R3, L2, L2, R4, L1, R2, R3, R4, L2, R4, L2, L5, L1, R5, R2, R2, L1, R4, R1, L5, L3, R4, R3, R1, L1, L5, L4, L2, R5, L3, L4, R3, R1, L3, R1, L3, R3, L4, R2, R5, L190, R2, L3, R47, R4, L3, R78, L1, R3, R190, R4, L3, R4, R2, R5, R3, R4, R3, L1, L4, R3, L4, R1, L4, L5, R3, L3, L4, R1, R2, L4, L3, R3, R3, L2, L5, R1, L4, L1, R5, L5, R1, R5, L4, R2, L2, R1, L5, L4, R4, R4, R3, R2, R3, L1, R4, R5, L2, L5, L4, L1, R4, L4, R4, L4, R1, R5, L1, R1, L5, R5, R1, R1, L3, L1, R4, L1, L4, L4, L3, R1, R4, R1, R1, R2, L5, L2, R4, L1, R3, L5, L2, R5, L4, R5, L5, R3, R4, L3, L3, L2, R2, L5, L5, R3, R4, R3, R4, R3, R1'
steps = prog.split(', ')
def move():
pos = [0, 0]
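# d is the current heading as a (dx, dy) step; start facing north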
d = [0, 1]
visited = set()
visited.add(tuple(pos))
for step in steps:
t, dist = step[0], int(step[1:])
if t == 'R':
d = [d[1], -d[0]]
else:
assert t == 'L'
d = [-d[1], d[0]]
for _ in range(dist):
pos[0] = pos[0] + d[0]
pos[1] = pos[1] + d[1]
if tuple(pos) in visited:
return pos
visited.add(tuple(pos))
return pos
pos = move()
print(pos, abs(pos[0]) + abs(pos[1]))
| StarcoderdataPython |
3250185 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2018 Zuse Institute Berlin, www.zib.de
Permissions are granted as stated in the license file you have obtained
with this software. If you find the library useful for your purpose,
please refer to README.md for how to cite IPET.
@author: <NAME>
"""
import re
from ipet.concepts import IpetNode
from ipet.misc import misc
from ipet import Key
import logging
logger = logging.getLogger(__name__)
class StatisticReader(IpetNode):
"""
base class for all statistic readers - readers should always inherit from this base class for minimal implementation
effort
readers only need to overwrite the methods extractStatistic() and perhaps execEndOfProb()
"""
name = 'NO_NAME_DEFINED_YET'
regular_exp = re.compile('')
datakey = 'NO KEY'
datatype = float
lineindex = -1
context = Key.CONTEXT_LOGFILE
sleepAfterReturn = True
sleep = False
multipliers = dict(k=1000, M=1e6, G=1e9)
# the reader might behave differently depending on the solver type, due to the different output
SOLVERTYPE_SCIP = "SCIP"
SOLVERTYPE_GUROBI = "GUROBI"
SOLVERTYPE_CPLEX = "CPLEX"
SOLVERTYPE_CBC = "CBC"
SOLVERTYPE_XPRESS = "XPRESS"
SOLVERTYPE_COUENNE = "Couenne"
solvertype = SOLVERTYPE_SCIP
@staticmethod
def boolfunction(value):
""" parses string TRUE or FALSE and returns the boolean value of this expression """
return True if value == "TRUE" else False
@staticmethod
def changeSolverType(newtype):
StatisticReader.solvertype = newtype
def setTestRun(self, testrun):
self.testrun = testrun
def supportsContext(self, context):
"""
returns True if the reader supports a given context, otherwise False
"""
if type(self.context) is int:
return self.context == context
else:
return context in self.context
def getSplitLineWithRegexp(self, regular_exp, line, index=-1, startofline=False):
if startofline and not re.match(regular_exp, line):
return None
if not startofline and not re.search(regular_exp, line):
return None
if index == -1:
return line.split()
else:
return line.split()[index]
def getName(self):
"""
returns the name of the StatisticReader
"""
return self.name
def extractStatistic(self, line):
"""
overwrite this method for own reader subclasses
"""
try:
if self.regular_exp.search(line):
data = None
try:
data = self.datatype(misc.getWordAtIndex(line, self.lineindex))
except ValueError:
data = None
except IndexError:
data = None
except TypeError:
# print self.name, " failed data conversion"
raise TypeError("Type error during data conversion in line <%s>" % line)
self.addData(self.datakey, data)
except AttributeError:
# print self.name, " has no such attribute"
pass
def execEndOfProb(self):
"""
overwrite this method to implement final behaviour at the end of each problem, such as setting flags
"""
return None
def operateOnLine(self, line):
self.extractStatistic(line)
def addData(self, datakey, data):
logger.debug("Reader %s adds data" % (self.getName()))
self.testrun.addData(datakey, data)
def turnIntoFloat(self, astring):
"""
parses strings to floats, keeping track of trailing characters signifying magnitudes
Special attention is paid to strings of the form, e.g., '900k', where
the trailing 'k'-character signifies multiplication by 1000.
"""
lastelem = astring[-1]
multiplier = StatisticReader.multipliers.get(lastelem, 1.0)
return float(astring.rstrip('kMG')) * multiplier
######################################################################################
# DERIVED Classes
class NodeNameReader(StatisticReader):
"""
Read nodename data from a line in outfile like the output of 'uname -a' (currently only on Linux).
"""
context = [Key.CONTEXT_LOGFILE]
nodenameexp = re.compile(r'^Linux (\S*) .* GNU/Linux')
name = 'NodeNameReader'
datakey = Key.NodeName
nodename = None
def extractStatistic(self, line):
""" Save the hostname from a line like the output of 'uname -a' (currently only on Linux) to add at the end of the problem.
Parameters
----------
line from the outfile
"""
matched = self.nodenameexp.match(line)
if matched:
self.nodename = matched.groups()[0]
def execEndOfProb(self):
"""
At the end of each problem, add the current nodename to the data.
"""
if self.nodename is not None:
self.addData(self.datakey, self.nodename)
self.nodename = None
class MetaDataReader(StatisticReader):
"""
Read lines of the form
@Key Value
from meta, out and err file and stores 'Value' in a Field 'Key'.
"""
context = [Key.CONTEXT_METAFILE, Key.CONTEXT_LOGFILE, Key.CONTEXT_ERRFILE]
metadataexp = re.compile(r"^@\S{3,}\s+\S+$")
name = 'MetaDataReader'
datakey = Key.MetaData
def extractStatistic(self, line):
""" Read metadata from specified line
Parameters
----------
line
string to be read from. has to have the form
@attribute datum
"""
if self.metadataexp.match(line):
# TODO better to allow more spaces?
[attr, datum] = line.split('@')[1].split()
datum = datum.split('\n')[0]
self.testrun.metadatadict[attr] = datum
class BestSolInfeasibleReader(StatisticReader):
"""
catches the expression 'best solution is not feasible in original problem'
@return: False if above expression is found in the log and the best solution is thus not feasible, otherwise True
"""
name = 'BestSolInfeasibleReader'
regular_exp = re.compile('best solution is not feasible in original problem')
datakey = Key.BestSolutionInfeasible
def extractStatistic(self, line):
if self.regular_exp.search(line):
self.addData(self.datakey, True)
class DateTimeReader(StatisticReader):
"""
reads in the start and finish time from a timestamp in given in Milliseconds
If found, the corresponding data keys are Datetime_Start and Datetime_End
"""
name = 'DateTimeReader' # : the name for this reader
datetimestartexp = re.compile(r"^@03 ([0-9]+)") # : the expression for the date time start
datetimeendexp = re.compile(r"^@04 ([0-9]+)") # : the expression for the date time after termination
datetimestartkey = Key.DatetimeStart # : data key for start of run
datetimeendkey = Key.DatetimeEnd # : data key for end of run
datetimekw = {datetimestartkey:datetimestartexp, datetimeendkey:datetimeendexp}
def extractStatistic(self, line):
for key, exp in list(self.datetimekw.items()):
matched = exp.match(line)
if matched:
timestamp = int(matched.groups()[0])
#time = misc.convertTimeStamp(timestamp)
self.addData(key, timestamp)
break
class DualLPTimeReader(StatisticReader):
"""
reads the dual LP time
"""
name = 'DualLPTimeReader'
regular_exp = re.compile('^ dual LP')
datakey = Key.DualLpTime
datatype = float
lineindex = 3
class ErrorFileReader(StatisticReader):
"""
reads information from error files
"""
name = "ErrorFileReader"
regular_exp = re.compile(r"returned with error code (\d+)")
datakey = Key.ErrorCode
context = Key.CONTEXT_ERRFILE
def extractStatistic(self, line):
match = self.regular_exp.search(line)
if match:
returncode = match.groups()[0]
self.addData(self.datakey, int(returncode))
class SettingsFileReader(StatisticReader):
"""
parses settings from a settings file
parses the type, the default value, the name and the current value for every parameter
# [type: int, range: [-536870912,536870911], default: 100000]
nodeselection/bfs/stdpriority = 1000000
"""
name = "SettingsFileReader"
regular_exp_name = re.compile(r"^([\w/]+) = (\S+)")
regular_exp_type = re.compile(r"^# \[type: (\w+),.*default: ([^\]]+)\]")
context = Key.CONTEXT_SETFILE
typemap = {
"real" : float,
"char" : str,
"string" : str,
"int" : int,
"bool" : StatisticReader.boolfunction,
"longint" : int
}
""" map from a parameter type to a python standard data type """
def extractStatistic(self, line):
match_type = self.regular_exp_type.match(line)
if match_type:
self.type = match_type.groups()[0]
self.default = match_type.groups()[1]
else:
match_name = self.regular_exp_name.match(line)
if match_name:
name = match_name.groups()[0]
value = match_name.groups()[1]
typefunction = self.typemap.get(self.type, str)
try:
self.testrun.addParameterValue(name, typefunction(value))
self.testrun.addDefaultParameterValue(name, typefunction(self.default))
except ValueError:
# when an error occurs, just return a string
self.testrun.addParameterValue(name, value)
self.testrun.addDefaultParameterValue(name, self.default)
class GapReader(StatisticReader):
"""
reads the primal dual gap at the end of the solving
"""
name = 'GapReader'
regular_exp = re.compile('^Gap :')
datakey = Key.Gap
datatype = float
lineindex = 2
def extractStatistic(self, line):
if self.regular_exp.match(line):
gapasword = misc.getWordAtIndex(line, self.lineindex)
# if the gap is infinite, no data is passed to the test run
if gapasword != "infinite":
gap = self.turnIntoFloat(gapasword)
self.addData(self.datakey, gap)
class MaxDepthReader(StatisticReader):
"""
reads the maximum depth
"""
name = 'MaxDepthReader'
regular_exp = re.compile(' max depth :')
datakey = Key.MaximumDepth
datatype = int
lineindex = 3
class NodesReader(StatisticReader):
"""
reads the total number of solving nodes of the branch and bound search
"""
name = 'NodesReader'
regular_exp = re.compile(r"^ nodes \(total\) :")
datakey = Key.Nodes
datatype = int
lineindex = 3
class ObjsenseReader(StatisticReader):
name = 'ObjsenseReader'
regular_exp = re.compile(r"^ Objective : (\w*),")
datakey = Key.ObjectiveSense
minimize = 1
maximize = -1
orig_prob_state = False
orig_prob_expr = re.compile('^Original Problem :')
pres_prob_expr = re.compile('^Presolved Problem :')
def extractStatistic(self, line):
if self.orig_prob_expr.match(line):
self.orig_prob_state = True
return
if self.pres_prob_expr.match(line):
self.orig_prob_state = False
return
match = self.regular_exp.match(line)
if self.orig_prob_state and match:
objsense = self.minimize
if match.groups()[0] == "maximize":
objsense = self.maximize
self.addData(self.datakey, objsense)
class ObjlimitReader(StatisticReader):
name = "ObjlimitReader"
regular_exp = re.compile("objective value limit set to")
datakey = Key.ObjectiveLimit
datatype = float
lineindex = 5
class RootNodeFixingsReader(StatisticReader):
"""
reads the number of variable fixings during root node
"""
name = 'RootNodeFixingsReader'
regular_exp = re.compile('^ root node')
datakey = Key.RootNodeFixings
datatype = int
lineindex = 4
class TimeLimitReader(StatisticReader):
"""
extracts the time limit for a problem
"""
name = 'TimeLimitReader'
timelimitreadkeys = {
StatisticReader.SOLVERTYPE_SCIP : '@05',
StatisticReader.SOLVERTYPE_CPLEX : '@05',
StatisticReader.SOLVERTYPE_GUROBI : "@05",
StatisticReader.SOLVERTYPE_CBC : "@05",
StatisticReader.SOLVERTYPE_XPRESS : "@05",
StatisticReader.SOLVERTYPE_COUENNE : "^@05"}
datakey = Key.TimeLimit
def extractStatistic(self, line):
if re.search(self.timelimitreadkeys[StatisticReader.solvertype], line):
self.addData(self.datakey, float(line.split()[-1]))
class TimeToBestReader(StatisticReader):
name = 'TimeToBestReader'
regular_exp = re.compile(' Primal Bound :')
datakey = Key.TimeToBestSolution
datatype = float
lineindex = 3
def extractStatistic(self, line):
if self.regular_exp.match(line):
try:
self.addData(self.datakey, float(misc.getNumberAtIndex(line, self.lineindex)))
except TypeError:
pass
class TimeToFirstReader(StatisticReader):
name = 'TimeToFirstReader'
regular_exp = re.compile(' First Solution :')
datakey = Key.TimeToFirstSolution
datatype = float
lineindex = 3
def extractStatistic(self, line):
if self.regular_exp.match(line):
try:
timetofirst = float(misc.getNumberAtIndex(line, self.lineindex))
self.addData(self.datakey, timetofirst)
except TypeError:
pass
class ListReader(StatisticReader):
"""
reads a list matching a regular expression
"""
name = "ListReader"
def __init__(self, regpattern=None, name=None):
"""
construct a new list reader to parse key-value pairs from a given context
List readers parse key-value pairs of the form
(regpattern-match 1) value
(regpattern-match 2) value
(regpattern-match 3) value
The matching regpattern is used as data key
Parameters:
-----------
regpattern : a pattern (regular expression supported) that suitable lines must match
name : a name for this reader
"""
if regpattern is None:
raise ValueError("Error: No 'regpattern' specified for reader %s" % str(name))
self.regular_exp = re.compile(regpattern)
self.regpattern = regpattern
if name is None:
name = ListReader.name
self.name = name
def getEditableAttributes(self):
return ["name", "regpattern"]
def set_context(self, contextname):
self.context = self.contextname2contexts.get(contextname, self.context)
def getRequiredOptionsByAttribute(self, attr):
if attr == "context":
return list(self.contextname2contexts.keys())
return None
def getLineData(self, line):
match = self.regular_exp.match(line)
if match is not None:
datakey = match.group(1)
strval = match.group(2)
try:
val = int(strval)
except ValueError:
val = float(strval)
return (datakey, val)
return None
def extractStatistic(self, line):
data = self.getLineData(line)
if data is not None:
self.addData(data[0], data[1])
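# A minimal usage sketch (hypothetical pattern and line; normally a testrun drives the reader):
#   reader = ListReader(regpattern=r"^(\w+) time\s*:\s*(\S+)", name="TimeListReader")
#   reader.getLineData("presolve time : 1.5")  # -> ("presolve", 1.5)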
class SolCheckerReader(StatisticReader):
"""reads the solution checker output, if available, that can be used for validation
The output looks like this
Read MPS: 1
MIP has 49606 vars and 26069 constraints
Read SOL: 1
Objective value computed by solver: 82895.99999986154
Integrality tolerance: 1/10000
Linear tolerance: 1/10000
Objective tolerance: 1/10000
Check SOL: Integrality 1 Constraints 1 Objective 1
Maximum violations: Integrality 3.902288396999999e-06 Constraints 2.81133904261283e-05 Objective 1.47339791e-12
"""
name = "SolCheckerReader"
def extractStatistic(self, line : str):
if line.startswith("Read SOL: 1"):
self.addData(Key.SolCheckerRead, True)
elif line.startswith("Read SOL: 0"):
self.addData(Key.SolCheckerRead, False)
if line.startswith("Check SOL: Integrality 1 Constraints 1 Objective 1"):
self.addData(Key.SolCheckerFeas, True)
elif line.startswith("Check SOL:"):
self.addData(Key.SolCheckerFeas, False)
| StarcoderdataPython |
4825745 |
import numpy as np
class Graph_data_container:
def __init__(self, x:np.ndarray, y:np.ndarray, label:str) -> None:
self.x = x
self.y = y
self.label = label
@property
def xs_lim(self):
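# Snap the x-axis limits outward to the nearest enclosing powers of ten (log-scale friendly)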
x_l = np.floor(np.log10(max(1, min(self.x))))
x_u = np.ceil(np.log10(max(1, max(self.x))))
return [10**x_l, 10**x_u]
@property
def ys_lim(self):
y_l = np.floor(np.log10(min(self.y))) - 1
y_u = np.ceil(np.log10(max(self.y))) + 1
return [10**y_l, 10**y_u]
@property
def extrema(self):
"""returns the extreme values for x and y
[x_min, x_max, y_min, y_max]
Returns:
_type_: _description_
"""
return [self.x.min(), self.x.max(), self.y.min(), self.y.max()]
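# Example (assumed inputs): Graph_data_container(np.array([1, 10, 100]), np.array([0.5, 5, 50]), "run A").xs_lim
# evaluates to [1.0, 100.0], snapping to the enclosing decades.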
| StarcoderdataPython |
1756580 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from textwrap import dedent
from typing import Optional, Sequence
import libcst as cst
import libcst.matchers as m
import libcst.metadata as meta
from libcst.matchers import extractall, findall
from libcst.testing.utils import UnitTest
class MatchersFindAllTest(UnitTest):
def assertNodeSequenceEqual(
self,
seq1: Sequence[cst.CSTNode],
seq2: Sequence[cst.CSTNode],
msg: Optional[str] = None,
) -> None:
suffix = "" if msg is None else f"\n{msg}"
if len(seq1) != len(seq2):
raise AssertionError(
f"\n{seq1!r}\nis not deeply equal to \n{seq2!r}{suffix}"
)
for node1, node2 in zip(seq1, seq2):
if not node1.deep_equals(node2):
raise AssertionError(
f"\n{seq1!r}\nis not deeply equal to \n{seq2!r}{suffix}"
)
def test_findall_with_sentinels(self) -> None:
# Verify behavior when provided a sentinel
nothing = findall(cst.RemovalSentinel.REMOVE, m.Name("True") | m.Name("False"))
self.assertNodeSequenceEqual(nothing, [])
nothing = findall(cst.MaybeSentinel.DEFAULT, m.Name("True") | m.Name("False"))
self.assertNodeSequenceEqual(nothing, [])
def test_simple_findall(self) -> None:
# Find all booleans in a tree
code = """
a = 1
b = True
def foo(bar: int) -> bool:
return False
"""
module = cst.parse_module(dedent(code))
booleans = findall(module, m.Name("True") | m.Name("False"))
self.assertNodeSequenceEqual(booleans, [cst.Name("True"), cst.Name("False")])
def test_findall_with_metadata_wrapper(self) -> None:
# Find all assignments in a tree
code = """
a = 1
b = True
def foo(bar: int) -> bool:
return False
"""
module = cst.parse_module(dedent(code))
wrapper = meta.MetadataWrapper(module)
# Test that when we find over a wrapper, we implicitly use it for
# metadata as well as traversal.
booleans = findall(
wrapper,
m.MatchMetadata(
meta.ExpressionContextProvider, meta.ExpressionContext.STORE
),
)
self.assertNodeSequenceEqual(
booleans,
[
cst.Name("a"),
cst.Name("b"),
cst.Name("foo"),
cst.Name("bar"),
],
)
# Test that we can provide an explicit resolver and tree
booleans = findall(
wrapper.module,
m.MatchMetadata(
meta.ExpressionContextProvider, meta.ExpressionContext.STORE
),
metadata_resolver=wrapper,
)
self.assertNodeSequenceEqual(
booleans,
[
cst.Name("a"),
cst.Name("b"),
cst.Name("foo"),
cst.Name("bar"),
],
)
# Test that failing to provide metadata leads to no match
booleans = findall(
wrapper.module,
m.MatchMetadata(
meta.ExpressionContextProvider, meta.ExpressionContext.STORE
),
)
self.assertNodeSequenceEqual(booleans, [])
def test_findall_with_visitors(self) -> None:
# Find all assignments in a tree
class TestVisitor(m.MatcherDecoratableVisitor):
METADATA_DEPENDENCIES: Sequence[meta.ProviderT] = (
meta.ExpressionContextProvider,
)
def __init__(self) -> None:
super().__init__()
self.results: Sequence[cst.CSTNode] = ()
def visit_Module(self, node: cst.Module) -> None:
self.results = self.findall(
node,
m.MatchMetadata(
meta.ExpressionContextProvider, meta.ExpressionContext.STORE
),
)
code = """
a = 1
b = True
def foo(bar: int) -> bool:
return False
"""
module = cst.parse_module(dedent(code))
wrapper = meta.MetadataWrapper(module)
visitor = TestVisitor()
wrapper.visit(visitor)
self.assertNodeSequenceEqual(
visitor.results,
[
cst.Name("a"),
cst.Name("b"),
cst.Name("foo"),
cst.Name("bar"),
],
)
def test_findall_with_transformers(self) -> None:
# Find all assignments in a tree
class TestTransformer(m.MatcherDecoratableTransformer):
METADATA_DEPENDENCIES: Sequence[meta.ProviderT] = (
meta.ExpressionContextProvider,
)
def __init__(self) -> None:
super().__init__()
self.results: Sequence[cst.CSTNode] = ()
def visit_Module(self, node: cst.Module) -> None:
self.results = self.findall(
node,
m.MatchMetadata(
meta.ExpressionContextProvider, meta.ExpressionContext.STORE
),
)
code = """
a = 1
b = True
def foo(bar: int) -> bool:
return False
"""
module = cst.parse_module(dedent(code))
wrapper = meta.MetadataWrapper(module)
visitor = TestTransformer()
wrapper.visit(visitor)
self.assertNodeSequenceEqual(
visitor.results,
[
cst.Name("a"),
cst.Name("b"),
cst.Name("foo"),
cst.Name("bar"),
],
)
class MatchersExtractAllTest(UnitTest):
def test_extractall_simple(self) -> None:
expression = cst.parse_expression("a + b[c], d(e, f * g, h.i.j)")
matches = extractall(expression, m.Arg(m.SaveMatchedNode(~m.Name(), "expr")))
extracted_args = cst.ensure_type(
cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call
).args
self.assertEqual(
matches,
[{"expr": extracted_args[1].value}, {"expr": extracted_args[2].value}],
)
| StarcoderdataPython |
1783002 | # src/main.py
def main():
import sys, time, random
try:
import pygame
except ImportError:
print("Please install pygame - it is a required module!")
return
from player import Player
from banana import Banana
from label import Label
from projectile import Projectile
print("Excellent - pygame is installed and imported!")
pygame.init()
paused = False
screen_scale = 1
screen_size = screen_width, screen_height = screen_scale * 960, screen_scale * 540
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption('Banana dodge v2')
banana_spawn_rate, frames_until_banana_spawn = 100, 1
bananas_dodged, bananas_shot, lives = 0, 0, 3
player_bob_rate = 3
shooting_cool_down, frames_until_can_shoot = 75, 30
projectile_speed = [0, -4 * int(screen_height / 480)]
background_colour = (250, 250, 250)
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill(background_colour)
player_1 = Player(pygame.image.load("../img/nyan-balloon.png"), [int(screen_width / 480), 0], screen_size)
labels = [
Label("0 frames until banana"),
Label("0 frames until shooting is available"),
Label("0 bananas dodged"),
Label("0 bananas shot"),
Label("3 lives")
] # note: NOT a dictionary, to maintain ordering
pause_label = pygame.font.Font(None, 50).render("Paused", 1, (10, 10, 10))
pause_label_rect = pause_label.get_rect()
pause_label_rect.centerx = background.get_rect().centerx
pause_label_rect.centery = background.get_rect().centery
bananas = []
projectiles = []
def spawn_banana():
speed = [random.choice([0, 0, 0, 0, 0, -1, -1, 1, 1, -2, 2]), 2]
if random.random() < 0.05:
img = pygame.image.load("../img/life-banana.bmp")
bananas.append(Banana(True, img, int(random.random() * screen_width), 0, speed))
else:
img = pygame.image.load("../img/banana.bmp")
bananas.append(Banana(False, img, int(random.random() * screen_width), 0, speed))
def shoot_projectile_from_player(player):
img = pygame.image.load("../img/projectile.png")
projectiles.append(Projectile(img, projectile_speed, player.get_rect()))
def background_fade():
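# Ease each RGB channel of the flash colour back toward the neutral background, one step per frame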
background_recover_rate = 2
r, g, b = background_colour
if background_colour != (250, 250, 250):
if r < 250:
r += background_recover_rate
elif r > 250:
r -= background_recover_rate
if g < 250:
g += background_recover_rate
elif g > 250:
g -= background_recover_rate
if b < 250:
b += background_recover_rate
elif b > 250:
b -= background_recover_rate
return r, g, b
def update_labels():
# update labels ready for drawing
labels[0].set_text("{} frames until banana (every {} frames)".format(frames_until_banana_spawn, banana_spawn_rate))
labels[1].set_text("{} frames until shooting is available".format(frames_until_can_shoot))
labels[2].set_text("{} bananas dodged".format(bananas_dodged))
labels[3].set_text("{} bananas shot".format(bananas_shot))
labels[4].set_text("{} lives".format(lives))
def process_events(p_paused):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
p_paused = not p_paused
if p_paused:
screen.blit(pause_label, pause_label_rect)
pygame.display.flip()
return p_paused
def show_death_messages(p_background, p_bananas_dodged, p_bananas_shot, p_screen):
print("Game over!\n{} bananas dodged successfully".format(p_bananas_dodged))
game_over_label = pygame.font.Font(None, 40).render("Game over!", 1, (10, 10, 10))
bananas_dodged_label = pygame.font.Font(None, 36).render(
"You dodged {} bananas, and shot {}!".format(p_bananas_dodged, p_bananas_shot), 1, (10, 10, 10))
escape_exit_label = pygame.font.Font(None, 25).render("Press Esc to exit", 1, (10, 10, 10))
gol_rect = game_over_label.get_rect()
bdl_rect = bananas_dodged_label.get_rect()
eel_rect = escape_exit_label.get_rect()
gol_rect.centerx = p_background.get_rect().centerx
bdl_rect.centerx = p_background.get_rect().centerx
eel_rect.centerx = p_background.get_rect().centerx
gol_rect.bottom = p_background.get_rect().centery - gol_rect.height - 10
bdl_rect.centery = p_background.get_rect().centery
eel_rect.top = p_background.get_rect().centery + gol_rect.height + 10
p_screen.blit(game_over_label, gol_rect)
p_screen.blit(bananas_dodged_label, bdl_rect)
p_screen.blit(escape_exit_label, eel_rect)
pygame.display.flip()
while lives > 0:
paused = process_events(paused)
if not paused:
frames_until_banana_spawn -= 1
if frames_until_can_shoot > 0:
frames_until_can_shoot -= 1
if frames_until_banana_spawn <= 0:
spawn_banana()
frames_until_banana_spawn = banana_spawn_rate
# player movement
keys = pygame.key.get_pressed() # checking pressed keys
if keys[pygame.K_a] or keys[pygame.K_LEFT]:
if player_1.get_rect().left > 0:
player_1.go_left()
if keys[pygame.K_d] or keys[pygame.K_RIGHT]:
if player_1.get_rect().right < screen_width:
player_1.go_right()
if keys[pygame.K_SPACE]:
if frames_until_can_shoot <= 0:
frames_until_can_shoot = shooting_cool_down
shoot_projectile_from_player(player_1)
for p in projectiles:
p.move_at_speed()
rem_b = []
for b in bananas:
b.move_at_speed()
if b.get_rect().left < 0: # separated to ensure correct direction is stuck to
b.ensure_travel_right()
elif b.get_rect().right > screen_width:
b.ensure_travel_left()
if b.get_rect().colliderect(player_1.get_rect()):
rem_b.append(b)
if b.get_gives_life():
if lives < 10:
lives += 1
background_colour = (0, 254, 254)
else:
lives -= 1
background_colour = (250, 0, 0)
else:
ind = b.get_rect().collidelist([x.get_rect() for x in projectiles])
if ind != -1:
if b.get_gives_life():
if lives < 10:
lives += 1
background_colour = (0, 254, 254)
else:
bananas_shot += 1 # life-bananas don't count towards this
rem_b.append(b)
del projectiles[ind]
elif b.get_rect().top > screen_height: # if not collided with player, check for locational despawn
rem_b.append(b)
bananas_dodged += 1
if banana_spawn_rate > 15:
banana_spawn_rate -= 2
bananas = [b for b in bananas if b not in rem_b]
update_labels()
# draw on background
background_colour = background_fade()
background.fill(background_colour)
top = 8
for l in labels:
l.get_rect().top = top
l.get_rect().left = 4
background.blit(l.get_rendered_text(), l.get_rect())
top += l.get_rect().height + 8
# draw background on screen then draw on screen in front
screen.blit(background, (0, 0))
for b in bananas:
# animate spinning
b.rotate_tick()
screen.blit(b.get_img(), b.get_rect())
for p in projectiles:
screen.blit(p.get_img(), p.get_rect())
# animate bobbing
player_1.balloon_bob(player_bob_rate)
screen.blit(player_1.get_img(), player_1.get_rect())
pygame.display.flip()
time.sleep(0.01)
else:
time.sleep(0.2) # paused
show_death_messages(background, bananas_dodged, bananas_shot, screen)
escaped = False
while not escaped:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
escaped = (event.key == pygame.K_ESCAPE)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Please exit using the X button, or Esc on the death screen next time!")
print("See you again soon!")
| StarcoderdataPython |
3291335 | import numpy as np
pipe = np.zeros((2000, 2000), dtype=bool)
with open("12.txt") as f:
for line in f:
a = line.strip().split(' <-> ')
i = int(a[0])
b = list(map(int, a[1].split(', ')))
for j in b:
pipe[i, j] = pipe[j, i] = True
def connected(i):
return np.arange(2000)[pipe[i, :]]
def connect(i):
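# Grow node i's reachability set by OR-ing in neighbour rows until no new nodes appear (a fixpoint)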
pre = 0
post = pipe[i, :].sum()
while pre != post:
pre = post
pipe[i, i] = True
for j in connected(i):
pipe[i, :] = pipe[i, :] | pipe[j, :]
post = pipe[i, :].sum()
return post
# Part 1
print(connect(0))
# Part 2
for i in range(1, 2000):
connect(i)
print(np.unique(pipe, axis=0).shape[0])
| StarcoderdataPython |
3248660 | # kberkey/ccal: ccal/update_variant_dict.py
from .get_allelic_frequencies import get_allelic_frequencies
from .get_genotype import get_genotype
from .get_maf_variant_classification import get_maf_variant_classification
from .get_population_allelic_frequencies import get_population_allelic_frequencies
from .get_variant_start_and_end_positions import get_variant_start_and_end_positions
from .get_variant_type import get_variant_type
def update_variant_dict(variant_dict):
ref = variant_dict["REF"]
alt = variant_dict["ALT"]
variant_dict["variant_type"] = get_variant_type(ref, alt)
start_position, end_position = get_variant_start_and_end_positions(
int(variant_dict["POS"]), ref, alt
)
variant_dict["start_position"] = start_position
variant_dict["end_position"] = end_position
caf = variant_dict.get("CAF")
if caf:
variant_dict[
"population_allelic_frequencies"
] = get_population_allelic_frequencies(caf)
for ann_dict in variant_dict["ANN"].values():
ann_dict["variant_classification"] = get_maf_variant_classification(
ann_dict["effect"], ref, alt
)
for sample_dict in variant_dict["sample"].values():
if "GT" in sample_dict:
sample_dict["genotype"] = get_genotype(ref, alt, sample_dict["GT"])
if "AD" in sample_dict and "DP" in sample_dict:
sample_dict["allelic_frequency"] = get_allelic_frequencies(
sample_dict["AD"], sample_dict["DP"]
)
| StarcoderdataPython |
1790405 | from conans import ConanFile
class RapidJSONConan(ConanFile):
name = "RapidJSON"
version = "1.0.2"
license = "MIT, https://github.com/miloyip/rapidjson/blob/master/license.txt"
url = "https://github.com/miloyip/rapidjson/"
def source(self):
self.output.info("")
self.output.info("---------- source ----------")
self.output.info("")
self.run("git clone https://github.com/miloyip/rapidjson")
self.run("cd rapidjson && git checkout v%s" % self.version)
def package(self):
self.output.info("")
self.output.info("---------- package ----------")
self.output.info("")
self.copy("*", dst="include", src="rapidjson/include")
def build(self):
self.output.info("")
self.output.info("---------- build ----------")
self.output.info("") | StarcoderdataPython |
3293669 |
"""
0088. Merge Sorted Array
Array
Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
Note:
The number of elements initialized in nums1 and nums2 are m and n respectively.
You may assume that nums1 has enough space (size that is greater or equal to m + n) to hold additional elements from nums2.
Example:
Input:
nums1 = [1,2,3,0,0,0], m = 3
nums2 = [2,5,6], n = 3
Output: [1,2,2,3,5,6]
"""
# Solution 1:
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int):
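# Fill nums1 from the back so values that still need merging are never overwritten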
while n > 0:
if m <= 0 or nums2[n-1] >= nums1[m-1]:
nums1[m+n-1] = nums2[n-1]
n -= 1
else:
nums1[m+n-1] = nums1[m-1]
m -= 1
return nums1
# Solution 2:
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int):
l1, l2, end = m-1, n-1, m+n-1
while l1 >= 0 and l2 >= 0:
if nums2[l2] > nums1[l1]:
nums1[end] = nums2[l2]
l2 -= 1
else:
nums1[end] = nums1[l1]
l1 -= 1
end -= 1
if l1 < 0: # if nums2 left
nums1[:l2+1] = nums2[:l2+1]
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int):
m, n = m-1, n-1
while m >= 0 and n >= 0:
if nums1[m] > nums2[n]:
nums1[m+n+1] = nums1[m]
m -= 1
else:
nums1[m+n+1] = nums2[n]
n -= 1
if n != -1: # nums2 is still left
nums1[:n+1] = nums2[:n+1]
| StarcoderdataPython |
98110 | from .merge_result_infos import merge_result_infos
from .field_to_fc import field_to_fc
from .html_doc import html_doc
from .unitary_field import unitary_field
from .extract_field import extract_field
from .bind_support import bind_support
from .scalars_to_field import scalars_to_field
from .change_location import change_location
from .strain_from_voigt import strain_from_voigt
from .set_property import set_property
from .forward_field import forward_field
from .forward_fields_container import forward_fields_container
from .forward_meshes_container import forward_meshes_container
from .forward import forward
from .txt_file_to_dpf import txt_file_to_dpf
from .bind_support_fc import bind_support_fc
from .default_value import default_value
from .extract_time_freq import extract_time_freq
from .python_generator import python_generator
from .make_overall import make_overall
from .merge_fields_containers import merge_fields_containers
from .merge_scopings import merge_scopings
from .merge_materials import merge_materials
from .merge_property_fields import merge_property_fields
from .remote_workflow_instantiate import remote_workflow_instantiate
from .remote_operator_instantiate import remote_operator_instantiate
from .merge_fields_by_label import merge_fields_by_label
from .merge_scopings_containers import merge_scopings_containers
from .merge_meshes import merge_meshes
from .merge_time_freq_supports import merge_time_freq_supports
from .merge_fields import merge_fields
from .merge_supports import merge_supports
from .merge_meshes_containers import merge_meshes_containers
from .change_shell_layers import change_shell_layers
| StarcoderdataPython |
44625 |
import os
from gtts import gTTS
from pathlib import Path
def generate_audio_file_from_text(
text, file_name, file_type="mp3", language="en", slow=False
):
audio = gTTS(text=text, lang=language, slow=slow)
file_path = os.path.join(
Path().absolute(),
"media",
"common_responses",
f"{file_name}.{file_type}",
)
audio.save(file_path)
return file_path
if __name__ == "__main__":
# Replace with list of tuples.
# E.g. [("Please could you repeat that?", "pardon")]
texts_and_file_names = []
for text, file_name in texts_and_file_names:
generate_audio_file_from_text(text, file_name)
| StarcoderdataPython |
3335352 | # -*- coding: utf-8 -*-
import os
import sys
from os import makedirs
from os.path import exists, join
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from ply_helper import read_ply, write_ply
from sklearn.metrics import confusion_matrix
from metrics import IoU_from_confusions
import json
import argparse
import numpy as np
import tensorflow as tf
import socket
import importlib
import time
from pathlib import Path
from scannet_dataset_grid import ScannetDataset
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--data', type=str, default='../data/Scannet', help='Root for dataset')
parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 4]')
parser.add_argument('--model_path', required=True, help='model checkpoint file path')
parser.add_argument('--num_votes', type=int, default=100, help='Aggregate scores from multiple test [default: 100]')
parser.add_argument('--split', type=str, default='validation', help='[validation/test]')
parser.add_argument('--saving', action='store_true', help='Whether to save test results')
parser.add_argument('--debug', action='store_true', help='Whether to run in debug mode')
FLAGS = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
config = parser.parse_args()
with open(Path(FLAGS.model_path).parent / 'args.txt', 'r') as f:
config.__dict__ = json.load(f)
config.validation_size = 500
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = config.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
WITH_RGB = config.with_rgb
MODEL = importlib.import_module(config.model) # import network module
NUM_CLASSES = 21
HOSTNAME = socket.gethostname()
feature_channel = 3 if WITH_RGB else 0
class TimeLiner:
def __init__(self):
self._timeline_dict = None
def update_timeline(self, chrome_trace):
# convert Chrome trace to python dict
chrome_trace_dict = json.loads(chrome_trace)
# for first run store full trace
if self._timeline_dict is None:
self._timeline_dict = chrome_trace_dict
# for other - update only time consumption, not definitions
else:
for event in chrome_trace_dict['traceEvents']:
# events time consumption started with 'ts' prefix
if 'ts' in event:
self._timeline_dict['traceEvents'].append(event)
def save(self, f_name):
with open(f_name, 'w') as f:
json.dump(self._timeline_dict, f)
class ModelTester:
def __init__(self, pred, num_classes, saver, restore_snap=None):
self.saver = saver
cProto = tf.ConfigProto()
cProto.gpu_options.allow_growth = True
cProto.allow_soft_placement = True
cProto.log_device_placement = False
self.sess = tf.Session(config=cProto)
if restore_snap is not None:
self.saver.restore(self.sess, restore_snap)
print("Model restored from " + restore_snap)
else:
self.sess.run(tf.global_variables_initializer())
# Add a softmax operation for predictions
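# (channel 0 is the ignored/unlabelled class, so only the remaining logits are normalised)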
self.prob_logits = tf.nn.softmax(pred[:, :, 1:])
self.num_classes = num_classes
def test_cloud_segmentation(self, input, dataset, test_init_op, num_votes=100, saving=FLAGS.saving):
# Smoothing parameter for votes
test_smooth = 0.98
# Initialise iterator with train data
self.sess.run(test_init_op)
# Initiate global prediction over test clouds
nc_model = self.num_classes - 1
self.test_probs = [np.zeros((l.data.shape[0], nc_model), dtype=np.float32) for l in dataset.input_trees['test']]
# Test saving path
if saving:
saving_path = time.strftime('Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
test_path = join('test', saving_path.split('/')[-1])
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'predictions')):
makedirs(join(test_path, 'predictions'))
if not exists(join(test_path, 'probs')):
makedirs(join(test_path, 'probs'))
else:
test_path = None
i0 = 0
epoch_ind = 0
last_min = -0.5
mean_dt = np.zeros(2)
last_display = time.time()
while last_min < num_votes:
try:
# Run one step of the model.
t = [time.time()]
ops = (self.prob_logits,
input['labels'],
input['point_inds'],
input['cloud_inds'])
stacked_probs, labels, point_inds, cloud_inds = \
self.sess.run(ops, {input['is_training_pl']: False})
t += [time.time()]
# Stack all predictions for each class separately
for b in range(stacked_probs.shape[0]):
# Get prediction (only for the concerned parts)
probs = stacked_probs[b]
inds = point_inds[b]
c_i = cloud_inds[b]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Epoch {:3d}, step {:3d} (timings : {:4.2f} {:4.2f}). min potential = {:.1f}'
print(message.format(epoch_ind, i0, 1000 * (mean_dt[0]), 1000 * (mean_dt[1]),
np.min(dataset.min_potentials['test'])))
i0 += 1
except tf.errors.OutOfRangeError:
# Save predicted cloud
new_min = np.min(dataset.min_potentials['test'])
print('Epoch {:3d}, end. Min potential = {:.1f}'.format(epoch_ind, new_min))
print([np.mean(pots) for pots in dataset.potentials['test']])
if last_min + 2 < new_min:
print('Saving clouds')
# Update last_min
last_min = new_min
# Project predictions
print('\nReproject Vote #{:d}'.format(int(np.floor(new_min))))
t1 = time.time()
files = dataset.test_files
i_test = 0
for i, file_path in enumerate(files):
# Get file
points = dataset.load_evaluation_points(file_path)
# Reproject probs
probs = self.test_probs[i_test][dataset.test_proj[i_test], :]
# Insert false columns for ignored labels
probs2 = probs.copy()
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
probs2 = np.insert(probs2, l_ind, 0, axis=1)
# Get the predicted labels
preds = dataset.label_values[np.argmax(probs2, axis=1)].astype(np.int32)
# Project potentials on original points
pots = dataset.potentials['test'][i_test][dataset.test_proj[i_test]]
# Save plys
cloud_name = file_path.split('/')[-1]
test_name = join(test_path, 'predictions', cloud_name)
write_ply(test_name,
[points, preds, pots],
['x', 'y', 'z', 'preds', 'pots'])
test_name2 = join(test_path, 'probs', cloud_name)
prob_names = ['_'.join(dataset.label_to_names[label].split()) for label in dataset.label_values
if label not in dataset.ignored_labels]
write_ply(test_name2,
[points, probs],
['x', 'y', 'z'] + prob_names)
# Save ascii preds
ascii_name = join(test_path, 'predictions', cloud_name[:-4] + '.txt')
np.savetxt(ascii_name, preds, fmt='%d')
i_test += 1
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
self.sess.run(test_init_op)
epoch_ind += 1
i0 = 0
continue
return
def test_cloud_segmentation_on_val(self, input, dataset, val_init_op, num_votes=100, saving=True):
# Smoothing parameter for votes
test_smooth = 0.95
# Initialise iterator with train data
self.sess.run(val_init_op)
# Initiate global prediction over test clouds
nc_model = self.num_classes - 1
self.test_probs = [np.zeros((l.shape[0], nc_model), dtype=np.float32)
for l in dataset.input_labels['validation']]
# Number of points per class in validation set
val_proportions = np.zeros(nc_model, dtype=np.float32)
i = 0
for label_value in dataset.label_values:
if label_value not in dataset.ignored_labels:
val_proportions[i] = np.sum([np.sum(labels == label_value)
for labels in dataset.validation_labels])
i += 1
# Test saving path
if saving:
saving_path = time.strftime('Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
test_path = join('test', saving_path)
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'val_predictions')):
makedirs(join(test_path, 'val_predictions'))
if not exists(join(test_path, 'val_probs')):
makedirs(join(test_path, 'val_probs'))
else:
test_path = None
i0 = 0
epoch_ind = 0
last_min = -0.5
mean_dt = np.zeros(2)
last_display = time.time()
while last_min < num_votes:
try:
# Run one step of the model.
t = [time.time()]
ops = (self.prob_logits,
input['labels'],
input['point_inds'],
input['cloud_inds'])
stacked_probs, labels, point_inds, cloud_inds = self.sess.run(ops, {input['is_training_pl']: False})
t += [time.time()]
# Stack all validation predictions for each class separately
for b in range(stacked_probs.shape[0]):
# Get prediction (only for the concerned parts)
probs = stacked_probs[b]
inds = point_inds[b]
c_i = cloud_inds[b]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 10.0:
last_display = t[-1]
message = 'Epoch {:3d}, step {:3d} (timings : {:4.2f} {:4.2f}). min potential = {:.1f}'
print(message.format(epoch_ind, i0, 1000 * (mean_dt[0]), 1000 * (mean_dt[1]),
np.min(dataset.min_potentials['validation'])))
i0 += 1
except tf.errors.OutOfRangeError:
# Save predicted cloud
new_min = np.min(dataset.min_potentials['validation'])
print('Epoch {:3d}, end. Min potential = {:.1f}'.format(epoch_ind, new_min))
if last_min + 1 < new_min:
# Update last_min
last_min += 1
# Show vote results (On subcloud so it is not the good values here)
print('\nConfusion on sub clouds')
Confs = []
for i_test in range(dataset.num_validation):
# Insert false columns for ignored labels
probs = self.test_probs[i_test]
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
probs = np.insert(probs, l_ind, 0, axis=1)
# Predicted labels
preds = dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32)
# Targets
targets = dataset.input_labels['validation'][i_test]
# Confs
Confs += [confusion_matrix(targets, preds, dataset.label_values)]
# Regroup confusions
C = np.sum(np.stack(Confs), axis=0).astype(np.float32)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(dataset.label_values))):
if label_value in dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1)
# Rescale with the right number of point per class
C *= np.expand_dims(val_proportions / (np.sum(C, axis=1) + 1e-6), 1)
# Compute IoUs
IoUs = IoU_from_confusions(C)
mIoU = np.mean(IoUs)
s = '{:5.2f} | '.format(100 * mIoU)
for IoU in IoUs:
s += '{:5.2f} '.format(100 * IoU)
print(s + '\n')
if int(np.ceil(new_min)) % 4 == 0:
# Project predictions
print('\nReproject Vote #{:d}'.format(int(np.floor(new_min))))
t1 = time.time()
files = dataset.train_files
i_val = 0
proj_probs = []
for i, file_path in enumerate(files):
if dataset.all_splits[i] == dataset.validation_split:
# Reproject probs on the evaluations points
probs = self.test_probs[i_val][dataset.validation_proj[i_val], :]
proj_probs += [probs]
i_val += 1
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Show vote results
print('Confusion on full clouds')
t1 = time.time()
Confs = []
for i_test in range(dataset.num_validation):
# Insert false columns for ignored labels
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
proj_probs[i_test] = np.insert(proj_probs[i_test], l_ind, 0, axis=1)
# Get the predicted labels
preds = dataset.label_values[np.argmax(proj_probs[i_test], axis=1)].astype(np.int32)
# Confusion
targets = dataset.validation_labels[i_test]
Confs += [confusion_matrix(targets, preds, dataset.label_values)]
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Regroup confusions
C = np.sum(np.stack(Confs), axis=0)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(dataset.label_values))):
if label_value in dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1)
IoUs = IoU_from_confusions(C)
mIoU = np.mean(IoUs)
s = '{:5.2f} | '.format(100 * mIoU)
for IoU in IoUs:
s += '{:5.2f} '.format(100 * IoU)
print('-' * len(s))
print(s)
print('-' * len(s) + '\n')
# Save predictions
print('Saving clouds')
t1 = time.time()
files = dataset.train_files
i_test = 0
for i, file_path in enumerate(files):
if dataset.all_splits[i] == dataset.validation_split:
# Get points
points = dataset.load_evaluation_points(file_path)
# Get the predicted labels
preds = dataset.label_values[np.argmax(proj_probs[i_test], axis=1)].astype(np.int32)
# Project potentials on original points
pots = dataset.potentials['validation'][i_test][dataset.validation_proj[i_test]]
# Save plys
cloud_name = file_path.split('/')[-1]
test_name = join(test_path, 'val_predictions', cloud_name)
write_ply(test_name,
[points, preds, pots, dataset.validation_labels[i_test]],
['x', 'y', 'z', 'preds', 'pots', 'gt'])
test_name2 = join(test_path, 'val_probs', cloud_name)
prob_names = ['_'.join(dataset.label_to_names[label].split())
for label in dataset.label_values]
write_ply(test_name2,
[points, proj_probs[i_test]],
['x', 'y', 'z'] + prob_names)
i_test += 1
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
self.sess.run(val_init_op)
epoch_ind += 1
i0 = 0
continue
return
def val():
with tf.Graph().as_default():
with tf.device('/gpu:0'):
dataset = ScannetDataset(FLAGS.data, NUM_POINT, config.input_threads, load_test=FLAGS.split=='test', buffer=config.num_buffer, debug=FLAGS.debug)
dl0 = config.first_subsampling_dl
dataset.load_subsampled_clouds(dl0)
map_func = dataset.get_tf_mapping(config)
gen_function_val, gen_types, gen_shapes = dataset.get_batch_gen(FLAGS.split, config)
val_data = tf.data.Dataset.from_generator(gen_function_val, gen_types, gen_shapes)
# Transform inputs
val_data = val_data.map(map_func=map_func, num_parallel_calls=dataset.num_threads)
val_data = val_data.batch(FLAGS.batch_size, drop_remainder=True)
val_data = val_data.prefetch(10)
# create a iterator of the correct shape and type
iter = tf.data.Iterator.from_structure(val_data.output_types, val_data.output_shapes)
flat_inputs = iter.get_next()
# create the initialisation operations
val_init_op = iter.make_initializer(val_data)
is_training_pl = tf.placeholder(tf.bool, shape=())
if not WITH_RGB:
points = flat_inputs[0]
else:
points = tf.concat([flat_inputs[0], flat_inputs[1][:, :, :3]], axis=-1)
point_labels = flat_inputs[2]
pred, end_points = MODEL.get_model(points, is_training_pl, NUM_CLASSES,
feature_channel=feature_channel)
saver = tf.train.Saver()
input = {
'is_training_pl': is_training_pl,
'pred': pred,
'labels': point_labels,
'point_inds': flat_inputs[-2],
'cloud_inds': flat_inputs[-1]}
tester = ModelTester(pred, NUM_CLASSES, saver, MODEL_PATH)
if FLAGS.split == "validation":
tester.test_cloud_segmentation_on_val(input, dataset, val_init_op)
else:
tester.test_cloud_segmentation(input, dataset, val_init_op)
if __name__ == "__main__":
val()
| StarcoderdataPython |
155389 | import discord
from discord.ext import commands
from random import choice as rndchoice
from .utils import checks
import os
class Succ:
"""Succ command."""
def __init__(self, bot):
self.bot = bot
@commands.group(pass_context=True, invoke_without_command=True)
async def givemethesucc(self, ctx, *, user: discord.Member=None):
"""Gives you succ"""
botid = self.bot.user.id
user = ctx.message.author
await self.bot.say("**Slurp Slurp**")
def setup(bot):
n = Succ(bot)
bot.add_cog(n)
| StarcoderdataPython |
1600018 | # djt5019/episode_renamer: setup.py
#!/usr/bin/env python
from setuptools import find_packages, setup
from eplist import __author__ as author
from eplist import __email__ as email
from eplist import __version__ as version
import sys
info = sys.version_info
if (info.major, info.minor) != (2, 7):
print "Requires Python 2.7"
exit(1)
setup(
name='eplist',
version=version,
description='Simple episode renaming program',
long_description=open('README.rst').read(),
author=author,
author_email=email,
url='https://github.com/djt5019/episode_renamer',
packages=find_packages(),
license="unlicense",
zip_safe=False,
platforms="all",
classifiers=[
"Programming Language :: Python :: 2.7",
"Topic :: Multimedia :: Video",
"Topic :: Utilities",
"Environment :: Console",
"Environment :: X11 Applications :: Qt",
"Operating System :: OS Independent",
],
requires=[
"BeautifulSoup (>=3.2.0)",
"requests (>=0.9.1)",
],
entry_points={
'console_scripts': ['eplist = eplist.main:main']
},
package_data={'': ['eplist.py', 'LICENSE', 'README.rst']},
include_package_data=True,
)
| StarcoderdataPython |
3258703 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor aws extensions
# https://github.com/thumbor/thumbor-aws
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2021 <NAME> <EMAIL>
from uuid import uuid4
import pytest
from preggy import expect
from thumbor.config import Config
from tornado.testing import gen_test
from tests import BaseS3TestCase
import thumbor_aws.loader
from thumbor_aws.storage import Storage
@pytest.mark.usefixtures("test_images")
class LoaderTestCase(BaseS3TestCase):
@property
def bucket_name(self):
"""Name of the bucket to put test files in"""
return self.context.config.AWS_LOADER_BUCKET_NAME
@gen_test
async def test_can_load_file_from_s3(self):
"""
Verifies that an image can be loaded from S3
using Loader and that it's there
"""
await self.ensure_bucket()
storage = Storage(self.context)
filepath = f"/test/can_put_file_{uuid4()}"
expected = self.test_images["default"]
await storage.put(filepath, expected)
exists = await storage.exists(filepath)
expect(exists).to_be_true()
result = await thumbor_aws.loader.load(self.context, filepath)
expect(result.successful).to_be_true()
expect(result.buffer).to_equal(expected)
expect(result.metadata["size"]).to_equal(len(expected))
expect(result.metadata["updated_at"]).not_to_be_null()
@gen_test
async def test_result_false_when_file_not_in_s3(self):
"""
Verifies that result is false when image not present in S3
"""
await self.ensure_bucket()
filepath = f"/test/can_put_file_{uuid4()}"
result = await thumbor_aws.loader.load(self.context, filepath)
expect(result.successful).to_be_false()
@pytest.mark.usefixtures("test_images")
class LoaderCompatibilityModeTestCase(LoaderTestCase):
def get_config(self) -> Config:
return self.get_compatibility_config()
@property
def bucket_name(self):
"""Name of the bucket to put test files in"""
return "test-bucket-compat"
@pytest.mark.usefixtures("test_images")
class EmptyBucketConfigLoaderTestCase(BaseS3TestCase):
def get_config(self) -> Config:
cfg = super().get_config()
cfg.AWS_LOADER_BUCKET_NAME = ""
return cfg
@gen_test
async def test_can_load_file_from_s3(self):
"""
Verifies that an image can be loaded from S3
using Loader and that it's there
"""
await self.ensure_bucket()
storage = Storage(self.context)
filepath = f"/test/can_put_file_{uuid4()}"
expected = self.test_images["default"]
await storage.put(filepath, expected)
exists = await storage.exists(filepath)
expect(exists).to_be_true()
filepath_with_bucket = (
f"/{self.context.config.AWS_STORAGE_BUCKET_NAME}{filepath}"
)
result = await thumbor_aws.loader.load(
self.context, filepath_with_bucket
)
expect(result.successful).to_be_true()
expect(result.buffer).to_equal(expected)
expect(result.metadata["size"]).to_equal(len(expected))
expect(result.metadata["updated_at"]).not_to_be_null()
@pytest.mark.usefixtures("test_images")
class LoaderNoPrefixTestCase(LoaderTestCase):
def get_config(self) -> Config:
cfg = super().get_config()
cfg.AWS_LOADER_BUCKET_NAME = "test-bucket-loader-no-prefix"
cfg.AWS_STORAGE_BUCKET_NAME = "test-bucket-loader-no-prefix"
cfg.AWS_LOADER_ROOT_PATH = ""
cfg.AWS_STORAGE_ROOT_PATH = ""
return cfg
| StarcoderdataPython |
3207379 | # qq4215279/study_python
import pickle
a1 = "高淇"
a2 = 234
a3 = [10,20,30,40]
with open("data.dat","wb") as f:
pickle.dump(a1,f)
pickle.dump(a2,f)
pickle.dump(a3,f)
with open("data.dat","rb") as f:
b1 = pickle.load(f); b2 = pickle.load(f); b3 = pickle.load(f)
print(b1); print(b2); print(b3)
print(id(a1));print(id(b1))
| StarcoderdataPython |
1693583 | # JouleCai/GeoSpaceLab
class Panel(object):
def __init__(self):
pass
def add_line(self):
pass
def add_image(self):
pass
def add_pcolor(self):
pass
def add_scatter(self):
pass
| StarcoderdataPython |
4810751 | # metrician/monitors/__init__.py
from .OOD import *
| StarcoderdataPython |
14633 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA Procrustean mapper"""
import unittest
import numpy as np
import itertools
from numpy.linalg import norm
from mvpa2.base import externals
from mvpa2.datasets.base import dataset_wizard
from mvpa2.testing import *
from mvpa2.testing.datasets import *
from mvpa2.mappers.procrustean import ProcrusteanMapper
svds = ["numpy"]
if externals.exists("liblapack.so"):
svds += ["dgesvd"]
if externals.exists("scipy"):
svds += ["scipy"]
class ProcrusteanMapperTests(unittest.TestCase):
@sweepargs(oblique=(False, True))
@sweepargs(svd=svds)
@reseed_rng()
def test_simple(self, svd, oblique):
d_orig = datasets["uni2large"].samples
d_orig2 = datasets["uni4large"].samples
for sdim, nf_s, nf_t, full_test in (
("Same 2D", 2, 2, True),
("Same 10D", 10, 10, True),
("2D -> 3D", 2, 3, True),
("3D -> 2D", 3, 2, False),
):
# figure out some "random" rotation
d = max(nf_s, nf_t)
R = get_random_rotation(nf_s, nf_t, d_orig)
if nf_s == nf_t:
adR = np.abs(1.0 - np.linalg.det(R))
self.assertTrue(
adR < 1e-10,
"Determinant of rotation matrix should " "be 1. Got it 1+%g" % adR,
)
self.assertTrue(norm(np.dot(R, R.T) - np.eye(R.shape[0])) < 1e-10)
for (s, scaling), demean in itertools.product(
((0.3, True), (1.0, False)), (False, True)
):
pm = ProcrusteanMapper(
scaling=scaling, oblique=oblique, svd=svd, demean=demean
)
# pm2 = ProcrusteanMapper(scaling=scaling, oblique=oblique)
if demean:
t1, t2 = d_orig[23, 1], d_orig[22, 1]
else:
t1, t2 = 0, 0
                full_test = False  # although it runs, it is not intended to perform properly
# Create source/target data
d = d_orig[:, :nf_s]
d_s = d + t1
d_t = np.dot(s * d, R) + t2
# train bloody mapper(s)
ds = dataset_wizard(samples=d_s, targets=d_t)
pm.train(ds)
## not possible with new interface
# pm2.train(d_s, d_t)
## verify that both created the same transformation
# npm2proj = norm(pm.proj - pm2.proj)
# self.assertTrue(npm2proj <= 1e-10,
# msg="Got transformation different by norm %g."
# " Had to be less than 1e-10" % npm2proj)
# self.assertTrue(norm(pm._offset_in - pm2._offset_in) <= 1e-10)
# self.assertTrue(norm(pm._offset_out - pm2._offset_out) <= 1e-10)
# do forward transformation on the same source data
d_s_f = pm.forward(d_s)
self.assertEqual(
d_s_f.shape,
d_t.shape,
msg="Mapped shape should be identical to the d_t",
)
dsf = d_s_f - d_t
ndsf = norm(dsf) / norm(d_t)
if full_test:
dsR = norm(s * R - pm.proj)
if not oblique:
self.assertTrue(
dsR <= 1e-12,
msg="We should have got reconstructed rotation+scaling "
"perfectly. Now got d scale*R=%g" % dsR,
)
self.assertTrue(
np.abs(s - pm._scale) < 1e-12,
msg="We should have got reconstructed scale "
"perfectly. Now got %g for %g" % (pm._scale, s),
)
self.assertTrue(
ndsf <= 1e-12,
msg="%s: Failed to get to the target space correctly."
" normed error=%g" % (sdim, ndsf),
)
# Test if we get back
d_s_f_r = pm.reverse(d_s_f)
# Test if recon proj is true inverse except for high->low projection
if nf_s <= nf_t:
assert_almost_equal(
np.dot(pm._proj, pm._recon),
np.eye(pm._proj.shape[0]),
err_msg="Deviation from identity matrix is too large",
)
dsfr = d_s_f_r - d_s
ndsfr = norm(dsfr) / norm(d_s)
if full_test:
self.assertTrue(
ndsfr <= 1e-12,
msg="%s: Failed to reconstruct into source space correctly."
" normed error=%g" % (sdim, ndsfr),
)
@reseed_rng()
def test_reflection(self, rep=10):
for i in range(rep):
from mvpa2.testing.datasets import get_random_rotation
d = np.random.random((100, 2))
T = get_random_rotation(d.shape[1])
d2 = np.dot(d, T)
# scale it up a bit
d2 *= 1.2
# add a reflection by flipping the first dimension
d2[:, 0] *= -1
ds = dataset_wizard(samples=d, targets=d2)
norm0 = np.linalg.norm(d - d2)
mapper = ProcrusteanMapper(scaling=False, reflection=False)
mapper.train(ds)
norm1 = np.linalg.norm(d2 - mapper.forward(ds).samples)
eps = 1e-7
self.assertLess(
norm1,
norm0 + eps,
msg="Procrustes should reduce difference, "
"but %f > %f" % (norm1, norm0),
)
mapper = ProcrusteanMapper(scaling=True, reflection=False)
mapper.train(ds)
norm2 = np.linalg.norm(d2 - mapper.forward(ds).samples)
self.assertLess(
norm2,
norm1 + eps,
msg="Procrustes with scaling should work better, "
"but %f > %f" % (norm2, norm1),
)
mapper = ProcrusteanMapper(scaling=False, reflection=True)
mapper.train(ds)
norm3 = np.linalg.norm(d2 - mapper.forward(ds).samples)
self.assertLess(
norm3,
norm1 + eps,
msg="Procrustes with reflection should work better, "
"but %f > %f" % (norm3, norm1),
)
mapper = ProcrusteanMapper(scaling=True, reflection=True)
mapper.train(ds)
norm4 = np.linalg.norm(d2 - mapper.forward(ds).samples)
self.assertLess(
norm4,
norm3 + eps,
msg="Procrustes with scaling should work better, "
"but %f > %f" % (norm4, norm3),
)
self.assertLess(
norm4,
norm2 + eps,
msg="Procrustes with reflection should work better, "
"but %f > %f" % (norm4, norm2),
)
def suite(): # pragma: no cover
return unittest.makeSuite(ProcrusteanMapperTests)
if __name__ == "__main__": # pragma: no cover
from . import runner
runner.run()
| StarcoderdataPython |
1600130 | def make_shirt(message, size="M"):
"""[Exibe uma camisa com tamanho e uma mensagem personalizados]
Args:
size ([string]): [Tamanho da camisa]
message ([string]): [Mensagem personalizada]
"""
print("Your shirt is ready.")
print(f"The size is {size} and the printed message is '{message}'.\n")
make_shirt("I am programmer", "G")
make_shirt("i'm a jedi like my father before me")
| StarcoderdataPython |
3208234 | '''
Created on 13.01.2016
@author: Asthmet
'''
import plot_class
import random
class Minesweeper:
''' Constructor of the class: start the game for you '''
def __init__( self, lines = 10, cols = 10 ):
self._lines = lines
self._cols = cols
self._map = [ [plot_class.Plot() for i in range(cols) ] for j in range(lines) ]
''' Returns the display of the cell '''
def getCell( self, x, y ):
var = self._map[x][y]
return var.getIndicator( trueSight = True )
''' Display the whole map for the player '''
def displayMap( self, trueSight = False ):
count = 0
for line in self._map:
print( ' ', sep = '', end = '' )
for col in line:
if col.getIndicator(trueSight = True) == plot_class.c_mine :
count += 1
print( col.getIndicator( trueSight = trueSight ), sep = '', end = '' )
print( ' ', sep = '', end = '' )
print( )
print( 'Total : ' + str(count) + ' mines' + ' - Format: ' + str(self._cols) + 'x' + str(self._lines) + '\n' )
''' Add a random bomb to the map '''
def randomBomb( self ):
x = random.randrange( self._lines )
y = random.randrange( self._cols )
if self.getCell( x, y ) == plot_class.c_mine :
self.randomBomb()
else :
self._map[x][y].setMine()
''' Generate as much bombs as specified '''
def carpetBomb( self, n = 10 ):
for i in range(n):
self.randomBomb()
''' Pass through every plot to determine its indicator value '''
''' Run this only once after doing the carpet bomb'''
def scanMap( self ):
for i, line in enumerate( self._map ) :
for j, p in enumerate( line ) :
count = 0
if p.getIndicator(trueSight = True) == plot_class.c_mine :
continue
else :
# up left
if i-1 >= 0 and j-1 >= 0 :
if self.getCell( i-1, j-1 ) == plot_class.c_mine :
count += 1
# up top
if i-1 >= 0 :
if self.getCell( i-1, j ) == plot_class.c_mine :
count += 1
# up right
if i-1 >= 0 and j+1 < self._cols :
if self.getCell( i-1, j+1 ) == plot_class.c_mine :
count += 1
# left
if j-1 >= 0 :
if self.getCell( i, j-1 ) == plot_class.c_mine :
count += 1
# right
if j+1 < self._cols :
if self.getCell( i, j+1 ) == plot_class.c_mine :
count += 1
# down left
if i+1 < self._lines and j-1 >= 0 :
if self.getCell( i+1, j-1 ) == plot_class.c_mine :
count += 1
# down bottom
if i+1 < self._lines :
if self.getCell( i+1, j ) == plot_class.c_mine :
count += 1
# down right
if i+1 < self._lines and j+1 < self._cols :
if self.getCell( i+1, j+1 ) == plot_class.c_mine :
count += 1
p.setIndicator( str(count) )
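        # Illustrative note (not from the original): the eight explicit neighbor
        # checks above are equivalent to looping over the offsets
        # [(-1,-1), (-1,0), (-1,1), (0,-1), (0,1), (1,-1), (1,0), (1,1)]
        # with the same bounds checks.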
''' Give the player the first start into the game '''
def showClue( self ):
x = random.randrange( self._lines )
y = random.randrange( self._cols )
if self.getCell( x, y ) != plot_class.c_empty :
self.showClue()
else :
self._map[x][y].revealPlot()
self.propagateDiscovery(x, y)
    ''' When an empty plot is found, reveal it and look for similar neighbors '''
def propagateDiscovery( self, x, y ):
if self.getCell(x, y) == plot_class.c_empty :
# Reveal the plot and propagate to the neighbors
self._map[x][y].revealPlot()
# up left
if x-1 >= 0 and y-1 >= 0 and self._map[x-1][y-1].revealed == False :
self.propagateDiscovery(x-1, y-1)
# up top
if x-1 >= 0 and self._map[x-1][y].revealed == False :
self.propagateDiscovery(x-1, y)
# up right
if x-1 >= 0 and y+1 < self._cols and self._map[x-1][y+1].revealed == False :
self.propagateDiscovery(x-1, y+1)
# left
if y-1 >= 0 and self._map[x][y-1].revealed == False :
self.propagateDiscovery(x, y-1)
# right
if y+1 < self._cols and self._map[x][y+1].revealed == False :
self.propagateDiscovery(x, y+1)
# down left
if x+1 < self._lines and y-1 >= 0 and self._map[x+1][y-1].revealed == False :
self.propagateDiscovery(x+1, y-1)
# down bottom
if x+1 < self._lines and self._map[x+1][y].revealed == False :
self.propagateDiscovery(x+1, y)
# down right
if x+1 < self._lines and y+1 < self._cols and self._map[x+1][y+1].revealed == False :
self.propagateDiscovery(x+1, y+1)
else :
            # just reveal the plot
self._map[x][y].revealPlot()
    ''' Propagate discovery from every empty plot not yet revealed '''
    def findUnsolvable( self ):
for i, line in enumerate( self._map ) :
for j, p in enumerate( line ) :
if self.getCell(i, j) == plot_class.c_empty and self._map[i][j].revealed == False :
self.propagateDiscovery(i, j)
#----------------------
# Creating the application
program = Minesweeper( lines = 16, cols = 30 )
program.carpetBomb(50)
program.scanMap()
program.displayMap( trueSight = True )
#program.findUnsolvable()
program.propagateDiscovery( 0, 0)
program.displayMap()
| StarcoderdataPython |
1601320 | <reponame>SpaceNetChallenge/SpaceNet_Optimized_Routing_Solutions
import numpy as np
import torch
import torch.nn as nn
class Accuracy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, logits: torch.Tensor, labels: torch.Tensor) -> float:
assert len(logits.size()) == 2
assert len(labels.size()) == 1
assert logits.size(0) == labels.size(0)
preds = torch.argmax(logits, dim=1)
assert preds.size() == labels.size()
return float((preds == labels).sum()) / labels.size(0)
class FScore(nn.Module):
def __init__(self, beta: float):
super().__init__()
self._f_score_numpy = FScoreNumpy(beta=beta)
def forward(self, logits, labels) -> float:
if logits.size() != labels.size():
raise ValueError(f'Size mismatch: {logits.size()} vs {labels.size()}')
y_pred = torch.sigmoid(logits.data).cpu().numpy() > 0.1
y_true = labels.data.cpu().numpy() > 0.5
return torch.tensor([self._f_score_numpy(y_pred, y_true)], device=torch.device('cuda'))
class F2Score(FScore):
def __init__(self):
super().__init__(beta=2.0)
class FScoreNumpy:
def __init__(self, beta: float):
self._beta = beta
def __call__(self, y_pred: np.ndarray, y_true: np.ndarray) -> float:
"""
:param y_pred: boolean np.ndarray of shape (num_samples, num_classes)
:param y_true: boolean np.ndarray of shape (num_samples, num_classes)
:return:
"""
if y_pred.shape != y_true.shape:
raise ValueError(f'Shape mismatch: predicted shape {y_pred.shape} vs gt shape {y_true.shape}')
if y_pred.dtype != np.bool:
raise TypeError(f'Expected y_pred to be of dtype `np.bool`, got `{y_pred.dtype}`')
if y_true.dtype != np.bool:
raise TypeError(f'Expected y_pred to be of dtype `np.bool`, got `{y_true.dtype}`')
tp = np.logical_and(y_pred, y_true).sum(axis=1)
tn = np.logical_and(np.logical_not(y_pred), np.logical_not(y_true)).sum(axis=1)
fp = np.logical_and(y_pred, np.logical_not(y_true)).sum(axis=1)
fn = np.logical_and(np.logical_not(y_pred), y_true).sum(axis=1)
num_samples, num_classes = y_true.shape
assert (tp + tn + fp + fn == num_classes).all()
assert len(tp) == num_samples
p = tp / (tp + fp)
r = tp / (tp + fn)
scores = (1 + self._beta ** 2) * p * r / (self._beta ** 2 * p + r)
scores[np.isnan(scores)] = 0.0
assert len(scores) == num_samples
# return scores # FIXME
return np.mean(scores)
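
if __name__ == "__main__":
    # Hedged smoke test (illustrative only, not part of the original module):
    # FScoreNumpy expects boolean arrays of shape (num_samples, num_classes).
    y_true = np.array([[True, False, True], [False, True, False]])
    y_pred = np.array([[True, True, False], [False, True, False]])
    f2 = FScoreNumpy(beta=2.0)
    # Row 0: p = r = 0.5 -> F2 = 0.5; row 1: p = r = 1 -> F2 = 1; mean = 0.75
    print("mean F2:", f2(y_pred, y_true))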
| StarcoderdataPython |
191807 | """
Agent module
"""
from random import randint, random
import numpy as np
def q_learning(environment, learning_rate, gamma, total_iteration, show=False):
"""
Q-learning: An off-policy TD control algorithm
    as described in "Reinforcement Learning: An Introduction" (1998, p. 158) by R. S. Sutton and A. G. Barto
    https://web.stanford.edu/class/psych209/Readings/SuttonBartoIPRLBook2ndEd.pdf
    Comments of the form ##'...' quote Sutton and Barto's pseudo-code
    """
    # ADDED features compared to R. S. Sutton's algorithm
    epoch_show = 200
    # Epsilon decays linearly from 1 down to 0 over the first 80% of training
    exploration_function = lambda i: -1 / (total_iteration * 0.8) * i + 1
    ##'Initialize Q(s,a) arbitrarily except that Q(terminal,.)=0'
number_states = environment.nb_discretisation_x
number_action = 2
Q = np.random.rand(number_states, number_states, number_states, number_states, number_action)
##'Repeat (for each episode):'
for iteration in range(total_iteration):
end = False
##'Initialize S'
statut = environment.reset()
##'Repeat (for each step of episode):'
while not end:
##'Choose A from S using ...'
if random() < exploration_function(iteration):
            # ADDED: epsilon-greedy random action to encourage exploration
action = randint(0, number_action-1)
else:
##'policy derived from Q'
action = np.argmax(Q[statut[0], statut[1], statut[2], statut[3]])
##'Take action A ...'
observation = environment.step(action)
##', observe R,S''
futur_statut, reward, end = observation
            ##'Q(S,A)=Q(S,A)+learning_rate*...'
            s0, s1, s2, s3 = statut
            f0, f1, f2, f3 = futur_statut
            td_target = reward + gamma * np.max(Q[f0, f1, f2, f3, :])
            Q[s0, s1, s2, s3, action] += learning_rate * (td_target - Q[s0, s1, s2, s3, action])
##'S becomes S''
statut = futur_statut.copy()
#ADDED: Show behavior in a window
if (iteration%epoch_show == 0 or iteration == total_iteration-1) and show:
environment.render()
return Q
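
if __name__ == "__main__":
    # Hedged smoke test (illustrative only, not from the original module):
    # _DummyEnv is a made-up environment exposing the interface q_learning
    # assumes above: reset() -> state, step(action) -> (state, reward, done),
    # render(), and an 'nb_discretisation_x' attribute.
    class _DummyEnv:
        nb_discretisation_x = 4
        def __init__(self):
            self._steps = 0
        def reset(self):
            self._steps = 0
            return [0, 0, 0, 0]
        def step(self, action):
            self._steps += 1
            return [randint(0, 3) for _ in range(4)], 1.0, self._steps >= 20
        def render(self):
            pass
    Q = q_learning(_DummyEnv(), learning_rate=0.1, gamma=0.95, total_iteration=50)
    print("Learned Q-table shape:", Q.shape)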
| StarcoderdataPython |
4809040 | <filename>Python/pascalT.py
# Print Pascal's Triangle in Python
from math import factorial
n = int(input("Enter the no of rows: "))
for i in range(n):
for j in range(n-i+1):
print(end=" ")
for j in range(i+1):
# nCr = n!/((n-r)!*r!)
print(factorial(i)//(factorial(j)*factorial(i-j)), end=" ")
# for new line
print()
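
# Example: with n = 4 the program prints a centered triangle:
#      1
#     1 1
#    1 2 1
#   1 3 3 1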
| StarcoderdataPython |
3302508 | ##@package client
#@author <NAME>
import asyncio,websockets
import traceback
from .iginterface import interact as IGInteract
from .lgiinterface import interact as LGIInteract
from .giinterface import interact as GIInteract
from .ggrinterface import interact as GGRInteract
from .gdrinterface import interact as GDRInteract
from .diinterface import interact as DIInteract
from .isinterface import interact as ISInteract
from .rinterface import interact as RInteract
from .isinterface import isComputing as isComputing
from .log import log
## Manages all the interaction with the client.
class Client:
def __init__(self,websocket):
self.socket=websocket
self.ip=websocket.remoteIP
## Log an action
def log(self,message):
log("%s : %s"%(self.ip,message))
## Interact with the client.
@asyncio.coroutine
def interact(self):
self.log("connected")
while True:
message = yield from self.socket.recv()
if message is None:
break
else:
yield from self.handleMessage(message)
self.log("disconnected")
@asyncio.coroutine
def handleMessage(self,message):
try:
if message is None:
return
message=message.strip().lower()
if message == "instance generation request":
self.log(message)
yield from IGInteract(self)
elif message == "list generated instances":
self.log(message)
yield from LGIInteract(self)
elif message.startswith("get instance"):
self.log(message)
yield from GIInteract(self, message)
elif message.startswith("delete instance"):
self.log(message)
yield from DIInteract(self, message)
elif message.startswith("reset instance"):
self.log(message)
yield from RInteract(self,message)
elif message.startswith("instance simulation request"):
self.log(message)
yield from ISInteract(self, message)
elif message.startswith("is computing simulation"):
yield from isComputing(self,message)
elif message.startswith("get daily result"):
self.log(message)
yield from GDRInteract(self, message)
elif message.startswith("get global results"):
self.log(message)
yield from GGRInteract(self, message)
else:
raise Exception("Unknown request \"%s\""%message)
except Exception:
self.log("Unexpected error: %s"%traceback.format_exc())
if self.socket.open:
yield from self.socket.send("ERROR %s"%traceback.format_exc())
## @var instancesFolder
# Folder with the instances of the client.
instancesFolder="instances"
## @var socket
# Websocket interface to the client.
## @var ip
# IP of the client.
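
if __name__ == "__main__":
    # Hedged wiring sketch (illustrative only, not from the original module;
    # this module uses relative imports, so in practice the server lives in
    # the surrounding package). 'remoteIP' is assumed to be attached by the
    # server code, since Client reads it in its constructor.
    @asyncio.coroutine
    def handler(websocket, path):
        websocket.remoteIP = websocket.remote_address[0]
        yield from Client(websocket).interact()

    loop = asyncio.get_event_loop()
    loop.run_until_complete(websockets.serve(handler, "0.0.0.0", 8765))
    loop.run_forever()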
| StarcoderdataPython |
137071 | <filename>Data/GalZoo2_TFrecords.py
import numpy as np
import tensorflow as tf
from astropy.io import fits
import matplotlib.image as mpimg
from skimage.transform import rescale
from skimage.color import rgb2gray
from numpy.random import choice
from skimage.exposure import rescale_intensity
import sys
from multiprocessing import Pool
import tqdm
import warnings
warnings.filterwarnings("ignore")
# TODO: tidy this up
def load_image(ID):
img = mpimg.imread('/data/astroml/aspindler/GalaxyZoo/GalaxyZooImages/galaxy_'+str(ID)+'.png')
img_crop = img[84:340,84:340]
img_grey = rgb2gray(img_crop)
img_64 = rescale(img_grey, scale=0.25, preserve_range=True)
img_uint = rescale_intensity(img_64, out_range=(0,255)).astype('uint8')
return img_uint
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def grab_data(i):
ID = IDs[0][i]
condition = (IDs[1][i]).astype('uint8')
img = load_image(ID)
feature = {'image': _bytes_feature(tf.compat.as_bytes(img.tostring())),
'condition': _bytes_feature(tf.compat.as_bytes(condition.tostring()))
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
return example#writer.write(example.SerializeToString())
#load IDs from fits
data = fits.open('Data/Scratch/GalZoo2/gz2_hart16.fits.gz')[1].data
spiral = (data['t02_edgeon_a05_no_flag'] & (data['t02_edgeon_a05_no_count'] > 20)).astype(bool)
data = data[spiral]
global IDs
IDs = [data['dr7objid'],
np.array([data['t03_bar_a06_bar_flag'],
data['t03_bar_a07_no_bar_flag'],
data['t04_spiral_a08_spiral_flag'],
data['t04_spiral_a09_no_spiral_flag'],
data['t05_bulge_prominence_a10_no_bulge_flag'],
data['t05_bulge_prominence_a11_just_noticeable_flag'],
data['t05_bulge_prominence_a12_obvious_flag'],
data['t05_bulge_prominence_a13_dominant_flag']
])]
IDs[1] = np.swapaxes(IDs[1],0,1)
train, valid, test = 10000, 10000, 25000
gals = choice(len(IDs[0]), train+valid+test)  # note: choice samples with replacement by default
global gals_train, gals_valid, gals_test
gals_train = gals[0:train]
gals_valid = gals[train:train+valid]
gals_test = gals[train+valid:train+valid+test]
# Addresses of the TFRecords files to save, one per split
datasets = [
    ('Data/Scratch/GalZoo2/galzoo_spiral_flags_train.tfrecords', gals_train),
    ('Data/Scratch/GalZoo2/galzoo_spiral_flags_valid.tfrecords', gals_valid),
    ('Data/Scratch/GalZoo2/galzoo_spiral_flags_test.tfrecords', gals_test),
]
for tfrecord_filename, split in datasets:
    # open the TFRecords file
    writer = tf.python_io.TFRecordWriter(tfrecord_filename)
    pool = Pool(processes=20)  # set pool of processors
    for result in tqdm.tqdm(pool.imap_unordered(grab_data, split), total=len(split)):
        writer.write(result.SerializeToString())
    writer.close()
    sys.stdout.flush()
| StarcoderdataPython |
174892 | <filename>component/widget/date_range_slider.py
from sepal_ui import sepalwidgets as sw
import ipyvuetify as v
from component import parameter as cp
class DateRangeSlider(sw.SepalWidget, v.Layout):
def __init__(self, dates=None, **kwargs):
# save the dates values
self.dates = dates
        # display the dates in a text field in the prepend slot
self.display = v.Html(tag="span", children = [""])
# create a range widget with the default params
self.range = v.RangeSlider(
disabled = True,
v_model = [0, 1],
max=1,
class_="pl-5 pr-1 mt-1"
)
        # add the non-conventional parameters for customization
for k, val in kwargs.items():
if hasattr(self.range, k):
setattr(self.range, k, val)
# wrap everything in a layout
super().__init__(
row=True,
v_model = None,
xs12=True,
children=[
v.Flex(xs9=True, children=[self.range]),
v.Flex(xs3=True, children=[self.display])
]
)
# link the v_models
self.range.observe(self._on_change, 'v_model')
# add the dates if existing
if dates:
self.set_dates(dates)
def _on_change(self, change):
"""update the display and v_model when the slider changes"""
# to avoid bugs on disable
if not self.dates:
return self
# get the real dates from the list
start, end = [self.dates[int(i)] for i in change['new']]
self.v_model = [start, end]
        # update what is displayed to the user
self.display.children = [f"{start} to {end}"]
return self
def disable(self):
"""disabled the widget and reset its value"""
self.dates = None
self.v_model = None
self.range.v_model = [0, 1]
self.range.max = 1
self.range.disabled = True
self.display.children = ['']
return self
def set_dates(self, dates):
"""set the dates and activate the widget"""
# save the dates
self.dates = dates
        # the min value is either the minimum number of images or half the length of the time series (if there are not enough images)
min_ = min(cp.min_images, len(dates)/2)
# set the slider
self.range.max = len(dates)-1
self.range.v_model = [min_, len(dates)-1]
# activate the slider
self.range.disabled = False
        return self
| StarcoderdataPython |
89942 | from model.contact import Contact
import re
from random import randrange
def test_contact_data_for_random_contact(app):
if app.contact.count() == 0:
app.contact.create(Contact(firstname="John", lastname="Connor", address=("%s, %s %s" % ("Los Angeles", str(randrange(1000)), "Nickel Road")), workphone="w44654532", email="<EMAIL>"))
old_contacts = app.contact.get_contact_list()
index = randrange(len(old_contacts))
contact_from_home_page = app.contact.get_contact_list()[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
#compare firstname
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
#compare lastname
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
#compare address
assert contact_from_home_page.address == contact_from_edit_page.address
#compare phones
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
#compare emails
assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "", map(lambda x: clear(x), filter(lambda x: x is not None, [contact.homephone, contact.mobilephone, contact.workphone, contact.secondaryphone]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "", filter(lambda x: x is not None, [contact.email, contact.email2, contact.email3])))
def test_contact_db_info_matches_ui(app, db):
ui_list = app.contact.get_contact_list()
def clean(contact):
return Contact(id=contact.id, firstname=contact.firstname.strip(), lastname=contact.lastname.strip(),
address=contact.address.strip(), all_phones_from_home_page=merge_phones_like_on_home_page(contact),
all_emails_from_home_page=merge_emails_like_on_home_page(contact))
db_list = map(clean, db.get_contact_list())
    assert sorted(ui_list, key=Contact.id_or_max) == sorted(db_list, key=Contact.id_or_max)
| StarcoderdataPython |
1673479 | <reponame>yansinan/pycameresp
#!/usr/bin/python3
# Distributed under MIT License
# Copyright (c) 2021 <NAME>
# pylint:disable=multiple-statements
# pylint:disable=too-many-lines
""" Class defining a VT100 text editor.
This editor runs directly on the board, which lets you make quick and easy
changes without having to use synchronization tools.
This editor allows script execution, and displays errors and execution time.
Editor shortcuts :
<br> - <b>Exit </b>: Escape
<br> - <b>Move cursor </b>: Arrows, Home, End, PageUp, PageDown, Ctrl-Home, Ctrl-End, Ctrl-Left, Ctrl-Right
<br> - <b>Selection </b>: Shift-Arrows, Shift-Home, Shift-End, Alt-Shift-Arrows, Ctrl-Shift-Left, Ctrl-Shift-Right
<br> - <b>Clipboard </b>: Selection with Ctrl X(Cut), Ctrl-C(Copy), Ctrl-V(Paste)
<br> - <b>Case change </b>: Selection with Ctrl-U(Toggle majuscule, minuscule)
<br> - <b>Indent </b>: Selection with Tab(Indent) or Shift-Tab(Unindent)
<br> - <b>Comment block </b>: Selection with Ctrl-Q
<br> - <b>Save </b>: Ctrl-S
<br> - <b>Find </b>: Ctrl-F
<br> - <b>Replace </b>: Ctrl-H
<br> - <b>Toggle mode </b>: Ctrl-T (Insertion/Replacement)
<br> - <b>Delete line </b>: Ctrl-L
<br> - <b>Goto line </b>: Ctrl-G
<br> - <b>Execute </b>: F5
This editor also works on Linux and OSX, and can be used standalone;
you only need to place the useful.py script alongside it.
All the keyboard shortcuts are defined at the start of the script.
On boards with little memory it may still work on very small files,
but otherwise it may fail with an out-of-memory error.
"""
import sys
sys.path.append("lib")
sys.path.append("lib/tools")
try:
from tools import useful
except:
import useful
TABSIZE = 4 # Tabulation size
HORIZONTAL_MOVE=8 # Minimal horizontal scrolling displacement
ESCAPE = "\x1b"
# Move shortcuts
UP = ["\x1b[A"]
DOWN = ["\x1b[B"]
RIGHT = ["\x1b[C"]
LEFT = ["\x1b[D"]
HOME = ["\x1b[1;3D", "\x1b[H", "\x1b\x1b[D", "\x1b[1~", "\x1bb"]
END = ["\x1b[1;3C", "\x1b[F", "\x1b\x1b[C", "\x1b[4~", "\x1bf"]
PAGE_UP = ["\x1b[1;3A", "\x1b[A", "\x1b\x1b[A", "\x1b[5~"]
PAGE_DOWN = ["\x1b[1;3B", "\x1b[B", "\x1b\x1b[B", "\x1b[6~"]
TOP = ["\x1b[1;5H"]
BOTTOM = ["\x1b[1;5F"]
NEXT_WORD = ["\x1b[1;5C"]
PREVIOUS_WORD = ["\x1b[1;5D"]
# Selection shortcuts
SELECT_UP = ["\x1b[1;2A"]
SELECT_DOWN = ["\x1b[1;2B"]
SELECT_RIGHT = ["\x1b[1;2C"]
SELECT_LEFT = ["\x1b[1;2D"]
SELECT_PAGE_UP = ["\x1b[1;10A","\x1b[1;4A","\x1b[5;2~"]
SELECT_PAGE_DOWN = ["\x1b[1;10B","\x1b[1;4B","\x1b[6;2~"]
SELECT_HOME = ["\x1b[1;2H","\x1b[1;10D"]
SELECT_END = ["\x1b[1;2F","\x1b[1;10C"]
SELECT_TOP = ["\x1b[1;6H"]
SELECT_BOTTOM = ["\x1b[1;6F"]
SELECT_ALL = ["\x01"]
SELECT_NEXT_WORD = ["\x1b[1;6C","\x1b[1;4C"]
SELECT_PREV_WORD = ["\x1b[1;6D","\x1b[1;4D"]
# Clipboard shortcuts
CUT = ["\x18","\x1bx"] # Cut
COPY = ["\x03","\x1bc"] # Copy
PASTE = ["\x16","\x1bv"] # Paste
# Selection modification shortcut
INDENT = ["\t"] # Indent
UNINDENT = ["\x1b[Z"] # Unindent
CHANGE_CASE = ["\x15"] # Change case
COMMENT = ["\x11"] # Comment block
DELETE = ["\x1b[3~"] # Delete pressed
BACKSPACE = ["\x7F"] # Backspace pressed
NEW_LINE = ["\n", "\r"] # New line pressed
TOGGLE_MODE = ["\x14"] # Toggle replace/insert mode
EXIT = [ESCAPE] # Exit
FIND = ["\x06"] # Find
FIND_NEXT = ["\x1bOR"] # Find next
FIND_PREVIOUS = ["\x1b[1;2R"] # Find previous
GOTO = ["\x07"] # Goto line
SAVE = ["\x13","\x1bs"] # Save
DELETE_LINE = ["\x0C"] # Delete line
REPLACE = ["\x08"] # Replace
REPLACE_CURRENT = ["\x12"] # Replace the selection
EXECUTE = ["\x1b[15~"] # Execute script
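
# Illustrative note (not from the original): the bindings above are raw
# VT100/xterm escape sequences. For example "\x1b[1;5C" decodes as
# ESC [ 1 ; 5 C, where the trailing 'C' means cursor right and the
# modifier value 5 means the Ctrl key is held.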
class View:
""" Class which manage the view of the edit field """
def __init__(self, view_height, view_top):
""" Constructor """
self.line = 0
self.column = 0
if view_height is None:
self.height = 20
else:
self.height = view_height
self.width = 80
self.top = view_top
self.is_refresh_all = True
self.is_refresh_line = False
self.is_refresh_line_before = False
self.is_refresh_line_after = False
self.refresh_part = None
self.text = None
self.tab_cursor_column = 0
self.sel_line_start = None
self.sel_line_end = None
self.screen_height = 1
self.screen_width = 1
def write(self, data):
""" Write data to stdout """
sys.stdout.write(data)
def flush(self):
""" Flush text to stdout """
try:
sys.stdout.flush()
except:
pass
def set_text(self, text):
""" Set the text object """
self.text = text
def get_screen_position(self):
""" Get the screen position of cursor """
return (self.text.get_cursor_line() - self.line + self.top, self.tab_cursor_column - self.column)
def reset(self):
""" Reset VT100 """
self.write("\x1B""c")
self.flush()
def reset_scroll_region(self):
""" Reset VT100 scroll region """
if self.screen_height > 0:
self.set_scrolling_region(0, self.screen_height-1)
def set_scrolling_region(self, top_line, bottom_line):
""" Define VT100 scroll region """
if top_line < bottom_line:
self.write("\x1B[%d;%dr"%(top_line+1,bottom_line+1))
def scroll_up(self):
""" Scroll to up """
self.set_scrolling_region(self.top, self.height+1)
self.write("\x1B[1S")
def scroll_down(self):
""" Scroll to down """
self.set_scrolling_region(self.top, self.height+1)
self.write("\x1B[1T")
def scroll_part_up(self):
""" Scroll the upper part """
line, column = self.get_screen_position()
if line < self.height:
self.set_scrolling_region(line, self.height+1)
self.write("\x1B[1S")
def scroll_part_down(self):
""" Scroll the lower part """
line, column = self.get_screen_position()
if line < self.height:
self.set_scrolling_region(line+1, self.height+1)
self.write("\x1B[1T")
else:
self.is_refresh_line_after = True
def move(self):
""" Move the view """
self.tab_cursor_column = self.text.get_tab_cursor(self.text.get_cursor_line())
# Move view port
if self.tab_cursor_column < self.column:
self.is_refresh_all = True
if self.tab_cursor_column > HORIZONTAL_MOVE:
self.column = self.tab_cursor_column-HORIZONTAL_MOVE
else:
self.column = 0
elif self.tab_cursor_column >= self.column + self.width:
self.column = self.tab_cursor_column-self.width+HORIZONTAL_MOVE
self.is_refresh_all = True
if self.text.get_cursor_line() < self.line:
delta = self.line - self.text.get_cursor_line()
self.line = self.text.get_cursor_line()
if self.line < 0:
self.line = 0
if delta <= 1:
self.scroll_down()
self.is_refresh_line = True
else:
self.is_refresh_all = True
elif self.text.get_cursor_line() > self.line + self.height:
delta = self.text.get_cursor_line() - self.line - self.height
self.line = self.text.get_cursor_line()-self.height
if delta <= 1:
self.scroll_up()
self.is_refresh_line = True
else:
self.is_refresh_all = True
def set_refresh_line(self):
""" Indicates that the line must be refreshed """
self.is_refresh_line = True
def set_refresh_after(self):
""" Indicates that all lines after the current line must be refreshed """
self.is_refresh_line = True
self.is_refresh_line_after = True
def set_refresh_before(self):
""" Indicates that all lines before the current line must be refreshed """
self.is_refresh_line = True
self.is_refresh_line_before = True
def set_refresh_all(self):
""" Indicates that all lines must be refreshed """
self.is_refresh_all = True
def show_line(self, current_line, screen_line, selection_start, selection_end, quick=False):
""" Show one line """
if quick:
line_to_display = ""
else:
line_to_display = "\x1B[%d;1f\x1B[K"%(screen_line+1)
count_line = self.text.get_count_lines()
if current_line < count_line and current_line >= 0:
line = self.text.get_tab_line(current_line)
partLine = line[self.column:self.column+self.width]
# If the line selected
if selection_start is not None:
# If the line not empty
if len(partLine) >= 1:
# If the line have carriage return at the end
if partLine[-1] == "\n":
# Remove the carriage return
partLine = partLine[:-1]
if len(partLine) > 0:
dummy, sel_line_start, sel_column_start = selection_start
dummy, sel_line_end, sel_column_end = selection_end
# If the current line is the end of selection
if current_line == sel_line_end:
# If the end of selection is outside the visible part
if sel_column_end - self.column < 0:
sel_column_end = 0
else:
sel_column_end -= self.column
# If the start of selection is on the previous lines
if sel_line_start < sel_line_end:
# Select the start of line
partLine = "\x1B[7m" + partLine[:sel_column_end] + "\x1B[m" + partLine[sel_column_end:]
else:
# Unselect the end of line
partLine = partLine[:sel_column_end] + "\x1B[m" + partLine[sel_column_end:]
# If the current line is the start of selection
if current_line == sel_line_start:
# If the start of selection is outside the visible part
if sel_column_start - self.column < 0:
sel_column_start = 0
else:
sel_column_start -= self.column
# If the end of selection is on the next lines
if sel_line_start < sel_line_end:
# Select the end of line
partLine = partLine[:sel_column_start] + "\x1B[7m" + partLine[sel_column_start:] + "\x1B[m"
else:
# Select the start of line
partLine = partLine[:sel_column_start] + "\x1B[7m" + partLine[sel_column_start:]
# If the line is completly selected
if current_line > sel_line_start and current_line < sel_line_end:
# Select all the line
partLine = "\x1B[7m" + partLine + "\x1B[m"
else:
partLine = ""
self.write(line_to_display + partLine)
else:
self.write(line_to_display + partLine.rstrip())
def refresh_line(self, selection_start, selection_end):
""" Refresh line """
screen_line, screen_column = self.get_screen_position()
refreshed = False
# If the line must be refreshed before the cursor line
if self.is_refresh_line_before:
self.is_refresh_line_before = False
self.show_line(self.text.get_cursor_line()-1, screen_line-1, selection_start, selection_end)
refreshed = True
# If the line must be refreshed after the cursor line
if self.is_refresh_line_after:
self.is_refresh_line_after = False
self.show_line(self.text.get_cursor_line()+1, screen_line+1, selection_start, selection_end)
offset = self.height - screen_line
self.show_line(self.text.get_cursor_line()+offset+1, screen_line+offset+1, selection_start, selection_end)
refreshed = True
# If only the cursor line must be refresh
if self.is_refresh_line:
self.is_refresh_line = False
self.show_line(self.text.get_cursor_line(), screen_line, selection_start, selection_end)
refreshed = True
# If no refresh detected and a selection started
if selection_start is not None and refreshed is False:
# Refresh the selection
self.show_line(self.text.get_cursor_line(), screen_line, selection_start, selection_end)
def refresh(self):
""" Refresh view """
selection_start, selection_end = self.text.get_selection()
if self.refresh_part is not None:
self.refresh_content(selection_start, selection_end, self.refresh_part)
self.refresh_part = None
# Refresh all required
if self.is_refresh_all:
self.refresh_content(selection_start, selection_end, True)
self.is_refresh_all = False
self.is_refresh_line = False
else:
# If no selection activated
if selection_start is None:
# Refresh the current line
self.refresh_line(selection_start, selection_end)
else:
# Refresh the selection
self.refresh_content(selection_start, selection_end, False)
self.move_cursor()
self.flush()
def refresh_content(self, selection_start, selection_end, all_):
""" Refresh content """
# If selection present
if selection_start is not None:
# Get the selection
dummy, sel_line_start, sel_column_start = selection_start
dummy, sel_line_end, sel_column_end = selection_end
lineStart = sel_line_start
lineEnd = sel_line_end
# The aim of this part is to limit the refresh area
# If the precedent display show a selection
if self.sel_line_end is not None and self.sel_line_start is not None:
# If the start and end of selection is on the sames lines
if self.sel_line_end == sel_line_end and self.sel_line_start == sel_line_start:
lineStart = lineEnd = self.text.get_cursor_line()
else:
# If the end of selection is after the precedent display
if self.sel_line_end > sel_line_end:
lineEnd = self.sel_line_end
# If the end of selection is on the same line than the precedent display
elif self.sel_line_end == sel_line_end:
# If the start of selection is before the precedent display
if self.sel_line_start < sel_line_start:
lineEnd = sel_line_start
else:
lineEnd = self.sel_line_start
# If the start of selection is before the precedent display
if self.sel_line_start < sel_line_start:
lineStart = self.sel_line_start
# If the start of selection is on the same line than the precedent display
elif self.sel_line_start == sel_line_start:
# If the end of selection is after the precedent display
if self.sel_line_end > sel_line_end:
lineStart = sel_line_end
else:
lineStart = self.sel_line_end
else:
lineStart = 0
lineEnd = self.line + self.height
current_line = self.line
screen_line = self.top
if type(all_) == type([]):
lineStart, lineEnd = all_
all_ = False
count_line = self.text.get_count_lines()
maxLine = self.line + self.height
if all_:
# Erase the rest of the screen with empty line (used when the text is shorter than the screen)
self.move_cursor(screen_line, 0)
self.write("\x1B[J")
# Refresh all lines visible
while current_line < count_line and current_line <= maxLine:
self.show_line(current_line, screen_line, selection_start, selection_end, True)
screen_line += 1
current_line += 1
if (current_line < count_line and current_line <= maxLine):
self.write("\n\r")
else:
# Refresh all lines visible
while current_line < count_line and current_line <= maxLine:
# If the line is in selection or all must be refreshed
if lineStart <= current_line <= lineEnd or all_:
self.show_line(current_line, screen_line, selection_start, selection_end)
screen_line += 1
current_line += 1
# If selection present
if selection_start is not None:
# Save current selection
dummy, self.sel_line_start, dummy = selection_start
dummy, self.sel_line_end, dummy = selection_end
def hide_selection(self):
""" Hide the selection """
selection_start, selection_end = self.text.get_selection()
if selection_start is not None:
self.set_refresh_selection()
self.sel_line_start = None
self.sel_line_end = None
def set_refresh_selection(self):
""" Indicates that the selection must be refreshed """
selection_start, selection_end = self.text.get_selection()
if selection_start is not None:
# self.is_refresh_all = True
lineStart = selection_start[1]
if self.sel_line_start < lineStart:
lineStart = self.sel_line_start
lineEnd = selection_end[1]
if self.sel_line_end > lineEnd:
lineEnd = self.sel_line_end
self.refresh_part = [lineStart, lineEnd]
def move_cursor(self, screen_line=None, screen_column=None):
""" Move the cursor in the view """
self.write(self.get_move_cursor(screen_line, screen_column))
def get_move_cursor(self, screen_line=None, screen_column=None):
""" Move the cursor in the view """
if screen_line is None and screen_column is None:
screen_line, screen_column = self.get_screen_position()
return "\x1B[%d;%df"%(screen_line+1,screen_column+1)
def get_screen_size(self):
""" Get the screen size """
height, width = useful.get_screen_size()
self.screen_height = height
self.screen_width = width
self.height = height-self.top-1
self.width = width
self.move_cursor()
def cls(self):
""" clear the screen """
self.write("\x1B[2J")
self.move_cursor(0,0)
class Text:
""" Class which manage the text edition """
def __init__(self, read_only=False):
""" Constructor """
self.lines = [""]
self.cursor_line = 0
self.cursor_column = 0
self.tab_cursor_column = 0
self.modified = False
self.replace_mode = False
self.read_only = read_only
self.view = None
self.tab_size = TABSIZE
self.selection_start = None
self.selection_end = None
self.selection = []
self.filename = None
def set_view(self, view):
""" Define the view attached to the text """
self.view = view
def get_count_lines(self):
""" Get the total of lines """
return len(self.lines)
def get_cursor_line(self):
""" Get the current line of the cursor """
return self.cursor_line
def get_tab_cursor(self, current_line, current_column=None):
""" Get position of cursor with line with tabulation """
if current_column is None:
cursor_column = self.cursor_column
else:
cursor_column = current_column
line = self.lines[current_line]
if "\t" in line:
tab_cursor_column = 0
column = 0
lenLine = len(line)
while column < cursor_column:
if line[column] == "\t":
pos = tab_cursor_column%self.tab_size
tab_cursor_column += self.tab_size-pos
column += 1
else:
tab = line.find("\t",column)
if tab > 0:
partSize = tab - column
else:
partSize = lenLine - column
if column + partSize > cursor_column:
partSize = cursor_column - column
tab_cursor_column += partSize
column += partSize
return tab_cursor_column
else:
return cursor_column
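	# Worked example (illustrative, not from the original): with tab_size=4
	# and the line "a\tb", cursor_column=2 (pointing at 'b') yields
	# tab_cursor_column=4, because the tab at column 1 expands the screen
	# position from 1 up to 4.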
def get_tab_line(self, current_line = None):
""" Get the tabuled line """
line = self.lines[current_line]
if "\t" in line:
tabLine = ""
tab_cursor_column = 0
lenLine = len(line)
column = 0
while column < lenLine:
char = line[column]
if char == "\t":
pos = tab_cursor_column%self.tab_size
tab_cursor_column += self.tab_size-pos
tabLine += " "*(self.tab_size-pos)
column += 1
else:
tab = line.find("\t",column)
if tab > 0:
part = line[column:tab]
else:
part = line[column:]
tab_cursor_column += len(part)
tabLine += part
column += len(part)
else:
tabLine = line
return tabLine
def get_tab_cursor_column(self):
""" Get the column of cursor in tabuled line """
line = self.lines[self.cursor_line]
column = 0
self.tab_cursor_column = 0
while column < self.cursor_column:
if line[column] == "\t":
pos = self.tab_cursor_column%self.tab_size
self.tab_cursor_column += self.tab_size-pos
column += 1
else:
tab = line.find("\t",column)
if tab > 0:
delta = tab - column
if column + delta > self.cursor_column:
delta = self.cursor_column - column
self.tab_cursor_column += delta
column += delta
else:
self.tab_cursor_column += delta
column += delta
else:
delta = self.cursor_column - column
self.tab_cursor_column += delta
column += delta
def set_cursor_column(self):
""" When the line change compute the cursor position with tabulation in the line """
line = self.lines[self.cursor_line]
column = 0
tab_cursor_column = 0
lenLine = len(line)
column = 0
while column < lenLine:
char = line[column]
			# If the previous position is found exactly in the current line
if tab_cursor_column == self.tab_cursor_column:
self.cursor_column = column
break
			# If the previous position is not found in the current line
if tab_cursor_column > self.tab_cursor_column:
# Keep last existing position
self.cursor_column = column
break
# If tabulation found
if char == "\t":
tab_cursor_column += self.tab_size-(tab_cursor_column%self.tab_size)
column += 1
else:
# Optimization to accelerate the cursor position
tab = line.find("\t", column)
# Tabulation found
if tab > 0:
delta = tab - column
# If the tabulation position is after the previous tabulation cursor
if delta + tab_cursor_column > self.tab_cursor_column:
# Move the cursor to the left
self.cursor_column = column + (self.tab_cursor_column - tab_cursor_column)
break
else:
# Another tabulation found, move it after
tab_cursor_column += delta
column += delta
# Tabulation not found
else:
# Move the cursor to the end of line
self.cursor_column = column + (self.tab_cursor_column - tab_cursor_column)
break
else:
if len(line) >= 1:
self.cursor_column = len(line)-1
else:
self.cursor_column = 0
def load(self, filename_):
""" Load file in the editor """
self.filename = None
try:
self.lines = []
self.filename = filename_
file = open(filename_, "r")
line = file.readline()
while line != "":
self.lines.append(line.replace("\r\n","\n"))
line = file.readline()
file.close()
if len(self.lines) == 0:
self.lines = [""]
except MemoryError:
# pylint: disable=raise-missing-from
raise MemoryError()
except OSError:
self.lines = [""]
# File not existing
except Exception as err:
useful.syslog(err)
self.lines = [""]
def save(self):
""" Save text in the file """
result = False
if self.read_only is False:
if self.filename is not None:
try:
file = open(self.filename, "w")
for line in self.lines:
file.write(line)
file.close()
self.modified = False
result = True
except Exception as err:
useful.syslog(err)
return result
def change_line(self, moveLine):
""" Move the cursor on another line """
# If cursor is before the first line
if moveLine + self.cursor_line < 0:
# Set the cursor to the first line
self.cursor_line = 0
self.cursor_column = 0
self.change_column(0)
# If the cursor is after the last line
elif moveLine + self.cursor_line >= len(self.lines):
self.cursor_line = len(self.lines) -1
self.cursor_column = len(self.lines[self.cursor_line])
self.change_column(0)
# else the cursor is in the lines of text
else:
previousLine = self.cursor_line
self.cursor_line += moveLine
if len(self.lines) - 1 == self.cursor_line:
lenLine = len(self.lines[self.cursor_line])
else:
lenLine = len(self.lines[self.cursor_line])-1
self.set_cursor_column()
# If the new cursor position is outside the last line of text
if self.cursor_column > lenLine:
self.cursor_column = lenLine
if self.selection_start is not None:
self.selection_end = [self.cursor_column, self.cursor_line,self.get_tab_cursor(self.cursor_line)]
self.view.move()
def change_column(self, move_column):
""" Move the cursor on another column """
cursor_line = self.cursor_line
cursor_column = self.cursor_column
		# If the cursor goes to the previous line
if move_column + self.cursor_column < 0:
# If start of line
if abs(move_column) > 1:
self.cursor_column = 0
			# If moving left and the cursor must go to the previous line
elif self.cursor_line > 0:
self.cursor_line -= 1
self.cursor_column = len(self.lines[self.cursor_line])-1
# If the cursor is at the end of line
elif move_column + self.cursor_column > len(self.lines[self.cursor_line])-1:
# If the cursor is on the last line of file
if abs(move_column) > 1 or self.cursor_line+1 == len(self.lines):
				# If the current line is empty
if self.lines[self.cursor_line] == "":
self.cursor_column = 0
self.tab_cursor_column = 0
				# If the last line contains a return char
elif self.lines[self.cursor_line][-1] == "\n":
# Move cursor before return
self.cursor_column = len(self.lines[self.cursor_line])-1
else:
# Move cursor after the last char
self.cursor_column = len(self.lines[self.cursor_line])
# If the cursor is on the end of line and must change of line
elif self.cursor_line+1 < len(self.lines):
self.cursor_line += 1
self.cursor_column = 0
self.tab_cursor_column = 0
# Normal move of cursor
else:
# Next or previous column
self.cursor_column += move_column
if abs(move_column) > 0:
self.get_tab_cursor_column()
self.close_selection()
self.view.move()
if self.cursor_column == cursor_column and self.cursor_line == cursor_line:
return False
else:
return True
def backspace(self):
""" Manage the backspace key """
self.modified = True
if self.remove_selection() is False:
			# The cursor is not at the beginning of the line
if self.cursor_column >= 1:
line = self.lines[self.cursor_line]
line = line[0:self.cursor_column-1:]+ line[self.cursor_column : :]
self.lines[self.cursor_line] = line
self.change_column(-1)
self.view.set_refresh_line()
			# The cursor is at the beginning of the line
else:
				# If the cursor is not on the first line
if self.cursor_line >= 1:
# Copy the current line to the end of previous line
self.cursor_column = len(self.lines[self.cursor_line-1])
self.lines[self.cursor_line-1] = self.lines[self.cursor_line-1][:-1] + self.lines[self.cursor_line]
del self.lines[self.cursor_line]
self.view.scroll_part_up()
self.cursor_line -= 1
self.view.set_refresh_after()
self.change_column(-1)
def delete(self):
""" Manage the delete key """
self.modified = True
if self.remove_selection() is False:
line = self.lines[self.cursor_line]
if self.cursor_column < len(line):
				# If the cursor is on the newline character
if line[self.cursor_column] == "\n":
					# If the cursor is not at the end of the file
if self.cursor_line < len(self.lines)-1:
# Copy the next line to the current line
self.lines[self.cursor_line] = line[:self.cursor_column] + self.lines[self.cursor_line+1]
del self.lines[self.cursor_line+1]
self.view.scroll_part_up()
self.view.set_refresh_after()
# Else the char is deleted in the middle of line
else:
line = line[0:self.cursor_column:]+ line[self.cursor_column+1 : :]
self.lines[self.cursor_line] = line
self.change_column(0)
self.view.is_refresh_line = True
def delete_line(self):
""" Manage the delete of line key """
self.hide_selection()
self.modified = True
# If file contains one or none line
if len(self.lines) <= 1:
# Clean the content of file
self.lines = [""]
self.cursor_column = 0
self.cursor_line = 0
self.change_column(0)
# If the current line is not the last of file
elif self.cursor_line < len(self.lines):
# Delete the line
self.cursor_column = 0
del self.lines[self.cursor_line]
self.view.scroll_part_up()
if self.cursor_line >= len(self.lines):
self.cursor_line = len(self.lines)-1
self.change_column(0)
self.view.set_refresh_after()
def new_line(self):
""" Manage the newline key """
self.modified = True
if self.remove_selection() is False:
line1 = self.lines[self.cursor_line][:self.cursor_column]+"\n"
line2 = self.lines[self.cursor_line][self.cursor_column:]
self.lines[self.cursor_line]=line1
self.lines.insert(self.cursor_line+1, line2)
self.view.scroll_part_down()
self.change_column(1)
self.view.set_refresh_before()
def insert_char(self, char):
""" Insert character """
self.modified = True
self.lines[self.cursor_line] = self.lines[self.cursor_line][:self.cursor_column] + char + self.lines[self.cursor_line][self.cursor_column:]
self.change_column(1)
self.view.set_refresh_line()
def replace_char(self, char):
""" Replace character """
self.modified = True
if self.cursor_line == len(self.lines)-1 and self.cursor_column >= len(self.lines[self.cursor_line])-1:
self.lines[self.cursor_line] = self.lines[self.cursor_line][:self.cursor_column] + char
self.change_column(1)
self.view.set_refresh_line()
# If it is the last char in the line
elif self.lines[self.cursor_line][self.cursor_column] == "\n":
# Append char to the line
self.insert_char(char)
# Else the char must be replaced in the line
else:
self.lines[self.cursor_line] = self.lines[self.cursor_line][:self.cursor_column] + char + self.lines[self.cursor_line][self.cursor_column+1:]
self.change_column(1)
self.view.set_refresh_line()
def open_selection(self):
""" Start a selection """
if self.selection_start is None:
self.selection_start = [self.cursor_column, self.cursor_line, self.get_tab_cursor(self.cursor_line)]
def close_selection(self):
""" Terminate selection """
if self.selection_start is not None:
self.selection_end = [self.cursor_column, self.cursor_line,self.get_tab_cursor(self.cursor_line)]
def select_all(self):
""" Do a select all """
self.selection_start = [0,0,0]
lastLine = len(self.lines)-1
lastColumn = len(self.lines[lastLine])-1
self.move_cursor(lastLine, lastColumn)
self.selection_end = [lastColumn, lastLine, self.get_tab_cursor(lastLine, lastColumn)]
self.view.set_refresh_all()
def get_selection(self):
""" Get information about selection """
if self.selection_start:
if self.selection_start[1] > self.selection_end[1]:
return self.selection_end, self.selection_start
elif self.selection_start[1] < self.selection_end[1]:
return self.selection_start, self.selection_end
elif self.selection_start[0] < self.selection_end[0]:
return self.selection_start, self.selection_end
else:
return self.selection_end, self.selection_start
else:
return None, None
def arrow_up(self, keys):
""" Manage arrow up key """
self.hide_selection()
self.change_line(-1)
def arrow_down(self, keys):
""" Manage arrow down key """
self.hide_selection()
self.change_line(1)
def arrow_left(self, keys):
""" Manage arrow left key """
self.hide_selection()
self.change_column(-len(keys))
def arrow_right(self, keys):
""" Manage arrow right key """
self.hide_selection()
self.change_column(len(keys))
def select_up(self, keys):
""" Manage select up key """
self.open_selection()
self.change_line(-1)
def select_down(self, keys):
""" Manage select down key """
self.open_selection()
self.change_line(1)
def select_left(self, keys):
""" Manage select left key """
self.open_selection()
self.change_column(-len(keys))
def select_right(self, keys):
""" Manage select right key """
self.open_selection()
self.change_column(len(keys))
def select_home(self):
""" Manage home key """
self.open_selection()
self.change_column(-100000000000)
def select_end(self):
""" Manage end key """
self.open_selection()
self.change_column(100000000000)
def select_page_up(self, keys):
""" Manage select page up key """
self.open_selection()
self.change_line((-self.view.height-1) * len(keys))
self.change_column(-100000000000)
def select_page_down(self, keys):
""" Manage select page down key """
self.open_selection()
self.change_line((self.view.height+1) * len(keys))
self.change_column(100000000000)
def select_next_word(self):
""" Manage select next word key """
self.open_selection()
self.move_word(1)
def select_previous_word(self):
""" Manage select previous word key """
self.open_selection()
self.move_word(-1)
def select_top(self):
""" Manage select to the first line of text """
self.open_selection()
self.change_line(-100000000000)
def select_bottom(self):
""" Manage select to the last line of text """
self.open_selection()
self.change_line(100000000000)
def page_up(self, keys):
""" Manage page up key """
self.hide_selection()
self.change_line((-self.view.height-1) * len(keys))
def page_down(self, keys):
""" Manage page down key """
self.hide_selection()
self.change_line((self.view.height+1) * len(keys))
def home(self):
""" Manage home key """
self.hide_selection()
self.change_column(-100000000000)
def end(self):
""" Manage end key """
self.hide_selection()
self.change_column(100000000000)
def add_char(self, keys):
""" Manage other key, add character """
result = False
if useful.isascii(keys[0]):
self.remove_selection()
for char in keys:
if useful.isascii(char):
if self.replace_mode:
self.replace_char(char)
else:
self.insert_char(char)
result = True
# if result is False:
# print(useful.dump(keys[0]))
return result
def find_next(self, text):
""" Find next researched text """
# Get the selection
selection_start, selection_end = self.get_selection()
# Hide the selection
self.hide_selection()
# Set the start of search at the cursor position
current_line = self.cursor_line
current_column = self.cursor_column
# If selection activated
if selection_start is not None and selection_end is not None:
# If selection is on one line
if selection_start[1] == selection_end[1] and current_line == selection_start[1]:
# If selection is exactly the size of text
if selection_start[0] == current_column:
# Move the start of search after the text selected
current_column = selection_end[0]
# Find the text in next lines
while current_line < len(self.lines):
# Search text
pos = self.lines[current_line].find(text, current_column)
# If text found
if pos >= 0:
# Move the cursor to the text found
self.cursor_line = current_line
self.cursor_column = pos + len(text)
self.change_column(0)
self.selection_start = [pos, current_line,self.get_tab_cursor(current_line,pos)]
self.selection_end = [pos + len(text), current_line, self.get_tab_cursor(current_line, pos + len(text))]
break
else:
# Set the search position at the begin of next line
current_column = 0
current_line += 1
self.view.move()
def find_previous(self, text):
""" Find previous researched text """
# Get the selection
selection_start, selection_end = self.get_selection()
# Hide the selection
self.hide_selection()
# Set the start of search at the cursor position
current_line = self.cursor_line
current_column = self.cursor_column
# If selection activated
if selection_start is not None and selection_end is not None:
# If selection is on one line
if selection_start[1] == selection_end[1] and current_line == selection_start[1]:
# If selection is exactly the size of text
if selection_end[0] - selection_start[0] == len(text):
# Move the start of search before the text selected
current_column = selection_start[0]
# While the line before the first line not reached
while current_line >= 0:
# Get the current line
line = self.lines[current_line]
# If the current column is negative
if current_column < 0:
# Set the end of line
current_column = len(line)
# Search the text in reverse
pos = line.rfind(text, 0, current_column)
# If text found
if pos >= 0:
self.cursor_line = current_line
self.cursor_column = pos
self.change_column(0)
self.selection_start = [pos, current_line,self.get_tab_cursor(current_line,pos)]
self.selection_end = [pos + len(text), current_line, self.get_tab_cursor(current_line, pos + len(text))]
break
else:
# Set the search position at the end of line
current_column = -1
current_line -= 1
self.view.move()
def hide_selection(self):
""" Hide selection """
self.view.hide_selection()
self.selection_start = self.selection_end = None
def goto(self, lineNumber):
""" Goto specified line """
self.hide_selection()
if lineNumber < 0:
self.cursor_line = len(self.lines)-1
elif lineNumber < 1:
			self.cursor_line = 0
elif lineNumber < len(self.lines):
self.cursor_line = lineNumber - 1
else:
self.cursor_line = len(self.lines)-1
self.cursor_column = 0
self.change_column(0)
self.view.move()
def copy_clipboard(self):
""" Copy selection to clipboard """
result = []
if self.selection_start is not None:
selection_start, selection_end = self.get_selection()
sel_column_start, sel_line_start, dummy = selection_start
sel_column_end, sel_line_end, dummy = selection_end
result = []
if sel_line_start == sel_line_end:
result.append(self.lines[sel_line_start][sel_column_start:sel_column_end])
else:
for line in range(sel_line_start, sel_line_end+1):
if line == sel_line_start:
part = self.lines[line][sel_column_start:]
if part != "":
result.append(self.lines[line][sel_column_start:])
elif line == sel_line_end:
part = self.lines[line][:sel_column_end]
if part != "":
result.append(self.lines[line][:sel_column_end])
else:
result.append(self.lines[line])
return result
def remove_selection(self):
""" Remove selection """
if self.selection_start is not None:
self.modified = True
selection_start, selection_end = self.get_selection()
sel_column_start, sel_line_start, dummy = selection_start
sel_column_end, sel_line_end, dummy = selection_end
start = self.lines[sel_line_start][:sel_column_start]
end = self.lines[sel_line_end ][sel_column_end:]
self.lines[sel_line_start] = start + end
if sel_line_start < sel_line_end:
for line in range(sel_line_end, sel_line_start,-1):
del self.lines[line]
self.move_cursor(sel_line_start, sel_column_start)
self.hide_selection()
self.view.set_refresh_all()
return True
return False
def paste_clipboard(self, selection):
""" Paste clipboard at the cursor position """
if selection != []:
# Split the line with insertion
start = self.lines[self.cursor_line][:self.cursor_column]
end = self.lines[self.cursor_line][self.cursor_column:]
# Paste the first line
self.lines[self.cursor_line] = start + selection[0]
self.cursor_line += 1
# Insert all lines from clipboard
for line in selection[1:-1]:
self.lines.insert(self.cursor_line, line)
self.cursor_line += 1
# If the last line of clipboard is not empty
if len(selection[-1]) >= 1:
# If the last line of clipboard contains new line
if selection[-1][-1] == "\n":
if len(selection) > 1:
# Add the new line
self.lines.insert(self.cursor_line, selection[-1])
self.cursor_line += 1
# Add the part after the insertion
self.lines.insert(self.cursor_line, end)
self.cursor_column = 0
else:
if len(selection) > 1:
self.lines.insert(self.cursor_line, selection[-1] + end)
self.cursor_column = len(selection[-1])
else:
self.cursor_line -= 1
self.lines[self.cursor_line] += end
self.cursor_column = len(start) + len(selection[-1])
self.move_cursor(self.cursor_line, self.cursor_column)
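	# Worked example: with lines == ["abcdef"], the cursor at column 3 and a
	# clipboard of ["XX"], paste_clipboard leaves lines == ["abcXXdef"] with
	# the cursor just after the pasted text (column 5).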
def move_cursor(self, line, column):
""" Move the cursor """
self.cursor_line = line
self.cursor_column = column
self.change_column(0)
self.get_tab_cursor_column()
def copy(self):
""" Manage copy key """
self.selection = self.copy_clipboard()
def cut(self):
""" Manage cut key """
self.modified = True
self.selection = self.copy_clipboard()
self.remove_selection()
def paste(self):
""" Manage paste key """
self.modified = True
self.remove_selection()
self.paste_clipboard(self.selection)
self.view.set_refresh_all()
self.hide_selection()
def change_case(self):
""" Change the case of selection """
selection = self.copy_clipboard()
if selection != []:
self.modified = True
selection_start = self.selection_start
selection_end = self.selection_end
self.remove_selection()
isUpper = None
for line in selection:
for char in line:
if useful.isupper(char):
isUpper = True
break
elif useful.islower(char):
isUpper = False
break
if isUpper is not None:
break
# pylint:disable=consider-using-enumerate
for line in range(len(selection)):
if isUpper:
selection[line] = selection[line].lower()
else:
selection[line] = selection[line].upper()
self.paste_clipboard(selection)
self.view.set_refresh_selection()
self.selection_start = selection_start
self.selection_end = selection_end
def comment(self):
""" Comment the selection """
self.modified = True
# If selection
if self.selection_start is not None:
selection_start, selection_end = self.get_selection()
_, sel_line_start, _ = selection_start
_, sel_line_end, _ = selection_end
			# Toggle the comment character on each selected line
for line in range(sel_line_start, sel_line_end+1):
if len(self.lines[line]) >= 1:
if self.lines[line][0] != '#':
self.lines[line] = "#" + self.lines[line]
else:
if len(self.lines[line]) >= 1:
self.lines[line] = self.lines[line][1:]
# Move the start selection to the start of first selected line
self.selection_start = [0,sel_line_start, 0]
# Get the length of last selected line
len_line_end = len(self.lines[sel_line_end])
# Move the end of selection at the end of line selected
self.selection_end = [len_line_end-1, sel_line_end, self.get_tab_cursor(sel_line_end,len_line_end-1)]
self.view.set_refresh_selection()
else:
if len(self.lines[self.cursor_line]) >= 1:
# If nothing selected
if self.lines[self.cursor_line][0] == "#":
self.lines[self.cursor_line] = self.lines[self.cursor_line][1:]
if self.cursor_column > 0:
self.change_column(-1)
else:
self.lines[self.cursor_line] = "#" + self.lines[self.cursor_line]
self.change_column(1)
self.view.set_refresh_line()
def indent(self, keys):
""" Manage tabulation key """
# If nothing selected
if self.selection_start is None:
self.add_char(keys)
else:
self.modified = True
# Indent selection
selection_start, selection_end = self.get_selection()
sel_column_start, sel_line_start, dummy = selection_start
sel_column_end, sel_line_end, dummy = selection_end
# If a part of line selected
if sel_line_start == sel_line_end and not (sel_column_start == 0 and sel_column_end == len(self.lines[sel_line_end])-1):
self.add_char(INDENT)
else:
# If the last line selected is at beginning of line
if sel_column_end == 0:
# This line must not be indented
sel_line_end -= 1
# Add tabulation
for line in range(sel_line_start, sel_line_end+1):
self.lines[line] = "\t" + self.lines[line]
# Move the start selection to the start of first selected line
self.selection_start = [0,sel_line_start, 0]
# If the last line selected is not at beginning of line
if sel_column_end > 0:
# Get the length of last selected line
len_line_end = len(self.lines[sel_line_end])
# If the end of selection is not on the last line
if sel_line_end < len(self.lines)-1:
len_line_end -= 1
# Move the end of selection at the end of line selected
self.selection_end = [len_line_end, sel_line_end, self.get_tab_cursor(sel_line_end,len_line_end)]
else:
# Move the end of selection at the start of the last line selected
self.selection_end = [0, sel_line_end+1, 0]
self.view.set_refresh_selection()
def unindent(self, keys):
""" Manage the unindentation key """
# If nothing selected
if self.selection_start is None:
self.backspace()
else:
self.modified = True
# Unindent selection
selection_start, selection_end = self.get_selection()
sel_column_start, sel_line_start, dummy = selection_start
sel_column_end, sel_line_end, dummy = selection_end
# If the selection is only alone line
if sel_line_start == sel_line_end:
self.hide_selection()
else:
# If the last line selected is at beginning of line
if sel_column_end == 0:
# This line must not be indented
sel_line_end -= 1
# Remove indentation
for line in range(sel_line_start, sel_line_end+1):
if len(self.lines[line]) >= 1:
if self.lines[line][0] == "\t" or self.lines[line][0] == " ":
self.lines[line] = self.lines[line][1:]
# Move the start selection to the start of first selected line
self.selection_start = [0,sel_line_start, 0]
# If the last line selected is not at beginning of line
if sel_column_end > 0:
# Get the length of last selected line
len_line_end = len(self.lines[sel_line_end])
# If the end of selection is not on the last line
if sel_line_end < len(self.lines)-1:
len_line_end -= 1
# Move the end of selection at the end of line selected
self.selection_end = [len_line_end, sel_line_end, self.get_tab_cursor(sel_line_end,len_line_end)]
else:
# Move the end of selection at the start of the last line selected
self.selection_end = [0, sel_line_end+1, 0]
self.view.set_refresh_selection()
def replace(self, old, new):
""" Replace the selection """
if self.read_only is False:
selection = self.copy_clipboard()
if len(selection) == 1:
if selection[0] == old:
self.delete()
self.insert_char(new)
return True
return False
def get_cursor_char(self):
""" Get the char on the cursor """
try:
return self.lines[self.cursor_line][self.cursor_column]
		except IndexError:
			return None
def move_word(self, direction):
""" Move the cursor to the word """
state = 0
while self.change_column(direction):
current_char = self.get_cursor_char()
if current_char is None:
break
elif useful.ispunctuation(current_char):
if state == 0:
state = 2
elif state == 1:
break
elif useful.isalpha(current_char):
if state == 0:
state = 1
elif state == 2:
break
elif useful.isspace(current_char):
if state == 1:
break
if state == 2:
break
def next_word(self):
""" Move the cursor to the next word """
self.hide_selection()
self.move_word(1)
self.view.move()
def previous_word(self):
""" Move the cursor to the previous word """
self.hide_selection()
self.move_word(-1)
self.view.move()
def top(self):
""" Move the cursor to the first line of text """
self.goto(1)
def bottom(self):
""" Move the cursor to the last line of text """
self.goto(100000000000)
def treat_char(self, keys):
""" Treat character entered """
char = ord(keys[0][0])
if self.read_only is False:
if char >= 0x20 and char != 0x7F:
self.add_char(keys)
return True
return False
def treat_key(self, keys):
""" Treat keys """
if self.treat_char(keys) is False:
# Move in the edit field
if keys[0] in UP : self.arrow_up(keys)
elif keys[0] in DOWN: self.arrow_down(keys)
elif keys[0] in LEFT: self.arrow_left(keys)
elif keys[0] in RIGHT: self.arrow_right(keys)
elif keys[0] in HOME: self.home()
elif keys[0] in END: self.end()
elif keys[0] in PAGE_UP: self.page_up(keys)
elif keys[0] in PAGE_DOWN: self.page_down(keys)
elif keys[0] in TOP: self.top()
elif keys[0] in BOTTOM: self.bottom()
elif keys[0] in NEXT_WORD: self.next_word()
elif keys[0] in PREVIOUS_WORD: self.previous_word()
# Selection the edit field
elif keys[0] in SELECT_UP: self.select_up(keys)
elif keys[0] in SELECT_DOWN: self.select_down(keys)
elif keys[0] in SELECT_RIGHT: self.select_right(keys)
elif keys[0] in SELECT_LEFT: self.select_left(keys)
elif keys[0] in SELECT_HOME: self.select_home()
elif keys[0] in SELECT_END: self.select_end()
elif keys[0] in SELECT_TOP: self.select_top()
elif keys[0] in SELECT_BOTTOM: self.select_bottom()
elif keys[0] in SELECT_PAGE_UP: self.select_page_up(keys)
elif keys[0] in SELECT_PAGE_DOWN:self.select_page_down(keys)
elif keys[0] in SELECT_ALL: self.select_all()
elif keys[0] in SELECT_NEXT_WORD:self.select_next_word()
elif keys[0] in SELECT_PREV_WORD:self.select_previous_word()
# If the edit is not in read only
elif self.read_only is False:
# Modification in the edit field
if keys[0] in COPY: self.copy()
elif keys[0] in CUT: self.cut()
elif keys[0] in PASTE: self.paste()
elif keys[0] in INDENT: self.indent(keys)
elif keys[0] in UNINDENT: self.unindent(keys)
elif keys[0] in CHANGE_CASE: self.change_case()
elif keys[0] in COMMENT: self.comment()
elif keys[0] in BACKSPACE: self.backspace()
elif keys[0] in DELETE: self.delete()
elif keys[0] in NEW_LINE: self.new_line()
elif keys[0] in DELETE_LINE: self.delete_line()
# else: self.add_char(keys)
class Edit:
""" Class which aggregate the View and Text """
def __init__(self, view_top=1, view_height=None, read_only=False):
""" Constructor """
self.view = View(view_height, view_top)
self.text = Text(read_only)
self.text.set_view(self.view)
self.view.set_text(self.text)
class Editor:
""" Class which manage a complete editor """
def __init__(self, filename_, read_only=False):
""" Constructor """
self.file = filename_
self.filename = useful.split(filename_)[1]
self.edit = Edit(read_only=read_only)
self.edit.text.load(filename_)
self.is_refresh_header = True
self.find_text = None
self.replace_text = None
		self.keys = []
self.loop = None
if (not useful.exists(filename_) and read_only is True) or useful.isdir(filename_):
print("Cannot open '%s'"%self.filename)
else:
self.run()
def refresh_header(self):
""" Refresh the header of editor """
if self.is_refresh_header:
self.edit.view.move_cursor(0, 0)
filename_ = "File: %s"%(self.filename)
if self.edit.text.read_only is False:
filename_ += " (*)" if self.edit.text.modified else ""
end = "Mode: %s"%("Replace" if self.edit.text.replace_mode else "Insert")
else:
end = "Read only" if self.edit.text.read_only else ""
header = "\x1B[7m %s%s%s \x1B[m"%(filename_, " "*(self.edit.view.width - len(filename_) - len(end)-2), end)
self.edit.view.write(header)
self.edit.view.move_cursor()
self.is_refresh_header = False
def refresh(self):
""" Refresh the editor """
self.refresh_header()
self.edit.view.refresh()
def toggle_mode(self):
""" Change the replace mode """
		self.edit.text.replace_mode = not self.edit.text.replace_mode
self.is_refresh_header = True
def save(self):
""" Save the file edited """
self.edit.text.save()
self.is_refresh_header = True
def exit(self):
""" Exit from editor """
self.edit.view.cls()
if self.edit.text.modified:
self.edit.view.write("\nSave file '%s' (\x1b[7mY\x1b[m:Yes, \x1b[7mN\x1b[m:No, \x1b[7mEsc\x1b[m:Cancel) : "%self.filename)
self.edit.view.flush()
while 1:
key = useful.getch()
if key == "Y" or key == "y":
if self.edit.text.save():
self.edit.view.write("Saved\n")
self.edit.view.flush()
else:
self.edit.view.write("Failed to save\n")
self.edit.view.flush()
self.loop = False
break
elif key == "N" or key == "n":
self.edit.view.write("Not saved\n")
self.edit.view.flush()
self.loop = False
break
elif key == ESCAPE:
self.edit.view.set_refresh_all()
self.is_refresh_header = True
break
else:
self.loop = False
def input(self, text, help_=""):
""" Input value, used to get a line number, or text searched """
edit_ = Edit(view_top=2, view_height=1, read_only=False)
edit_.view.cls()
edit_.view.move_cursor(1,0)
edit_.view.write(text)
edit_.view.move_cursor(4,0)
edit_.view.write(help_)
result = None
while 1:
edit_.view.refresh()
key = self.get_key()
if key[0] in NEW_LINE:
result = edit_.text.lines[0]
break
elif key[0] in ESCAPE:
break
else:
edit_.text.treat_key(key)
return result
def find(self):
""" Find a text """
self.find_text = self.input("Find :","\x1B[7mEsc\x1B[m:Abort \x1B[7m^Left\x1B[m,\x1B[7m^Up\x1B[m:Previous \x1B[7m^Down\x1B[m,\x1B[7m^Right\x1B[m:Next")
self.find_next()
self.edit.view.set_refresh_all()
self.is_refresh_header = True
def replace(self):
""" Replace a text """
self.find_text = self.input("Find to replace :","\x1B[7mEsc\x1B[m:Abort")
if self.find_text:
self.replace_text = self.input("Replace with :","\x1B[7mEsc\x1B[m:Abort \x1B[7m^Left\x1B[m,\x1B[7m^Up\x1B[m:Previous \x1B[7m^Down\x1B[m,\x1B[7m^Right\x1B[m:Next \x1B[7m^R\x1B[m:Replace")
self.find_next()
self.edit.view.set_refresh_all()
self.is_refresh_header = True
def replace_current(self):
""" Replace current """
if self.find_text and self.replace_text:
if self.edit.text.replace(self.find_text, self.replace_text):
self.find_next()
def find_next(self):
""" Find next text """
if self.find_text:
self.edit.text.find_next(self.find_text)
def find_previous(self):
""" Find previous text """
if self.find_text:
self.edit.text.find_previous(self.find_text)
def goto(self):
""" Goto line """
lineNumber = self.input("Goto line :","\x1B[7mEsc\x1B[m:Abort")
		try:
			self.edit.text.goto(int(lineNumber))
		except (TypeError, ValueError):
			pass
self.edit.view.set_refresh_all()
self.is_refresh_header = True
def group_key(self):
""" Group similar key to optimize move of cursor and edition """
result = [self.keys.pop(0)]
while len(self.keys) > 0 and len(result) <= 10:
if self.keys[0] == result[0]:
result.append(self.keys.pop(0))
else:
if useful.isascii(result[0]) and useful.isascii(self.keys[0]):
result.append(self.keys.pop(0))
else:
break
return result
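	# Worked example: three identical arrow-key codes queued in self.keys are
	# returned as one batch, so arrow_up(keys) moves three lines in a single
	# redraw; runs of printable characters such as ["a", "b"] are grouped the
	# same way for add_char.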
def get_key(self):
""" Get a key pressed """
if len(self.keys) == 0:
while True:
try:
key = useful.getch()
except KeyboardInterrupt:
key = "\x03"
self.keys.append(key)
if useful.kbhit() is False or len(self.keys) > 5:
break
return self.group_key()
def execute(self):
""" Execute the python script edited """
self.save()
loop = True
while loop:
self.edit.view.reset_scroll_region()
self.edit.view.cls()
self.edit.view.flush()
startTime = useful.ticks()
try:
useful.log(None)
useful.import_(self.filename)
except KeyboardInterrupt:
pass
endTime = useful.ticks()
print( "\x1B[7mTime: %d.%03d s Press enter to stop\x1B[m"%((endTime-startTime)/1000, (endTime-startTime)%1000))
while 1:
keys = self.get_key()
if keys[0] in NEW_LINE:
loop = False
break
elif keys[0] in EXECUTE:
break
# else:
# print(useful.dump(keys[0]))
self.edit.view.cls()
self.edit.view.set_refresh_all()
self.is_refresh_header = True
def run(self):
""" Core of the editor """
self.edit.view.cls()
self.edit.view.get_screen_size()
self.loop = True
		while self.loop:
try:
self.refresh()
keys = self.get_key()
modified = self.edit.text.modified
if ord(keys[0][0]) < 0x20:
if keys[0] in TOGGLE_MODE: self.toggle_mode()
elif keys[0] in FIND: self.find()
elif keys[0] in REPLACE: self.replace()
elif keys[0] in FIND_PREVIOUS: self.find_previous()
elif keys[0] in FIND_NEXT: self.find_next()
elif keys[0] in REPLACE_CURRENT:self.replace_current()
elif keys[0] in EXIT: self.exit()
elif keys[0] in GOTO: self.goto()
elif keys[0] in SAVE: self.save()
elif keys[0] in EXECUTE: self.execute()
self.edit.text.treat_key(keys)
if modified != self.edit.text.modified:
self.is_refresh_header = True
except KeyboardInterrupt:
pass
self.edit.view.reset_scroll_region()
self.edit.view.reset()
if __name__ == "__main__":
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = "editor.txt"
edit = Editor(filename, read_only=False)
"""Routes configuration
The more specific and detailed routes should be defined first so they
may take precedent over the more generic routes. For more information
refer to the routes manual at http://routes.groovie.org/docs/
"""
from routes import Mapper
def make_map(config):
"""Create, configure and return the routes Mapper"""
map = Mapper(directory=config['pylons.paths']['controllers'],
always_scan=config['debug'])
map.minimization = False
map.explicit = False
# The ErrorController route (handles 404/500 error pages); it should
# likely stay at the top, ensuring it can always be resolved
map.connect('/error/{action}', controller='error')
map.connect('/error/{action}/{id}', controller='error')
# CUSTOM ROUTES HERE
map.redirect('/*(url)^(/)', '/{url}/', _redirect_code='302 Moved Temporarily')
map.connect('/', controller='home', action='index')
map.connect('/trade/{id}/', controller='trade', action='index')
map.connect('/bet/{betID}/', controller='bet', action='index')
map.connect('/bet/{betID}/switch/', controller='bet', action='switch')
map.connect('/manage/leagues/add/', controller='manage', action='addLeague')
map.connect('/manage/leagues/edit/{id}/', controller='manage', action='editLeague')
map.connect('/manage/leagues/remove/{id}/', controller='manage', action='removeLeague')
map.connect('/manage/teams/', controller='manage', action='teamsLeagues')
map.connect('/manage/teams/{leagueID}/', controller='manage', action='teamsList')
map.connect('/manage/teams/{leagueID}/add/', controller='manage', action='addTeam')
map.connect('/manage/teams/{leagueID}/remove/{teamID}/', controller='manage', action='removeTeam')
map.connect('/manage/teams/{leagueID}/edit/{teamID}/', controller='manage', action='editTeam')
map.connect('/manage/matches/', controller='manage', action='matchesLeagues')
map.connect('/manage/matches/{leagueID}/', controller='manage', action='matchesList')
map.connect('/manage/matches/{leagueID}/add/', controller='manage', action='addMatch')
map.connect('/manage/matches/{leagueID}/remove/{matchID}/', controller='manage', action='removeMatch')
map.connect('/manage/matches/{leagueID}/edit/{matchID}/', controller='manage', action='editMatch')
map.connect('/manage/users/', controller='manage', action='users')
map.connect('/manage/users/{userID}/', controller='manage', action='user')
map.connect('/api/users/name/{name}/', controller='api', action='users')
map.connect('/api/users/steamid/{steamid}/', controller='api', action='users')
map.connect('/api/users/name/{name}/limit/{limit}/', controller='api', action='users')
map.connect('/api/users/steamid/{steamid}/limit/{limit}/', controller='api', action='users')
map.connect('/api/bet/{betID}/bets/', controller='api', action='bets')
map.connect('/api/bet/{betID}/bets/offset/{offset}/', controller='api', action='bets')
map.connect('/api/bet/{betID}/bets/limit/{limit}/', controller='api', action='bets')
map.connect('/api/bet/{betID}/bets/limit/{limit}/offset/{offset}/', controller='api', action='bets')
map.connect('/api/bet/{betID}/bets/offset/{offset}/limit/{limit}/', controller='api', action='bets')
map.connect('/api/refreshsession/', controller='api', action='refreshSession')
map.connect('/{controller}/', action='index')
map.connect('/{controller}/{action}/')
map.connect('/{controller}/{action}/{id}')
return map
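# For example, with this map a GET request for /api/users/name/alice/limit/10/
# is dispatched to the `users` action of the `api` controller with
# name='alice' and limit='10'.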
# Source repository: Akash1S/meethub
# Generated by Django 2.0.4 on 2018-05-28 20:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0009_auto_20180428_0845'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='created_by',
),
migrations.RemoveField(
model_name='comment',
name='event',
),
migrations.DeleteModel(
name='Comment',
),
]
import tensorflow as tf
import rlcard
from rlcard.agents.dqn_agent import DQNAgent
from rlcard.utils.utils import set_global_seed
from rlcard.utils.logger import Logger
# Make environment
# Set the number of steps for collecting normalization statistics
# and the initial memory size
memory_init_size = 1000
norm_step = 100
# The paths for saving the logs and learning curves
## Set a global seed
##set_global_seed(0)
'''
Class DqnModel is a packaged uno agent that is trained, initialized and ready to be used directly in the uno game.
./experiments/uno_dqn_result/models/model1.ckpt is where the trained model is stored; if you want to use your own model, change the path accordingly.
'''
class DqnModel:
def __init__(self):
        env = rlcard.make('uno')
self.sess1= tf.compat.v1.Session()
global_step = tf.Variable(0, name='global_step', trainable=False)
self.agent = DQNAgent(self.sess1,
scope='dqn',
action_num=env.action_num,
replay_memory_init_size=memory_init_size,
norm_step=norm_step,
state_shape=env.state_shape,
mlp_layers=[100,100])
self.sess1.run(tf.global_variables_initializer())
self.saver=tf.train.Saver()
self.saver.restore(self.sess1,'./experiments/uno_dqn_result/models/model1.ckpt')
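# A minimal usage sketch (hypothetical; `state` must be a state dict produced
# by the rlcard uno environment, and the checkpoint restored above must exist):
#   model = DqnModel()
#   action = model.agent.eval_step(state)  # pick an action for the given state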
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 18 11:42:58 2016
@author: utkarsh
"""
import numpy as np
import sys
from FingerprintImageEnhancer import FingerprintImageEnhancer
import cv2
# if __name__ == '__main__':
# image_enhancer = FingerprintImageEnhancer() # Create object called image_enhancer
# if(len(sys.argv)<2): # load input image
# print('loading sample image');
# img_name = '2.jpg'
# img = cv2.imread('../images/' + img_name)
# elif(len(sys.argv) >= 2):
# img_name = sys.argv[1];
# img = cv2.imread('../images/' + img_name)
# if(len(img.shape)>2): # convert image into gray if necessary
# img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# out = image_enhancer.enhance(img) # run image enhancer
# image_enhancer.save_enhanced_image('../enhanced/' + img_name) # save output
if __name__ == '__main__':
img_path = 'D:/PR/ex4/FingerprintFeatureExtractionAndDescription-master/DB3_B/102_7.tif'
# img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), cv2.IMREAD_COLOR)
img = cv2.imread(img_path)
    if len(img.shape) > 2:                          # convert image into gray if necessary
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    image_enhancer = FingerprintImageEnhancer()
    out = image_enhancer.enhance(img)
# Source repository: mrbot-ai/deep_qa
from .babi_instance import BabiInstance, IndexedBabiInstance
from .multiple_true_false_instance import MultipleTrueFalseInstance, IndexedMultipleTrueFalseInstance
from .multiple_true_false_instance import convert_dataset_to_multiple_true_false
from .question_answer_instance import QuestionAnswerInstance, IndexedQuestionAnswerInstance
from .tuple_inference_instance import TupleInferenceInstance, IndexedTupleInferenceInstance
from setuptools import setup
from setuptools.command.install import install
import subprocess
class NPMInstall(install):
"""
NPMInstall installs the packages in `package.json`
which are `topojson-client` and `topojson-server`
used to convert between geojson and topojson
"""
def run(self):
print("Installing NodeJS prerequisites")
        status, _ = subprocess.getstatusoutput("npm")
        if status == 1:
            # `npm` with no arguments prints its usage and exits with status 1,
            # so a status of 1 means the npm binary is available on PATH
            subprocess.run(["npm", "install"])
# subprocess.run(["npm","--global","install"])
else:
print("You have to install package.json manually")
install.run(self)
setup(name='gis_utils',
cmdclass={
'install' : NPMInstall,
},
version='0.0.1',
description='Convert between Shapefile, GeoJSON and TopoJSON',
url='https://github.com/Daniel-M/gis_utils',
author='Daniel-M',
author_email='<EMAIL>',
license='MIT',
packages=['gis_utils'],
install_requires=[
'pyshp',
'geojson'
],
python_requires='>=3',
zip_safe=False)
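# With the cmdclass above, `python setup.py install` runs NPMInstall first,
# so the topojson-client / topojson-server packages listed in package.json
# are fetched via `npm install` before the Python package itself is installed.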
# Source repository: ubcgif/notebooks
import sys
sys.path.append("./simpeg")
sys.path.append("./simpegdc/")
import warnings
warnings.filterwarnings('ignore')  # only use this once you are sure things are working
from SimPEG import Mesh, Maps
import numpy as np
import simpegDCIP as DC
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
try:
    from IPython.html.widgets import interact, IntSlider, FloatSlider, FloatText, ToggleButtons
except Exception:
    from ipywidgets import interact, IntSlider, FloatSlider, FloatText, ToggleButtons
# Mesh, mapping can be globals global
npad = 8
cs = 1.
hx = [(cs,npad, -1.3),(cs,100),(cs,npad, 1.3)]
hy = [(cs,npad, -1.3),(cs,50)]
mesh = Mesh.TensorMesh([hx, hy], "CN")
circmap = Maps.CircleMap(mesh)
circmap.slope = 1e5
mapping = circmap
dx = 5
xr = np.arange(-40,41,dx)
dxr = np.diff(xr)
def DC2Dsurvey(flag="PoleDipole"):
if flag =="PoleDipole":
ntx, nmax = xr.size-2, 8
elif flag =="DipolePole":
ntx, nmax = xr.size-2, 8
elif flag =="DipoleDipole":
ntx, nmax = xr.size-3, 8
else:
raise Exception('Not Implemented')
xzlocs = getPseudoLocs(xr, ntx, nmax, flag)
txList = []
zloc = -2.5
for i in range(ntx):
if flag == "PoleDipole":
A = np.r_[xr[i], zloc]
B = np.r_[mesh.vectorCCx.min(), zloc]
if i < ntx-nmax+1:
M = np.c_[xr[i+1:i+1+nmax], np.ones(nmax)*zloc]
N = np.c_[xr[i+2:i+2+nmax], np.ones(nmax)*zloc]
else:
M = np.c_[xr[i+1:ntx+1], np.ones(ntx-i)*zloc]
N = np.c_[xr[i+2:i+2+nmax], np.ones(ntx-i)*zloc]
elif flag =="DipolePole":
A = np.r_[xr[i], zloc]
B = np.r_[xr[i+1], zloc]
if i < ntx-nmax+1:
M = np.c_[xr[i+2:i+2+nmax], np.ones(nmax)*zloc]
N = np.c_[np.ones(nmax)*mesh.vectorCCx.max(), np.ones(nmax)*zloc]
else:
M = np.c_[xr[i+2:ntx+2], np.ones(ntx-i)*zloc]
N = np.c_[np.ones(ntx-i)*mesh.vectorCCx.max(), np.ones(ntx-i)*zloc]
elif flag =="DipoleDipole":
A = np.r_[xr[i], zloc]
B = np.r_[xr[i+1], zloc]
if i < ntx-nmax:
M = np.c_[xr[i+2:i+2+nmax], np.ones(len(xr[i+2:i+2+nmax]))*zloc]
N = np.c_[xr[i+3:i+3+nmax], np.ones(len(xr[i+3:i+3+nmax]))*zloc]
else:
M = np.c_[xr[i+2:len(xr)-1], np.ones(len(xr[i+2:len(xr)-1]))*zloc]
N = np.c_[xr[i+3:len(xr)], np.ones(len(xr[i+3:len(xr)]))*zloc]
rx = DC.RxDipole(M, N)
src = DC.SrcDipole([rx], A, B)
txList.append(src)
survey = DC.SurveyDC(txList)
problem = DC.ProblemDC_CC(mesh, mapping = mapping)
problem.pair(survey)
sigblk, sighalf = 2e-2, 2e-3
xc, yc, r = -15, -8, 4
mtrue = np.r_[np.log(sigblk), np.log(sighalf), xc, yc, r]
dtrue = survey.dpred(mtrue)
perc = 0.1
floor = np.linalg.norm(dtrue)*1e-3
np.random.seed([1])
uncert = np.random.randn(survey.nD)*perc + floor
dobs = dtrue + uncert
return dobs, uncert, survey, xzlocs
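# Example (a sketch, meant for the notebook environment this module targets):
#   dobs, uncert, survey, xzlocs = DC2Dsurvey("DipoleDipole")
# builds the synthetic observed data, its uncertainties, the SimPEG survey
# object and the pseudo-section plotting locations used by the widgets below.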
def getPseudoLocs(xr, ntx, nmax, flag = "PoleDipole"):
xloc = []
yloc = []
for i in range(ntx):
if i < ntx-nmax+1:
            if flag == 'DipoleDipole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:i+1+nmax]+dxr[i+1:i+1+nmax]*0.5
            elif flag == 'PoleDipole':
txmid = xr[i]
rxmid = xr[i+1:i+1+nmax]+dxr[i+1:i+1+nmax]*0.5
            elif flag == 'DipolePole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:i+1+nmax]
mid = (txmid+rxmid)*0.5
xloc.append(mid)
yloc.append(np.arange(nmax)+1.)
else:
            if flag == 'DipoleDipole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
            elif flag == 'PoleDipole':
txmid = xr[i]
rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
            elif flag == 'DipolePole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:ntx+1]
mid = (txmid+rxmid)*0.5
xloc.append(mid)
yloc.append(np.arange(mid.size)+1.)
xlocvec = np.hstack(xloc)
ylocvec = np.hstack(yloc)
return np.c_[xlocvec, ylocvec]
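# Worked example: a dipole-dipole reading whose transmitter midpoint is at
# x = -20 m and whose nearest receiver midpoint is at x = -10 m is plotted
# at the pseudo-location x = (-20 + -10)/2 = -15 m, at n-spacing 1.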
def PseudoSectionPlotfnc(i,j,survey,flag="PoleDipole"):
matplotlib.rcParams['font.size'] = 14
nmax = 8
dx = 5
xr = np.arange(-40,41,dx)
ntx = xr.size-2
dxr = np.diff(xr)
TxObj = survey.srcList
TxLoc = TxObj[i].loc
RxLoc = TxObj[i].rxList[0].locs
fig = plt.figure(figsize=(10, 3))
ax = fig.add_subplot(111, autoscale_on=False, xlim=(xr.min()-5, xr.max()+5), ylim=(nmax+1, -2))
plt.plot(xr, np.zeros_like(xr), 'ko', markersize=4)
if flag == "PoleDipole":
plt.plot(TxLoc[0][0], np.zeros(1), 'rv', markersize=10)
# print([TxLoc[0][0],0])
ax.annotate('A', xy=(TxLoc[0][0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
else:
plt.plot([TxLoc[0][0],TxLoc[1][0]], np.zeros(2), 'rv', markersize=10)
# print([[TxLoc[0][0],0],[TxLoc[1][0],0]])
ax.annotate('A', xy=(TxLoc[0][0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
ax.annotate('B', xy=(TxLoc[1][0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
# for i in range(ntx):
if i < ntx-nmax+1:
if flag == "PoleDipole":
txmid = TxLoc[0][0]
else:
txmid = (TxLoc[0][0] + TxLoc[1][0])*0.5
MLoc = RxLoc[0][j]
NLoc = RxLoc[1][j]
# plt.plot([MLoc[0],NLoc[0]], np.zeros(2), 'b^', markersize=10)
# ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
# ax.annotate('N', xy=(NLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
if flag == "DipolePole":
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
rxmid = MLoc[0]
else:
rxmid = (MLoc[0]+NLoc[0])*0.5
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
plt.plot(NLoc[0], np.zeros(1), 'b^', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
ax.annotate('N', xy=(NLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
mid = (txmid+rxmid)*0.5
midSep = np.sqrt(np.square(txmid-rxmid))
plt.plot(txmid, np.zeros(1), 'ro')
plt.plot(rxmid, np.zeros(1), 'bo')
plt.plot(mid, midSep/2., 'go')
plt.plot(np.r_[txmid, mid], np.r_[0, midSep/2.], 'k:')
# for j in range(nmax):
# plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
plt.plot(np.r_[rxmid, mid], np.r_[0, midSep/2.], 'k:')
else:
if flag == "PoleDipole":
txmid = TxLoc[0][0]
else:
txmid = (TxLoc[0][0] + TxLoc[1][0])*0.5
MLoc = RxLoc[0][j]
NLoc = RxLoc[1][j]
if flag == "DipolePole":
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
rxmid = MLoc[0]
else:
rxmid = (MLoc[0]+NLoc[0])*0.5
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
plt.plot(NLoc[0], np.zeros(1), 'b^', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
ax.annotate('N', xy=(NLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
# plt.plot([MLoc[0],NLoc[0]], np.zeros(2), 'b^', markersize=10)
# ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
# ax.annotate('N', xy=(NLoc[0], np.zeros(1)), xycoords='data', xytext=(-4.25, 7.5), textcoords='offset points')
# rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
mid = (txmid+rxmid)*0.5
plt.plot((txmid+rxmid)*0.5, np.arange(mid.size)+1., 'bo')
plt.plot(rxmid, np.zeros(rxmid.size), 'go')
plt.plot(np.r_[txmid, mid[-1]], np.r_[0, mid.size], 'k:')
for j in range(ntx-i):
plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
plt.xlabel("X (m)")
plt.ylabel("N-spacing")
plt.xlim(xr.min()-5, xr.max()+5)
plt.ylim(nmax*dx/2+dx, -2*dx)
plt.show()
return
def DipoleDipolefun(i):
matplotlib.rcParams['font.size'] = 14
plt.figure(figsize=(10, 3))
nmax = 8
xr = np.linspace(-40, 40, 20)
ntx = xr.size-2
dxr = np.diff(xr)
plt.plot(xr[:-1]+dxr*0.5, np.zeros_like(xr[:-1]), 'ko')
plt.plot(xr[i]+dxr[i]*0.5, np.zeros(1), 'ro')
# for i in range(ntx):
if i < ntx-nmax+1:
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:i+1+nmax]+dxr[i+1:i+1+nmax]*0.5
mid = (txmid+rxmid)*0.5
plt.plot(rxmid, np.zeros(rxmid.size), 'go')
plt.plot(mid, np.arange(nmax)+1., 'bo')
plt.plot(np.r_[txmid, mid[-1]], np.r_[0, nmax], 'k:')
for j in range(nmax):
plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
else:
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
mid = (txmid+rxmid)*0.5
plt.plot((txmid+rxmid)*0.5, np.arange(mid.size)+1., 'bo')
plt.plot(rxmid, np.zeros(rxmid.size), 'go')
plt.plot(np.r_[txmid, mid[-1]], np.r_[0, mid.size], 'k:')
for j in range(ntx-i):
plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
plt.xlabel("X (m)")
plt.ylabel("N-spacing")
plt.xlim(xr.min(), xr.max())
plt.ylim(nmax+1, -1)
plt.show()
return
def PseudoSectionWidget(survey,flag):
dx = 5
xr = np.arange(-40,41,dx)
if flag =="PoleDipole":
ntx, nmax = xr.size-2, 8
dxr = np.diff(xr)
elif flag =="DipolePole":
ntx, nmax = xr.size-1, 7
dxr = xr
elif flag =="DipoleDipole":
ntx, nmax = xr.size-3, 8
dxr = np.diff(xr)
xzlocs = getPseudoLocs(dxr, ntx, nmax,flag)
PseudoSectionPlot = lambda i,j: PseudoSectionPlotfnc(i,j,survey,flag)
return interact(PseudoSectionPlot, i=IntSlider(min=0, max=ntx-1, step = 1, value=0),j=IntSlider(min=0, max=nmax-1, step = 1, value=0))
def MidpointPseudoSectionWidget():
ntx = 18
return interact(DipoleDipolefun, i=IntSlider(min=0, max=ntx-1, step = 1, value=0))
def DC2Dfwdfun(mesh, survey, mapping, xr, xzlocs, rhohalf, rhoblk, xc, yc, r, dobs, uncert, predmis, nmax=8, plotFlag=None):
matplotlib.rcParams['font.size'] = 14
sighalf, sigblk = 1./rhohalf, 1./rhoblk
m0 = np.r_[np.log(sighalf), np.log(sighalf), xc, yc, r]
dini = survey.dpred(m0)
mtrue = np.r_[np.log(sigblk), np.log(sighalf), xc, yc, r]
dpred = survey.dpred(mtrue)
xi, yi = np.meshgrid(np.linspace(xr.min(), xr.max(), 120), np.linspace(1., nmax, 100))
appres = dpred/dini/sighalf
appresobs = dobs/dini/sighalf
pred = pylab.griddata(xzlocs[:,0], xzlocs[:,1], appres, xi, yi, interp='linear')
if plotFlag is not None:
fig = plt.figure(figsize = (12, 6))
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
dat1 = mesh.plotImage(np.log10(1./(mapping*mtrue)), ax=ax1, clim=(0, 3), grid=True, gridOpts={'color':'k', 'alpha':0.5})
cb1 = plt.colorbar(dat1[0], ticks=np.linspace(0, 3, 5), ax=ax1, format="$10^{%4.1f}$")
cb1.set_label("Resistivity (ohm-m)")
ax1.set_ylim(-30, 0.)
ax1.set_xlim(-40, 40)
ax1.set_xlabel("")
ax1.set_ylabel("Depth (m)")
dat2 = ax2.contourf(xi, yi, pred, 10)
ax2.contour(xi, yi, pred, 10, colors='k', alpha=0.5)
ax2.plot(xzlocs[:,0], xzlocs[:,1],'k.', ms = 3)
cb2 = plt.colorbar(dat2, ax=ax2, ticks=np.linspace(appres.min(), appres.max(), 5),format="%4.0f")
cb2.set_label("Apparent Resistivity \n (ohm-m)")
ax2.text(-38, 7, "Predicted")
ax2.set_ylim(nmax+1, 0.)
ax2.set_ylabel("N-spacing")
ax2.set_xlabel("Distance (m)")
else:
obs = pylab.griddata(xzlocs[:,0], xzlocs[:,1], appresobs, xi, yi, interp='linear')
fig = plt.figure(figsize = (12, 8))
ax1 = plt.subplot(311)
dat1 = mesh.plotImage(np.log10(1./(mapping*mtrue)), ax=ax1, clim=(0, 3), grid=True, gridOpts={'color':'k', 'alpha':0.5})
cb1 = plt.colorbar(dat1[0], ticks=np.linspace(0, 3, 5), ax=ax1, format="$10^{%4.1f}$")
cb1.set_label("Resistivity (ohm-m)")
ax1.set_ylim(-30, 0.)
ax1.set_xlim(-40, 40)
ax1.set_xlabel("")
ax1.set_ylabel("Depth (m)")
ax2 = plt.subplot(312)
dat2 = ax2.contourf(xi, yi, obs, 10)
ax2.contour(xi, yi, obs, 10, colors='k', alpha=0.5)
ax2.plot(xzlocs[:,0], xzlocs[:,1],'k.', ms = 3)
cb2 = plt.colorbar(dat2, ax=ax2, ticks=np.linspace(appresobs.min(), appresobs.max(), 5), format="%4.1f")
cb2.set_label("Apparent Resistivity \n (ohm-m)")
ax2.set_ylim(nmax+1, 0.)
ax2.set_ylabel("N-spacing")
ax2.text(-38, 7, "Observed")
ax3 = plt.subplot(313)
if predmis=="pred":
dat3 = ax3.contourf(xi, yi, pred, 10)
ax3.contour(xi, yi, pred, 10, colors='k', alpha=0.5)
ax3.plot(xzlocs[:,0], xzlocs[:,1],'k.', ms = 3)
cb3 = plt.colorbar(dat3, ax=ax3, ticks=np.linspace(appres.min(), appres.max(), 5),format="%4.0f")
cb3.set_label("Apparent Resistivity \n (ohm-m)")
ax3.text(-38, 7, "Predicted")
elif predmis=="mis":
mis = (appresobs-appres)/(0.1*appresobs)
Mis = pylab.griddata(xzlocs[:,0], xzlocs[:,1], mis, xi, yi, interp='linear')
dat3 = ax3.contourf(xi, yi, Mis, 10)
ax3.contour(xi, yi, Mis, 10, colors='k', alpha=0.5)
ax3.plot(xzlocs[:,0], xzlocs[:,1],'k.', ms = 3)
cb3 = plt.colorbar(dat3, ax=ax3, ticks=np.linspace(mis.min(), mis.max(), 5), format="%4.2f")
cb3.set_label("Normalized misfit")
ax3.text(-38, 7, "Misifit")
ax3.set_ylim(nmax+1, 0.)
ax3.set_ylabel("N-spacing")
ax3.set_xlabel("Distance (m)")
plt.show()
return
def DC2DPseudoWidgetWrapper(rhohalf,rhosph,xc,zc,r,surveyType):
dobs, uncert, survey, xzlocs = DC2Dsurvey(surveyType)
DC2Dfwdfun(mesh, survey, mapping, xr, xzlocs, rhohalf, rhosph, xc, zc, r, dobs, uncert, 'pred',plotFlag='PredOnly')
return None
def DC2DPseudoWidget():
# print xzlocs
Q = interact(DC2DPseudoWidgetWrapper,
rhohalf = FloatSlider(min=10, max=1000, step=1, value = 1000),
rhosph = FloatSlider(min=10, max=1000, step=1, value = 50),
xc = FloatSlider(min=-40, max=40, step=1, value = 0),
zc = FloatSlider(min= -20, max=0, step=1, value = -10),
r = FloatSlider(min= 0, max=15, step=0.5, value = 5),
surveyType = ToggleButtons(options=['DipoleDipole','PoleDipole','DipolePole'])
)
return Q
def DC2DfwdWrapper(rhohalf,rhosph,xc,zc,r,predmis,surveyType):
dobs, uncert, survey, xzlocs = DC2Dsurvey(surveyType)
DC2Dfwdfun(mesh, survey, mapping, xr, xzlocs, rhohalf, rhosph, xc, zc, r, dobs, uncert, predmis)
return None
def DC2DfwdWidget():
# print xzlocs
Q = interact(DC2DfwdWrapper,
rhohalf = FloatSlider(min=10, max=1000, step=1, value = 1000),
rhosph = FloatSlider(min=10, max=1000, step=1, value = 50),
xc = FloatSlider(min=-40, max=40, step=1, value = 0),
zc = FloatSlider(min= -20, max=0, step=1, value = -10),
r = FloatSlider(min= 0, max=15, step=0.5, value = 5),
predmis = ToggleButtons(options=['pred','mis']),
surveyType = ToggleButtons(options=['DipoleDipole','PoleDipole','DipolePole'])
)
return Q
class Vertex:
def __init__(self, x):
self._val = x
def element(self):
return self._val
def __hash__(self):
return hash(id(self))
class Edges:
def __init__(self, o, d, x=None):
self._ori = o
self._des = d
self._val = x
def startpoint(self):
return self._ori
def endpoint(self):
return self._des
def opposite(self, u):
if u == self._ori:
return self._des
else:
return self._ori
def element(self):
return self._val
def __hash__(self):
return hash((self._ori, self._des))
class Graph:
def __init__(self):
self._vertices = {}
def vertex_count(self):
return len(self._vertices)
def edge_count(self):
        # each undirected edge is stored once per endpoint
        return sum(len(adj) for adj in self._vertices.values()) // 2
def vertices(self):
return self._vertices.keys()
def edges(self):
        # collect each distinct edge object from the per-vertex adjacency maps
        seen = set()
        for adjacency in self._vertices.values():
            seen.update(adjacency.values())
        return list(seen)
def get_edges(self, u, v):
if u in self._vertices.keys():
return self._vertices[u][v]
else:
return self._vertices[v][u]
def degree(self, u):
if u in self._vertices.keys():
return len(self._vertices[u])
else:
return "Vertex not found"
    def incident_edges(self, u):
        # return the edges incident to vertex u
        if u in self._vertices:
            return list(self._vertices[u].values())
        else:
            return "Vertex not found"
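# A minimal sketch of how this Graph could be populated and queried. The
# insert helpers are NOT part of the class above; they are assumptions that
# match the adjacency shape the methods expect (self._vertices[u][v] -> Edges):
#
#   def insert_vertex(g, x):
#       v = Vertex(x)
#       g._vertices[v] = {}
#       return v
#
#   def insert_edge(g, u, v, x=None):
#       e = Edges(u, v, x)
#       g._vertices[u][v] = e
#       g._vertices[v][u] = e
#       return e
#
#   g = Graph()
#   a, b = insert_vertex(g, "a"), insert_vertex(g, "b")
#   insert_edge(g, a, b, 7)
#   g.degree(a)      # 1
#   g.edge_count()   # 1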
import importlib.util
import pkg_resources
class ModuleChecker:
def find_module(self, module):
"""Search for modules specification."""
try:
return importlib.util.find_spec(module)
except ImportError:
return None
def find_distribution(self, dist):
"""Search for distribution with specified version (eg 'numpy>=1.15')."""
try:
return pkg_resources.require(dist)
except Exception:
return None
def check(self, module):
"""
Return True if module with specified version exists.
>>> ModuleChecker().check('foo>=1.0')
False
>>> ModuleChecker().check('pytest>1.0')
True
"""
mods = self.find_module(module) or self.find_distribution(module)
return bool(mods)
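# Example usage (assumes pytest is installed in the running environment):
#   checker = ModuleChecker()
#   checker.check('pytest')        # resolved via the importlib spec lookup
#   checker.check('pytest>=1.0')   # resolved via pkg_resources.require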
# Source repository: timgates42/ramses
import pytest
from mock import Mock, patch
from ramses import utils
class TestUtils(object):
def test_contenttypes(self):
assert utils.ContentTypes.JSON == 'application/json'
assert utils.ContentTypes.TEXT_XML == 'text/xml'
assert utils.ContentTypes.MULTIPART_FORMDATA == \
'multipart/form-data'
assert utils.ContentTypes.FORM_URLENCODED == \
'application/x-www-form-urlencoded'
def test_convert_schema_json(self):
schema = utils.convert_schema({'foo': 'bar'}, 'application/json')
assert schema == {'foo': 'bar'}
def test_convert_schema_json_error(self):
with pytest.raises(TypeError) as ex:
utils.convert_schema('foo', 'application/json')
assert 'Schema is not a valid JSON' in str(ex.value)
def test_convert_schema_xml(self):
assert utils.convert_schema({'foo': 'bar'}, 'text/xml') is None
def test_is_dynamic_uri(self):
assert utils.is_dynamic_uri('/{id}')
assert not utils.is_dynamic_uri('/collection')
def test_clean_dynamic_uri(self):
clean = utils.clean_dynamic_uri('/{item_id}')
assert clean == 'item_id'
def test_generate_model_name(self):
resource = Mock(path='/zoo/alien-users')
model_name = utils.generate_model_name(resource)
assert model_name == 'AlienUser'
@patch.object(utils, 'get_resource_children')
def test_dynamic_part_name(self, get_children):
get_children.return_value = [
Mock(path='/items'), Mock(path='/{myid}')]
resource = Mock()
part_name = utils.dynamic_part_name(
resource, 'stories', 'default_id')
assert part_name == 'stories_myid'
get_children.assert_called_once_with(resource)
@patch.object(utils, 'get_resource_children')
def test_dynamic_part_name_no_dynamic(self, get_children):
get_children.return_value = [Mock(path='/items')]
resource = Mock()
part_name = utils.dynamic_part_name(
resource, 'stories', 'default_id')
assert part_name == 'stories_default_id'
get_children.assert_called_once_with(resource)
@patch.object(utils, 'get_resource_children')
def test_dynamic_part_name_no_resources(self, get_children):
get_children.return_value = []
resource = Mock(resources=None)
part_name = utils.dynamic_part_name(
resource, 'stories', 'default_id')
assert part_name == 'stories_default_id'
get_children.assert_called_once_with(resource)
def test_extract_dynamic_part(self):
assert utils.extract_dynamic_part('/stories/{id}/foo') == 'id'
assert utils.extract_dynamic_part('/stories/{id}') == 'id'
def test_extract_dynamic_part_fail(self):
assert utils.extract_dynamic_part('/stories/id') is None
def _get_mock_method_resources(self, *methods):
return [Mock(method=meth) for meth in methods]
@patch.object(utils, 'get_resource_children')
@patch.object(utils, 'get_resource_siblings')
def test_resource_view_attrs_no_dynamic_subres(self, get_sib, get_child):
get_child.return_value = []
get_sib.return_value = self._get_mock_method_resources(
'get', 'post', 'put', 'patch', 'delete')
resource = Mock()
attrs = utils.resource_view_attrs(resource, singular=False)
get_sib.assert_called_once_with(resource)
get_child.assert_called_once_with(resource)
assert attrs == set(['create', 'delete_many', 'index', 'update_many'])
@patch.object(utils, 'get_resource_children')
@patch.object(utils, 'get_resource_siblings')
def test_resource_view_attrs_dynamic_subres(self, get_sib, get_child):
get_child.return_value = self._get_mock_method_resources(
'get', 'put', 'patch', 'delete')
get_sib.return_value = self._get_mock_method_resources(
'get', 'post', 'put', 'patch', 'delete')
resource = Mock()
attrs = utils.resource_view_attrs(resource, singular=False)
get_sib.assert_called_once_with(resource)
get_child.assert_called_once_with(resource)
assert attrs == set([
'create', 'delete_many', 'index', 'update_many',
'show', 'update', 'delete', 'replace'
])
@patch.object(utils, 'get_resource_children')
@patch.object(utils, 'get_resource_siblings')
def test_resource_view_attrs_singular(self, get_sib, get_child):
get_child.return_value = []
get_sib.return_value = self._get_mock_method_resources(
'get', 'post', 'put', 'patch', 'delete')
resource = Mock()
attrs = utils.resource_view_attrs(resource, singular=True)
get_sib.assert_called_once_with(resource)
get_child.assert_called_once_with(resource)
assert attrs == set(['create', 'delete', 'show', 'update', 'replace'])
@patch.object(utils, 'get_resource_children')
@patch.object(utils, 'get_resource_siblings')
def test_resource_view_attrs_no_subresources(self, get_sib, get_child):
child_res = self._get_mock_method_resources('get')
child_res[0].path = '/items'
get_child.return_value = child_res
get_sib.return_value = self._get_mock_method_resources(
'get', 'post', 'put', 'patch', 'delete')
resource = Mock()
attrs = utils.resource_view_attrs(resource, singular=False)
get_sib.assert_called_once_with(resource)
get_child.assert_called_once_with(resource)
assert attrs == set(['create', 'delete_many', 'index', 'update_many'])
@patch.object(utils, 'get_resource_children')
@patch.object(utils, 'get_resource_siblings')
def test_resource_view_attrs_no_methods(self, get_sib, get_child):
get_sib.return_value = []
get_child.return_value = []
resource = Mock()
attrs = utils.resource_view_attrs(resource, singular=False)
get_sib.assert_called_once_with(resource)
get_child.assert_called_once_with(resource)
assert attrs == set()
@patch.object(utils, 'get_resource_children')
@patch.object(utils, 'get_resource_siblings')
def test_resource_view_attrs_not_supported_method(
self, get_sib, get_child):
get_sib.return_value = []
get_child.return_value = self._get_mock_method_resources(
'nice_method')
resource = Mock()
attrs = utils.resource_view_attrs(resource, singular=False)
assert attrs == set()
def test_resource_schema_no_body(self):
resource = Mock(body=None)
with pytest.raises(ValueError) as ex:
utils.resource_schema(resource)
expected = 'RAML resource has no body to setup database'
assert expected in str(ex.value)
def test_resource_schema_no_schemas(self):
resource = Mock(body=[Mock(schema=None), Mock(schema='')])
assert utils.resource_schema(resource) is None
def test_resource_schema_success(self):
resource = Mock(body=[
Mock(schema={'foo': 'bar'},
mime_type=utils.ContentTypes.JSON)
])
assert utils.resource_schema(resource) == {'foo': 'bar'}
def test_is_dynamic_resource_no_resource(self):
assert not utils.is_dynamic_resource(None)
def test_is_dynamic_resource_dynamic(self):
resource = Mock(path='/{id}')
assert utils.is_dynamic_resource(resource)
def test_is_dynamic_resource_not_dynamic(self):
resource = Mock(path='/stories')
assert not utils.is_dynamic_resource(resource)
def test_get_static_parent(self):
parent = Mock(path='/stories', method='post')
resource = Mock(path='/{id}')
resource.parent = parent
assert utils.get_static_parent(resource, method='post') is parent
def test_get_static_parent_none(self):
resource = Mock(path='/{id}')
resource.parent = None
assert utils.get_static_parent(resource, method='post') is None
def test_get_static_parent_wrong_parent_method(self):
root = Mock(resources=[
Mock(path='/stories', method='options'),
Mock(path='/users', method='post'),
Mock(path='/stories', method='post'),
])
parent = Mock(path='/stories', method='get', root=root)
resource = Mock(path='/{id}')
resource.parent = parent
res = utils.get_static_parent(resource, method='post')
assert res.method == 'post'
assert res.path == '/stories'
def test_get_static_parent_without_method_parent_present(self):
root = Mock(resources=[
Mock(path='/stories', method='options'),
Mock(path='/stories', method='post'),
])
parent = Mock(path='/stories', method='get', root=root)
resource = Mock(path='/{id}')
resource.parent = parent
res = utils.get_static_parent(resource)
assert res.method == 'get'
assert res.path == '/stories'
def test_get_static_parent_none_found_in_root(self):
root = Mock(resources=[
Mock(path='/stories', method='get'),
])
parent = Mock(path='/stories', method='options', root=root)
resource = Mock(path='/{id}')
resource.parent = parent
assert utils.get_static_parent(resource, method='post') is None
@patch('ramses.utils.get_static_parent')
@patch('ramses.utils.resource_schema')
def test_attr_subresource_no_static_parent(self, mock_schema, mock_par):
mock_par.return_value = None
assert not utils.attr_subresource('foo', 1)
mock_par.assert_called_once_with('foo', method='POST')
@patch('ramses.utils.get_static_parent')
@patch('ramses.utils.resource_schema')
def test_attr_subresource_no_schema(self, mock_schema, mock_par):
parent = Mock()
mock_par.return_value = parent
mock_schema.return_value = None
assert not utils.attr_subresource('foo', 1)
mock_par.assert_called_once_with('foo', method='POST')
mock_schema.assert_called_once_with(parent)
@patch('ramses.utils.get_static_parent')
@patch('ramses.utils.resource_schema')
def test_attr_subresource_not_attr(self, mock_schema, mock_par):
parent = Mock()
mock_par.return_value = parent
mock_schema.return_value = {
'properties': {
'route_name': {
'_db_settings': {
'type': 'string'
}
}
}
}
assert not utils.attr_subresource('resource', 'route_name')
mock_par.assert_called_once_with('resource', method='POST')
mock_schema.assert_called_once_with(parent)
@patch('ramses.utils.get_static_parent')
@patch('ramses.utils.resource_schema')
def test_attr_subresource_dict(self, mock_schema, mock_par):
parent = Mock()
mock_par.return_value = parent
mock_schema.return_value = {
'properties': {
'route_name': {
'_db_settings': {
'type': 'dict'
}
},
'route_name2': {
'_db_settings': {
'type': 'list'
}
}
}
}
assert utils.attr_subresource('resource', 'route_name')
mock_par.assert_called_once_with('resource', method='POST')
mock_schema.assert_called_once_with(parent)
assert utils.attr_subresource('resource', 'route_name2')
@patch('ramses.utils.get_static_parent')
@patch('ramses.utils.resource_schema')
def test_singular_subresource_no_static_parent(self, mock_schema, mock_par):
mock_par.return_value = None
assert not utils.singular_subresource('foo', 1)
mock_par.assert_called_once_with('foo', method='POST')
@patch('ramses.utils.get_static_parent')
@patch('ramses.utils.resource_schema')
def test_singular_subresource_no_schema(self, mock_schema, mock_par):
parent = Mock()
mock_par.return_value = parent
mock_schema.return_value = None
assert not utils.singular_subresource('foo', 1)
mock_par.assert_called_once_with('foo', method='POST')
mock_schema.assert_called_once_with(parent)
@patch('ramses.utils.get_static_parent')
@patch('ramses.utils.resource_schema')
def test_singular_subresource_not_attr(self, mock_schema, mock_par):
parent = Mock()
mock_par.return_value = parent
mock_schema.return_value = {
'properties': {
'route_name': {
'_db_settings': {
'type': 'string'
}
}
}
}
assert not utils.singular_subresource('resource', 'route_name')
mock_par.assert_called_once_with('resource', method='POST')
mock_schema.assert_called_once_with(parent)
@patch('ramses.utils.get_static_parent')
@patch('ramses.utils.resource_schema')
def test_singular_subresource_dict(self, mock_schema, mock_par):
parent = Mock()
mock_par.return_value = parent
mock_schema.return_value = {
'properties': {
'route_name': {
'_db_settings': {
'type': 'relationship',
'uselist': False
}
},
}
}
assert utils.singular_subresource('resource', 'route_name')
mock_par.assert_called_once_with('resource', method='POST')
mock_schema.assert_called_once_with(parent)
def test_is_callable_tag_not_str(self):
assert not utils.is_callable_tag(1)
assert not utils.is_callable_tag(None)
def test_is_callable_tag_not_tag(self):
assert not utils.is_callable_tag('foobar')
def test_is_callable_tag(self):
assert utils.is_callable_tag('{{foobar}}')
def test_resolve_to_callable_not_found(self):
with pytest.raises(ImportError) as ex:
utils.resolve_to_callable('{{foobar}}')
assert str(ex.value) == 'Failed to load callable `foobar`'
def test_resolve_to_callable_registry(self):
from ramses import registry
@registry.add
def foo():
pass
func = utils.resolve_to_callable('{{foo}}')
assert func is foo
func = utils.resolve_to_callable('foo')
assert func is foo
def test_resolve_to_callable_dotted_path(self):
from datetime import datetime
func = utils.resolve_to_callable('{{datetime.datetime}}')
assert func is datetime
func = utils.resolve_to_callable('datetime.datetime')
assert func is datetime
def test_get_events_map(self):
from nefertari import events
events_map = utils.get_events_map()
after, before = events_map['after'], events_map['before']
after_set, before_set = after.pop('set'), before.pop('set')
assert sorted(events.BEFORE_EVENTS.keys()) == sorted(
before.keys())
assert sorted(events.AFTER_EVENTS.keys()) == sorted(
after.keys())
assert after_set == [
events.AfterCreate,
events.AfterUpdate,
events.AfterReplace,
events.AfterUpdateMany,
events.AfterRegister,
]
assert before_set == [
events.BeforeCreate,
events.BeforeUpdate,
events.BeforeReplace,
events.BeforeUpdateMany,
events.BeforeRegister,
]
def test_patch_view_model(self):
view_cls = Mock()
model1 = Mock()
model2 = Mock()
view_cls.Model = model1
with utils.patch_view_model(view_cls, model2):
view_cls.Model()
assert view_cls.Model is model1
assert not model1.called
model2.assert_called_once_with()
def test_get_route_name(self):
resource_uri = '/foo-=-=-=-123'
assert utils.get_route_name(resource_uri) == 'foo123'
def test_get_resource_uri(self):
resource = Mock(path='/foobar/zoo ')
assert utils.get_resource_uri(resource) == 'zoo'
# File: fabfile.py
import os
import json
import subprocess
import shlex
import time
import signal
import urllib2
from fabric.api import run, local, settings, cd, sudo, task, output, puts, prefix
from fabric.contrib.project import upload_project
from fabric.contrib.files import append, upload_template
APPS = 'api courses courses_viz scheduler'.split(' ')
USER = 'www-data'
GROUP = 'www-data'
PYTHON = '/www/yacs/virtualenv/bin/python'
PIP = '/www/yacs/virtualenv/bin/pip'
@task
def verbose():
output['everything'] = True
def exists(name):
with settings(warn_only=True):
return not run('[ -e "%s" ]' % name).failed
def remote_vars(*keys):
sb = []
for key in keys:
value = run('echo $' + key).strip()
sb.append('='.join([key, '"%s"' % value.replace('"', '\\"')]))
return ' '.join(sb)
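# Example: remote_vars('YACS_ENV', 'YACS_SECRET_KEY') returns a string such as
# 'YACS_ENV="production" YACS_SECRET_KEY="..."' (the values are read from the
# remote shell) that is used below to prefix remote manage.py invocations.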
def upload_monit_conf():
"Uploads the monit conf for gunicorn."
if not exists('/etc/monit/conf.d/'):
puts('monit missing... skipping')
return
puts('Uploading monit config...')
context = dict(
projectpath='/www/yacs/django/',
user=USER,
gunicorn='/www/yacs/virtualenv/bin/gunicorn',
workers=4,
logs='/www/yacs/logs/',
wsgi='yacs.wsgi:application',
pid='/tmp/yacs.pid',
env=remote_vars('YACS_DATABASE_URL', 'YACS_SECRET_KEY'),
)
upload_template('yacs.monit', '/etc/monit/conf.d/yacs.conf',
context=context, use_sudo=True, backup=False)
def update_crontab():
context = dict(
projectpath='/www/yacs/django/',
python='/www/yacs/virtualenv/bin/python',
user=USER,
logpath='/www/yacs/logs/',
)
upload_template('yacs.cron', 'yacs_cron', context=context, backup=False)
sudo('crontab -u {0} yacs_cron'.format(USER))
sudo('rm -f yacs_cron')
def managepy(command, prefix_cmd=''):
sudo('%s %s manage.py %s' % (prefix_cmd, PYTHON, command), user=USER)
@task
def deploy(upgrade=1):
"""Deploys to the given system.
Use salt, chef, or puppet to configure the outside packages.
Things required to be set up:
- python
- database driver
- virtualenv
- coffeescript
- java
- pip
- database (postgres; postgres user)
- created database & user
- webserver (nginx; www-data user)
- webserver config to proxypass to gunicorn (nginx)
- memcached
"""
upload_monit_conf()
clean()
with cd('/www/yacs/'):
if not exists('virtualenv'):
puts('Creating Virtual Environment...')
sudo('virtualenv --distribute virtualenv', user=USER)
puts('Uploading to remote...')
with settings(warn_only=True):
run('rm -rf tmp')
run('mkdir tmp')
upload_project(remote_dir='tmp')
sudo('mv -f tmp/yacs /www/yacs/tmp')
sudo('chown -R %s /www/yacs/tmp' % USER)
sudo('chgrp -R %s /www/yacs/tmp' % GROUP)
run('rm -rf tmp')
with cd('/www/yacs/'):
puts('Replacing remote codebase...')
sudo('rm -rf django', user=USER)
sudo('mv -f tmp django', user=USER)
with cd('/www/yacs/django'):
puts('Removing extra files...')
with settings(warn_only=True):
sudo('find . -name ".*" | xargs rm -r', user=USER)
sudo('rm yacs.db', user=USER)
puts('Installing dependencies...')
pip_prefix = '--upgrade'
if not int(upgrade):
pip_prefix = ''
sudo(PIP + ' install %s -r requirements.txt' % pip_prefix, user=USER)
envs = remote_vars('YACS_ENV', 'YACS_SECRET_KEY', 'YACS_DATABASE_URL')
puts('Running migrations...')
managepy('syncdb --noinput', envs)
managepy('migrate --noinput', envs)
puts('Gathering static files...')
managepy('collectstatic --noinput', envs)
puts("Clearing caches...")
sudo('service memcached restart')
managepy('clear_cache', envs)
puts('Restarting gunicorn...')
sudo('service monit restart')
sudo('monit restart yacs')
update_crontab()
puts('Done!')
@task
def fetch():
"Tells the deployed system to fetch course data."
with cd('/www/yacs/django'):
envs = remote_vars('YACS_ENV', 'YACS_SECRET_KEY', 'YACS_DATABASE_URL') + ' '
puts('Getting course data from SIS...')
sudo(envs + PYTHON + ' manage.py import_course_data')
puts('Fetching catalog data...')
sudo(envs + PYTHON + ' manage.py import_catalog_data')
puts('Generating conflict cache...')
sudo(envs + PYTHON + ' manage.py create_section_cache')
@task
def clean():
"Removes local python cache files."
puts('Removing local object files and caches...')
with settings(warn_only=True):
local('find . -name "*.pyc" | xargs rm')
local('find . -name "*.pyo" | xargs rm')
local('find . -name "__pycache__" -type directory | xargs rm -r')
local('rm -r yacs/static/root')
def wait_for_url(url, timeout=30):
while timeout > 0:
handle = None
try:
handle = urllib2.urlopen(url, timeout=timeout)
handle.getcode()
return
except urllib2.URLError:
time.sleep(1)
timeout -= 1
finally:
if handle:
handle.close()
@task
def jasmine(port=6856):
    "Runs the jasmine CI test suite (the port argument is currently unused)."
    local('jasmine-ci')
@task
def pep8():
local('pep8 . --exclude=migrations,south_migrations,.ropeproject --statistics --count --ignore=E501')
@task
def test():
"Runs tests."
clean()
verbose()
local('python manage.py test --failfast ' + ' '.join(APPS))
pep8()
local('python manage.py collectstatic --noinput')
clean()
@task
def server(port=8000):
    "Runs a local gunicorn server on the given port."
    local('python manage.py run_gunicorn -b "127.0.0.1:' + str(port) + '" -w 2')
| StarcoderdataPython |
91529 | import ctypes
import functools
import random
import struct
from fcntl import ioctl
from trio import socket
from wrath.bpf import create_filter
IP_VERSION = 4
IP_IHL = 5
IP_DSCP = 0
IP_ECN = 0
IP_TOTAL_LEN = 40
IP_ID = 0x1337
IP_FLAGS = 0x2 # DF
IP_FRAGMENT_OFFSET = 0
IP_TTL = 255
IP_PROTOCOL = 6 # TCP
IP_CHECKSUM = 0
TCP_SRC = 6969 # source port
TCP_ACK_NO = 0
TCP_DATA_OFFSET = 5
TCP_RESERVED = 0
TCP_NS = 0
TCP_CWR = 0
TCP_ECE = 0
TCP_URG = 0
TCP_ACK = 0
TCP_PSH = 0
TCP_RST = 0
TCP_SYN = 1
TCP_FIN = 0
TCP_WINDOW = 0x7110
TCP_CHECKSUM = 0
TCP_URG_PTR = 0
SIOCGIFADDR = 0x8915
ETH_HDR_LEN = 14
IP_HDR_LEN = 20
TCP_HDR_LEN = 20
class Flags:
SYNACK = 18
RSTACK = 20
@functools.cache
def get_iface_ip(interface: str) -> str:
"""
Returns the IP address of the interface.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip_addr = socket.inet_ntoa(
ioctl(
sock.fileno(),
SIOCGIFADDR,
struct.pack("256s", bytes(interface[:15], "UTF-8")),
)[20:24]
)
sock.close()
return ip_addr
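# Note (illustrative): the SIOCGIFADDR ioctl fills a struct ifreq; the 16-byte
# interface name is followed by a sockaddr_in (2-byte family, 2-byte port),
# so bytes 20:24 of the returned buffer hold the interface's IPv4 address.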
def create_send_sock() -> socket.SocketType:
"""
Creates a raw AF_INET sending socket requiring an accompanying IP header.
"""
send_sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
send_sock.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
return send_sock
def create_recv_sock(target: str) -> socket.SocketType:
"""
Creates a raw AF_PACKET receiving socket and attaches an eBPF filter to it.
"""
recv_sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0x0800)
fprog = create_filter(target)
recv_sock.setsockopt(socket.SOL_SOCKET, 26, fprog)
return recv_sock
def inet_checksum(header: bytes) -> int:
checksum = 0
for idx in range(0, len(header), 2):
checksum += (header[idx] << 8) | header[idx + 1]
    # Fold the carries until the sum fits in 16 bits (RFC 1071); a single
    # fold can still leave a carry for some inputs.
    while checksum >> 16:
        checksum = (checksum >> 16) + (checksum & 0xFFFF)
    return ~checksum & 0xFFFF
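# Sanity check (illustrative): the well-known IPv4 header example
# 45 00 00 73 00 00 40 00 40 11 00 00 c0 a8 00 01 c0 a8 00 c7
# (checksum field zeroed) should checksum to 0xb861.
assert inet_checksum(bytes.fromhex("450000730000400040110000c0a80001c0a800c7")) == 0xB861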
@functools.cache
def build_ipv4_datagram(interface: str, target: str) -> bytes:
"""
Builds an IPv4 datagram destined to a particular address.
"""
ip_src = get_iface_ip(interface)
src = socket.inet_aton(ip_src)
dest = socket.inet_aton(target)
size = struct.calcsize("!BBHHHBBH4s4s")
assert size == 20
buf = ctypes.create_string_buffer(size)
struct.pack_into(
"!BBHHHBBH4s4s",
buf, # type: ignore
0,
(IP_VERSION << 4) | IP_IHL,
IP_DSCP | IP_ECN,
IP_TOTAL_LEN,
IP_ID,
(IP_FLAGS << 13) | IP_FRAGMENT_OFFSET,
IP_TTL,
IP_PROTOCOL,
IP_CHECKSUM,
src,
dest,
)
struct.pack_into("!H", buf, 10, inet_checksum(bytes(buf))) # type: ignore
return bytes(buf)
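# Note: IP_TOTAL_LEN (40) above covers this 20-byte IP header plus the
# 20-byte TCP header built below; the SYN probes carry no payload.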
def build_tcp_segment(interface: str, target: str, port: int) -> bytes:
"""
Builds a TCP segment destined to a particular port.
"""
ip_src = get_iface_ip(interface)
seq_no = random.randint(0, 2 ** 32 - 1)
size = struct.calcsize("!HHIIBBHHH")
assert size == 20
buf = ctypes.create_string_buffer(size)
struct.pack_into(
"!HHIIHHHH",
buf, # type: ignore
0,
TCP_SRC,
port,
seq_no,
TCP_ACK_NO,
tcp_assemble_halfword(),
TCP_WINDOW,
TCP_CHECKSUM,
TCP_URG_PTR,
)
tcp_pseudo_header = build_tcp_pseudo_hdr(ip_src, target, len(buf))
struct.pack_into("!H", buf, 16, inet_checksum(tcp_pseudo_header + bytes(buf))) # type: ignore
return bytes(buf)
@functools.cache
def build_tcp_pseudo_hdr(ip_src: str, ip_dest: str, length: int) -> bytes:
return struct.pack(
"!4s4sHHH",
socket.inet_aton(ip_src),
socket.inet_aton(ip_dest),
IP_PROTOCOL,
length,
TCP_CHECKSUM,
)
@functools.cache
def tcp_assemble_halfword() -> int:
"""
    Packs the data offset, reserved bits, NS bit and the TCP flags into the
    16-bit halfword that follows the ACK number in the TCP header.
"""
return (TCP_DATA_OFFSET << 12) \
| (TCP_RESERVED << 9) \
| (TCP_NS << 8) \
| build_tcp_flags()
@functools.cache
def build_tcp_flags() -> int:
"""
Assembles TCP flags.
"""
flags = 0
for flag in (
TCP_CWR, TCP_ECE, TCP_URG, TCP_ACK,
TCP_PSH, TCP_RST, TCP_SYN, TCP_FIN
):
flags <<= 1
flags |= flag
return flags
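# Illustrative: with the constants above only TCP_SYN is set, so the flag byte
# is 0b00000010 (= 2); a SYN/ACK reply carries ACK|SYN = 0b00010010 (= 18),
# matching Flags.SYNACK above.
assert build_tcp_flags() == 0b00000010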
def unpack(data: bytes) -> tuple[int, int]:
"""
Extracts the IPv4 datagram from a raw Ethernet frame and returns
the source port and flags of the TCP segment contained in it.
"""
buf = ctypes.create_string_buffer(
data[ETH_HDR_LEN : ETH_HDR_LEN + IP_HDR_LEN + TCP_HDR_LEN],
IP_HDR_LEN + TCP_HDR_LEN
)
unpacked = struct.unpack("!BBHHHBBH4s4sHHIIBBHHH", buf) # type: ignore
src, flags = unpacked[10], unpacked[15]
return src, flags
| StarcoderdataPython |
1624434 | # Import all libraries we will use
import random
import numpy as np
import cv2
def create_image(p):
    # let's create a height x width matrix with all pixels in white color
    height = 1080
    width = 1920
    diameter = 50
    x_correction = int(0.7 * diameter / 2)
    y_correction = int(0.7 * diameter / 2)
    img = np.ones((height, width, 3), np.uint8)*255
    hcount = int(diameter/2)
    while hcount < (height-3):
wcount = int(diameter/2)
while wcount < (width-3):
if random.uniform(0, 1) >= (1-p):
shape = random.uniform(0, 3)
if shape < 1.0:
cv2.circle(img, (wcount, hcount), int(diameter/2), [0, 0, 0], -1)
elif shape < 2.0:
cv2.rectangle(img, (wcount - x_correction, hcount - y_correction), (wcount + x_correction, hcount +
y_correction), [0, 0, 0], -1)
else:
pt1 = (wcount, hcount-y_correction)
pt2 = (wcount-x_correction, hcount+y_correction)
pt3 = (wcount+x_correction, hcount+y_correction)
triangle_cnt = np.array([pt1, pt2, pt3])
cv2.drawContours(img, [triangle_cnt], 0, (0, 0, 0), -1)
# img[hcount, wcount] = [255, 255, 255]
wcount += diameter
hcount += diameter
    p = int(round(p * 100))  # round to avoid float artifacts (e.g. 0.29 * 100 == 28.999...)
# save our image as a "jpg" image
cv2.imwrite("bernoulli" + str(p) + "M" + ".png", img)
if __name__ == '__main__':
create_image(0.08)
| StarcoderdataPython |
4831257 | <reponame>limn2o4/analytics-zoo
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mxnet import gluon
from gluonnlp.model.transformer import TransformerEncoder, TransformerEncoderCell
class MeanMaxPooling(gluon.nn.HybridBlock):
def __init__(self, axis=1, dropout=0.0, prefix=None, params=None, **kwargs):
super(MeanMaxPooling, self).__init__(**kwargs)
# super().__init__(prefix=prefix, params=params)
self.axis = axis
self.dropout = dropout
def hybrid_forward(self, F, inputs):
mean_out = F.mean(data=inputs, axis=self.axis)
max_out = F.max(data=inputs, axis=self.axis)
outputs = F.concat(mean_out, max_out, dim=1)
if self.dropout:
outputs = F.Dropout(data=outputs, p=self.dropout)
# outputs = F.LayerNorm(outputs)
return outputs
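# Shape sketch (illustrative): for inputs of shape (batch, seq_len, units) and
# the default axis=1, the mean and max branches each yield (batch, units), so
# the concatenated output is (batch, 2 * units).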
class SequenceTransformer(gluon.nn.HybridBlock):
def __init__(self, num_items, item_embed, item_hidden_size, item_max_length, item_num_heads,
item_num_layers, item_transformer_dropout, item_pooling_dropout, cross_size,
prefix=None, params=None, **kwargs):
super(SequenceTransformer, self).__init__(**kwargs)
# super().__init__(prefix=prefix, params=params)
with self.name_scope():
self.item_pooling_dp = MeanMaxPooling(dropout=item_pooling_dropout)
self.item_encoder = TransformerEncoder(units=item_embed,
hidden_size=item_hidden_size,
num_heads=item_num_heads,
num_layers=item_num_layers,
max_length=item_max_length,
dropout=item_transformer_dropout)
self.embedding = gluon.nn.Embedding(input_dim=num_items, output_dim=item_embed)
self.dense = gluon.nn.Dense(cross_size)
def hybrid_forward(self, F, input_item, item_valid_length=None):
item_embed_out = self.embedding(input_item)
item_encoding, item_att = self.item_encoder(
inputs=item_embed_out, valid_length=item_valid_length)
item_out = self.item_pooling_dp(item_encoding)
item_out = self.dense(item_out)
return item_out
class ContextTransformer(gluon.nn.HybridBlock):
def __init__(self, context_dims, context_embed, context_hidden_size,
context_num_heads, context_transformer_dropout, context_pooling_dropout,
cross_size, prefix=None, params=None, **kwargs):
super(ContextTransformer, self).__init__(**kwargs)
# super().__init__(prefix=prefix, params=params)
self.context_dims = context_dims
self.context_embed = context_embed
self.cross_size = cross_size
with self.name_scope():
self.context_pooling_dp = MeanMaxPooling(dropout=context_pooling_dropout)
self.context_encoder = TransformerEncoderCell(units=context_embed,
hidden_size=context_hidden_size,
num_heads=context_num_heads,
dropout=context_transformer_dropout
)
self.dense = gluon.nn.Dense(self.cross_size)
self.embeddings = gluon.nn.HybridSequential()
            for context_dim in self.context_dims:
                self.embeddings.add(gluon.nn.Embedding(context_dim, self.context_embed))
def hybrid_forward(self, F, input_context_list):
context_embed = [
self.embeddings[i](input_context) for i, input_context in enumerate(input_context_list)]
context_input = []
for i in context_embed:
context_input.append(F.expand_dims(i, axis=1))
context_embedding = F.concat(*context_input, dim=1)
context_encoding, context_att = self.context_encoder(context_embedding)
context_out = self.context_pooling_dp(context_encoding)
context_out = self.dense(context_out)
return context_out
class TxT(gluon.nn.HybridBlock):
def __init__(self, num_items, context_dims, item_embed=100, context_embed=100,
item_hidden_size=256, item_max_length=8, item_num_heads=4, item_num_layers=2,
item_transformer_dropout=0.0, item_pooling_dropout=0.1, context_hidden_size=256,
context_num_heads=2, context_transformer_dropout=0.0, context_pooling_dropout=0.0,
act_type="gelu", cross_size=100, prefix=None, params=None, **kwargs):
super(TxT, self).__init__(**kwargs)
self.act_type = act_type
with self.name_scope():
self.sequence_transformer = SequenceTransformer(
num_items=num_items,
item_embed=item_embed,
item_hidden_size=item_hidden_size,
item_max_length=item_max_length,
item_num_heads=item_num_heads,
item_num_layers=item_num_layers,
item_transformer_dropout=item_transformer_dropout,
item_pooling_dropout=item_pooling_dropout,
cross_size=cross_size,
prefix=prefix, params=params
)
self.context_transformer = ContextTransformer(
context_dims=context_dims,
context_embed=context_embed,
context_hidden_size=context_hidden_size,
context_num_heads=context_num_heads,
context_transformer_dropout=context_transformer_dropout,
context_pooling_dropout=context_pooling_dropout,
cross_size=cross_size,
prefix=prefix, params=params
)
self.dense1 = gluon.nn.Dense(units=num_items//2)
if act_type == "relu":
self.act = gluon.nn.Activation(activation="relu")
elif act_type == "gelu":
self.act = gluon.nn.GELU()
elif act_type == "leakyRelu":
self.act = gluon.nn.LeakyReLU(alpha=0.2)
else:
raise NotImplementedError
self.dense2 = gluon.nn.Dense(units=num_items, activation=None)
def hybrid_forward(self, F, input_item, item_valid_length, input_context_list):
item_outs = self.sequence_transformer(input_item, item_valid_length)
context_outs = self.context_transformer(input_context_list)
outs = F.broadcast_mul(item_outs, context_outs)
outs = self.dense1(outs)
outs = self.act(outs)
outs = self.dense2(outs)
return outs
| StarcoderdataPython |
1622512 | <reponame>igushev/fase_lib
"""Auto-generated file, do not edit by hand. ER metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_ER = PhoneMetadata(id='ER', country_code=291, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[178]\\d{6}', possible_number_pattern='\\d{6,7}', possible_length=(7,), possible_length_local_only=(6,)),
fixed_line=PhoneNumberDesc(national_number_pattern='1(?:1[12568]|20|40|55|6[146])\\d{4}|8\\d{6}', example_number='8370362', possible_length=(7,), possible_length_local_only=(6,)),
mobile=PhoneNumberDesc(national_number_pattern='17[1-3]\\d{4}|7\\d{6}', possible_number_pattern='\\d{7}', example_number='7123456', possible_length=(7,)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d)(\\d{3})(\\d{3})', format='\\1 \\2 \\3', national_prefix_formatting_rule='0\\1')])
| StarcoderdataPython |
3392171 | <reponame>Hacker-Davinci/Python_Automate_The_Boring_Stuff_Practice
### This is about dictionaries.
# Let's practice with a small project that stores birthdays.
## dictionary .keys(), .values(), .items()
birthdays = {'Alice': 'Apr 1', 'Bob': 'Dec 12', 'Carol': 'Mar 4'}
while True:
print('Enter a name (blank to quit)')
name = input()
if name == '':
break
if name in birthdays:
print(birthdays[name] + " is the birthday of " + name)
else:
print("I don't have the name of " + name)
print("Can you tell me the birthday information of %s ?" %name)
bornday = input()
birthdays[name] = bornday
for keys in birthdays.keys():
print("%s %s" %(keys, birthdays[keys]))
for item in birthdays.items(): ## after printing it, we can observe that .items() returns tuples.
print(item)
if 'Eric' in birthdays.keys(): ## we can check whether the key exists in it.
pass
if 'Eric' not in birthdays.keys(): ## we can check whether the key exists in it.
pass
## the usage of the get() function
picnicitems = {'apples': 5, 'cups': 2}
print("I am bringing " + str(picnicitems.get('apples', 0)) + " apples")
print("I am bringing " + str(picnicitems.get('eggs', 0))+ " eggs")
## setdefault
spam = {'name': 'Pooka', 'age': 5}
spam.setdefault('color', 'black') # if the key doesn't exist, create the item;
print(spam)                       # otherwise setdefault does nothing
spam['color'] = 'blue'
print(spam)
message = 'It was a bright cold day in April, and the clocks were striking thirteen.'
count = {}
for character in message:
count.setdefault(character, 0)
count[character] += 1
print(count)
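# The same tally can be produced with the standard library's Counter
# (equivalent counts, shown here for comparison):
from collections import Counter
print(Counter(message))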
import pprint # a pretty way to print the dictionary
pprint.pprint(count)
| StarcoderdataPython |
167711 | <filename>nets/chimney_cnn.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
import tensorflow.contrib.slim as slim
model_params = {
'basic': ([0, 0, 0, 0], [16, 32, 64, 128]),
'test': ([0, 1, 2, 3, 2], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
#'test': ([0, 2, 3, 4, 3], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
#'test': ([0, 3, 4, 6, 3], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
#'test': ([0, 0, 0, 0, 0], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
'50': ([0, 3, 4, 6, 3], [64, [128,256], [256,512], [512,1024], [1024, 2048]], 7, 32),
'101': ([0, 3, 4, 23, 3], [64, [128,256], [256,512], [512,1024], [1024, 2048]], 7, 32),
'152': ([0, 3, 8, 36, 3], [64, [128,256], [256,512], [512,1024], [1024, 2048]], 7, 32),
}
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages ends up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
batch_norm_params_last = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 10e-8,
# force in-place updates of mean and variance estimates
'center': False,
# not use beta
'scale': False,
# not use gamma
'updates_collections': None,
# Moving averages ends up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
activation = tf.nn.relu
# Convolution with special initialization
def convolution(net, num_kernels, kernel_size, groups=1, stride=1, padding='SAME'):
    assert num_kernels % groups == 0, '%d %d' % (num_kernels, groups)
stddev = math.sqrt(2/(kernel_size*kernel_size*num_kernels/groups))
if groups==1:
return slim.conv2d(net, num_kernels, kernel_size=kernel_size, stride=stride, padding=padding,
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
biases_initializer=None)
else:
num_kernels_split = int(num_kernels / groups)
input_splits = tf.split(net, groups, axis=3)
output_splits = [slim.conv2d(input_split, num_kernels_split,
kernel_size=kernel_size, stride=stride, padding=padding,
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
biases_initializer=None) for input_split in input_splits]
return tf.concat(output_splits, axis=3)
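# Illustrative: with num_kernels=64 and groups=4, the input is split into 4
# channel groups, each group is convolved with 16 kernels, and the outputs are
# concatenated back to 64 channels (the ResNeXt "cardinality" trick).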
def residual_block(net, num_kernels, cardinality, stride=1, reuse=None, scope=None):
with tf.variable_scope(scope, 'block', [net], reuse=reuse):
net = convolution(net, num_kernels[0], kernel_size=1, groups=1, stride=1, padding='SAME')
net = convolution(net, num_kernels[0], kernel_size=3, groups=cardinality, stride=stride, padding='SAME')
print(net.shape)
with slim.arg_scope([slim.conv2d], activation_fn=None):
net = convolution(net, num_kernels[1], kernel_size=1, groups=1, stride=1, padding='SAME')
return net
def conv_module(net, num_res_layers, num_kernels, trans_kernel_size=3, trans_stride=2,
use_se=False, reuse=None, scope=None):
with tf.variable_scope(scope, 'conv', [net], reuse=reuse):
net = slim.conv2d(net, num_kernels,
kernel_size=trans_kernel_size, stride=trans_stride, padding='SAME',
weights_initializer=slim.xavier_initializer())
shortcut = net
for i in range(num_res_layers):
# num_kernels_sm = int(num_kernels / 2)
net = slim.conv2d(net, num_kernels, kernel_size=3, stride=1, padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=None)
net = slim.conv2d(net, num_kernels, kernel_size=3, stride=1, padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=None)
# net = slim.conv2d(net, num_kernels, kernel_size=1, stride=1, padding='SAME',
# weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
# biases_initializer=None)
print('| ---- block_%d' % i)
            if use_se:
                net = se_module(net)  # assumes an SE (squeeze-and-excitation) helper defined elsewhere; not in this file
net = net + shortcut
shortcut = net
return net
def branch(net, name='Branch', keep_probability=1.0, phase_train=True, bottleneck_layer_size=512,
weight_decay=1e-4, reuse=None, model_version=None):
with slim.arg_scope([slim.conv2d, slim.separable_conv2d, slim.fully_connected],
activation_fn=activation,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with tf.variable_scope(name, [net], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=phase_train):
print('[{}] input shape:'.format(name), [dim.value for dim in net.shape])
net = conv_module(net, 0, 64, scope='global_conv3')
print('[{}] conv_1 shape:'.format(name), [dim.value for dim in net.shape])
net = conv_module(net, 0, 128, scope='global_conv4')
print('[{}] conv_2 shape:'.format(name), [dim.value for dim in net.shape])
net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool5')
feat = slim.flatten(net)
net = slim.fully_connected(feat, 1, scope='Bottleneck',
# weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
# weights_initializer=tf.constant_initializer(0.),
weights_initializer=slim.xavier_initializer(),
activation_fn=None, normalizer_fn=None)
return feat, net
def inference(images, keep_probability, phase_train=True, bottleneck_layer_size=512,
weight_decay=1e-4, reuse=None, model_version=None):
with slim.arg_scope([slim.conv2d, slim.separable_conv2d, slim.fully_connected],
activation_fn=activation,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with tf.variable_scope('ResNeXt', [images], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=phase_train):
## COMMON JOINT LEARNING
print('input shape:', [dim.value for dim in images.shape])
net = conv_module(images, 0, 16, scope='Common/global_conv1')
                print('module_1 shape:', [dim.value for dim in net.shape])
net = conv_module(net, 0, 32, scope='Common/global_conv2')
## BRANCHING JOINT LEARNING
feat_1 , score_1 = branch(net, 'AdversarialBranch')
feat_2 , score_2 = branch(net, 'DigitalBranch')
feat_3 , score_3 = branch(net, 'PhysicalBranch')
final_feat = tf.concat([feat_1, feat_2, feat_3], axis=-1)
net = slim.fully_connected(final_feat, 1, scope='Bottleneck',
# weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
# weights_initializer=tf.constant_initializer(0.),
weights_initializer=slim.xavier_initializer(),
activation_fn=None, normalizer_fn=None)
return [score_1, score_2, score_3, net], final_feat
| StarcoderdataPython |
171854 | """The Matrix bot component."""
import asyncio
import logging
import mimetypes
import os
import tempfile
import aiofiles
import aiofiles.os
import aiohttp
import ffmpeg
import homeassistant.components.notify as hanotify
import homeassistant.const as haconst
import homeassistant.helpers.config_validation as cv
import nio
import PIL
import voluptuous as vol
from markdown import markdown
from . import const
_LOGGER = logging.getLogger(__name__)
SESSION_FILE = ".matrix.conf"
CONF_HOMESERVER = "homeserver"
CONF_MARKDOWN = "markdown"
CONF_ROOMS = "rooms"
CONF_COMMANDS = "commands"
CONF_WORD = "word"
CONF_EXPRESSION = "expression"
CONF_CONVERSATION = "conversation"
ATTR_MARKDOWN = "markdown"
ATTR_NOTICE = "notice"
ATTR_FILE = "file"
ATTR_URL = "url"
EVENT_MATRIX_COMMAND = "matrix_command"
COMMAND_SCHEMA = vol.All(
vol.Schema(
{
vol.Exclusive(CONF_WORD, "trigger"): cv.string,
vol.Exclusive(CONF_EXPRESSION, "trigger"): cv.is_regex,
vol.Required(haconst.CONF_NAME): cv.string,
vol.Optional(CONF_ROOMS, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
),
cv.has_at_least_one_key(CONF_WORD, CONF_EXPRESSION),
)
CONVERSATION_SCHEMA = vol.Schema(
{vol.Optional(CONF_ROOMS, default=[]): vol.All(cv.ensure_list, [cv.string]),}
)
CONFIG_SCHEMA = vol.Schema(
{
const.DOMAIN: vol.Schema(
{
vol.Required(CONF_HOMESERVER): cv.url,
vol.Optional(haconst.CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Required(haconst.CONF_USERNAME): cv.matches_regex("@[^:]*:.*"),
vol.Required(haconst.CONF_PASSWORD): cv.string,
vol.Optional(CONF_ROOMS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_COMMANDS, default=[]): [COMMAND_SCHEMA],
vol.Optional(CONF_CONVERSATION, default={}): CONVERSATION_SCHEMA,
}
)
},
extra=vol.ALLOW_EXTRA,
)
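# Illustrative configuration.yaml entry matching the schema above (homeserver,
# user and rooms are placeholders, and the domain key assumes the component
# registers as "matrix"):
#
# matrix:
#   homeserver: https://matrix.example.com
#   username: "@hassbot:example.com"
#   password: !secret matrix_bot_password
#   rooms:
#     - "#homeassistant:example.com"
#   commands:
#     - word: lights
#       name: toggle_lights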
SERVICE_SCHEMA_SEND_MESSAGE = vol.Schema(
{
vol.Required(hanotify.ATTR_MESSAGE): cv.string,
vol.Required(hanotify.ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_MARKDOWN, default=False): cv.boolean,
vol.Optional(ATTR_NOTICE, default=False): cv.boolean,
}
)
SERVICE_SCHEMA_SEND_MEDIA = vol.Schema(
{
vol.Optional(ATTR_FILE): cv.string,
vol.Optional(ATTR_URL): cv.string,
vol.Required(hanotify.ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
}
)
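# Illustrative service-call payloads matching the schemas above (service and
# domain names assume the component registers as "matrix"):
#   matrix.send_message: {"message": "Hello", "target": ["#room:example.com"], "markdown": true}
#   matrix.send_media:   {"url": "http://192.168.1.2/snapshot.jpg", "target": ["!abc123:example.com"]}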
async def async_setup(hass, config):
"""Set up the Matrix bot component."""
config = config[const.DOMAIN]
try:
bot = MatrixBot(
hass,
os.path.join(hass.config.path(), SESSION_FILE),
config[CONF_HOMESERVER],
config[haconst.CONF_VERIFY_SSL],
config[haconst.CONF_USERNAME],
config[haconst.CONF_PASSWORD],
config[CONF_ROOMS],
config[CONF_COMMANDS],
config[CONF_CONVERSATION],
)
# Start listener in the background
await bot.login()
hass.bus.async_listen_once(haconst.EVENT_HOMEASSISTANT_STOP, bot.close)
asyncio.create_task(bot.startup_and_listen())
hass.data[const.DOMAIN] = bot
except nio.ProtocolError as exception:
_LOGGER.error("Matrix failed to log in: %s", str(exception))
return False
hass.services.async_register(
const.DOMAIN,
const.SERVICE_SEND_MESSAGE,
bot.handle_send_message,
schema=SERVICE_SCHEMA_SEND_MESSAGE,
)
hass.services.async_register(
const.DOMAIN,
const.SERVICE_SEND_MEDIA,
bot.handle_send_image,
schema=SERVICE_SCHEMA_SEND_MEDIA,
)
_LOGGER.debug("Matrix component ready to use.")
return True
class MatrixBot:
"""Matrix bot."""
def __init__(
self,
hass,
config_file,
homeserver,
verify_ssl,
username,
password,
listening_rooms,
commands,
conversation,
):
"""Matrix bot.
Args:
hass: The homeassistant object
config_file: The path for the matrix bot (generated dynamically)
homeserver: The url for the matrix homeserver
verify_ssl: True if the bot should check the validity
of the SSL certificate otherwise False
username: The username of the bot (like: @bot:matrix.org)
            password: The password of the bot account
            listening_rooms: The list of the rooms the bot should listen in
commands: COMMAND_SCHEMA like object from the configuration
conversation: CONVERSATION_SCHEMA like object from the
configuration
"""
self.hass = hass
self._session_filepath = config_file
self._homeserver = homeserver
self._verify_tls = verify_ssl
self._mx_id = username
self._password = password
self._device_id = "hamatrix"
self._listening_rooms = listening_rooms
self._commands = commands
self._conversation = conversation
self._callbacks = None
self._listening_room_ids = None
# We have to fetch the aliases for every room to make sure we don't
# join it twice by accident. However, fetching aliases is costly,
# so we only do it once per room.
self._aliases_fetched_for = {}
# Word commands are stored dict-of-dict: First dict indexes by room ID
# / alias, second dict indexes by the word
self._word_commands = {}
# Regular expression commands are stored as a list of commands per
# room, i.e., a dict-of-list
self._expression_commands = {}
# Configuration options for the AsyncClient
_client_config = nio.AsyncClientConfig(
max_limit_exceeded=0,
max_timeouts=0,
store_sync_tokens=True,
encryption_enabled=False,
)
# Initialize the matrix client
self._client = nio.AsyncClient(
self._homeserver,
self._mx_id,
device_id=self._device_id,
store_path=self._session_filepath + "_session",
config=_client_config,
)
async def get_listening_room_ids(self):
"""Return the room ids of the rooms the bot have to listen.
Returns:
A list of the room ids where the bot should listen.
"""
if self._listening_room_ids:
return self._listening_room_ids
self._listening_room_ids = []
for _room_id_or_alias in self._listening_rooms:
self._listening_room_ids.append(
await self.resolve_room_id(_room_id_or_alias)
)
return self._listening_room_ids
async def compute_commands(self):
"""Set up the variables for a different kind of command types."""
async def _set_word_command(_room_id, _command):
"""Set the word commands."""
_room_id = await self.resolve_room_id(_room_id)
if _room_id in self._conversation[CONF_ROOMS]:
return
if _room_id not in self._word_commands:
self._word_commands[_room_id] = {}
if len(_command[CONF_ROOMS]) > 0:
_room_id_list = []
for _room_id_or_alias in _command[CONF_ROOMS]:
_id = await self.resolve_room_id(_room_id_or_alias)
if _id not in self._conversation[CONF_ROOMS]:
_room_id_list.append(_id)
_command[CONF_ROOMS] = list(_room_id_list)
_LOGGER.debug("Word command: %s", str(_command))
self._word_commands[_room_id][_command[CONF_WORD]] = _command
async def _set_expression_command(_room_id, _command):
"""Set the expression commands."""
_room_id = await self.resolve_room_id(_room_id)
if (
self._conversation[CONF_ROOMS]
and _room_id in self._conversation[CONF_ROOMS]
):
return
if _room_id not in self._expression_commands:
self._expression_commands[_room_id] = []
if len(_command[CONF_ROOMS]) > 0:
_room_id_list = []
for _room_id_or_alias in _command[CONF_ROOMS]:
_id = await self.resolve_room_id(_room_id_or_alias)
if _id not in self._conversation[CONF_ROOMS]:
_room_id_list.append(_id)
_command[CONF_ROOMS] = list(_room_id_list)
_LOGGER.debug("Exp. command: %s", str(_command))
self._expression_commands[_room_id].append(_command)
# Compute the rooms the bot listens and sends everything to conversation
if self._conversation:
_LOGGER.debug("There is Conversation defined.")
if self._conversation.get(CONF_ROOMS):
_room_ids = []
for _room_id_or_alias in self._conversation[CONF_ROOMS]:
_room_ids.append(await self.resolve_room_id(_room_id_or_alias))
self._conversation[CONF_ROOMS] = _room_ids
_LOGGER.debug("Conversation: %s", str(self._conversation))
# Compute the rooms the bot should listen for particular expressions
for _command in self._commands:
if not _command.get(CONF_ROOMS):
for _room_id in await self.get_listening_room_ids():
if (
self._conversation
and _room_id not in self._conversation[CONF_ROOMS]
):
_command[CONF_ROOMS].append(_room_id)
if _command.get(CONF_WORD):
for _room_id in _command[CONF_ROOMS]:
await _set_word_command(_room_id, _command)
else:
for _room_id in _command[CONF_ROOMS]:
await _set_expression_command(_room_id, _command)
_LOGGER.debug("Word commands: %s", str(self._word_commands))
_LOGGER.debug("Expression commands: %s", str(self._expression_commands))
_LOGGER.debug("Conversation rooms: %s", str(self._conversation))
async def get_commands(self):
"""Get the defined commands for the Callbacks.
Returns:
A dict with the commands for different kinds for
a callback.
"""
return dict(
word_commands=self._word_commands,
expression_commands=self._expression_commands,
conversation=self._conversation[CONF_ROOMS],
)
async def login(self):
"""Login to Matrix."""
_LOGGER.debug("Login with %s", self._mx_id)
login_response = await self._client.login(
            password=self._password, device_name=self._device_id,
)
# Check if login failed
if isinstance(login_response, nio.LoginError):
_LOGGER.error("Failed to login: %s", login_response.message)
raise nio.ProtocolError
async def join_all_rooms(self):
"""Join all rooms if not already joined."""
_LOGGER.debug("Join all rooms if not already joined.")
for _room in self._listening_rooms:
_room_id = await self.resolve_room_id(_room)
await self.join_room_if_not_in(_room_id)
async def startup_and_listen(self):
"""Initialize the bot."""
await self.sync()
await self.join_all_rooms()
await self.sync()
await self.compute_commands()
await self.listen()
async def listen(self):
"""Make Matrix client listening for events in rooms."""
_LOGGER.debug("Add callbacks.")
self._callbacks = Callbacks(self.hass, self._client, await self.get_commands())
self._client.add_event_callback(self._callbacks.message, (nio.RoomMessageText,))
_LOGGER.debug("Listening forever on Matrix rooms...")
while True:
try:
await self._client.sync_forever(timeout=30000, full_state=True)
except (
nio.ProtocolError,
aiohttp.client_exceptions.ServerDisconnectedError,
) as exception:
_LOGGER.warning(
"Unable to connect to homeserver (%s), retrying in 15s...",
str(exception),
)
# Sleep so we don't bombard the server with login requests
await asyncio.sleep(15)
finally:
await self._client.close()
async def close(self, junk):
"""Close the client connection."""
_LOGGER.debug("Matrix connection closed.")
await self._client.close()
async def sync(self):
"""Sync the state."""
_LOGGER.debug("Syncing...")
await self._client.sync()
_LOGGER.debug("Syncing... Done.")
async def handle_send_message(self, service):
"""Handle the send messages to the rooms."""
_LOGGER.debug("Sending message to %s", str(service.data[hanotify.ATTR_TARGET]))
for _room_id_or_alias in service.data[hanotify.ATTR_TARGET]:
_room_id = await self.resolve_room_id(_room_id_or_alias)
if _room_id:
await self.join_room_if_not_in(_room_id)
await self.send_text_to_room(
_room_id,
service.data[hanotify.ATTR_MESSAGE],
markdown_convert=service.data[ATTR_MARKDOWN],
notice=service.data[ATTR_NOTICE],
)
async def handle_send_image(self, service):
"""Handle the send image to the rooms."""
_LOGGER.debug("Sending image to %s", str(service.data[hanotify.ATTR_TARGET]))
if ATTR_URL in service.data:
_file_path = tempfile.NamedTemporaryFile(delete=False).name
async with aiohttp.ClientSession() as session:
async with session.get(service.data[ATTR_URL]) as _resp:
if _resp.status == 200:
file_object = await aiofiles.open(_file_path, mode="wb")
await file_object.write(await _resp.read())
await file_object.close()
else:
_LOGGER.warning(
"Downloading the url %s failed with response code: %s",
str(service.data[ATTR_URL]),
str(_resp.status),
)
return
else:
_file_path = service.data[ATTR_FILE]
_room_ids = []
for _room_id_or_alias in service.data[hanotify.ATTR_TARGET]:
_room_id = await self.resolve_room_id(_room_id_or_alias)
if _room_id:
_room_ids.append(_room_id)
await self.join_room_if_not_in(_room_id)
await self.send_image_to_rooms(_room_ids, _file_path)
if ATTR_URL in service.data:
try:
os.unlink(_file_path)
except OSError as exception:
_LOGGER.warning(
"The deletion of %s failed. (%s)", str(_file_path), str(exception)
)
async def resolve_room_id(self, room_id_or_alias):
"""Resolve the room id if we put in a room alias.
Returns:
Returns the room id for the alias/id or
False if there is no match
"""
if room_id_or_alias.startswith("#"):
# This is an alias (first character is #)
if room_id_or_alias in self._aliases_fetched_for.keys():
_LOGGER.debug("Room ID fetched from cache.")
return self._aliases_fetched_for[room_id_or_alias]
_LOGGER.debug("Resolv room id from room alias: %s", room_id_or_alias)
room_id = await self._client.room_resolve_alias(room_id_or_alias)
if not isinstance(room_id, nio.RoomResolveAliasResponse):
_LOGGER.error("The room id can't be found: %s", str(room_id))
return False
room_id = room_id.room_id
self._aliases_fetched_for[room_id_or_alias] = room_id
_LOGGER.debug("The resolved room id is: %s", str(room_id))
elif room_id_or_alias.startswith("!"):
# This is a room id (first character is !)
room_id = room_id_or_alias
else:
_LOGGER.error(
"This doesn't look like a valid room id or alias: %s",
str(room_id_or_alias),
)
return False
return room_id
async def join_room_if_not_in(self, room_id):
"""Join rooms.
If the bot is not in the room already, then
join the bot in the room.
"""
_joined_rooms = await self._client.joined_rooms()
_LOGGER.debug("Joined rooms: %s", str(_joined_rooms.rooms))
if room_id not in _joined_rooms.rooms:
_LOGGER.debug("Joining to room: %s", str(room_id))
_response = await self._client.join(room_id)
if not isinstance(_response, nio.JoinResponse):
_LOGGER.error("Unable to join to the room: %s", str(_response))
return False
_LOGGER.debug("Joined into the room: %s", str(room_id))
async def send_text_to_room(
self,
room_id: str,
message: str,
markdown_convert: bool = False,
notice: bool = False,
reply_to_event_id: str = None,
):
"""Send text to a matrix room.
Args:
client: The client to communicate to matrix with.
room_id: The ID of the room to send the message to.
message: The message content.
markdown_convert: Whether to convert the message content to markdown.
Defaults to true.
notice: Whether the message should be sent with an "m.notice" message type
(will not ping users).
reply_to_event_id: Whether this message is a reply to another event. The event
ID this is message is a reply to.
Returns:
A RoomSendResponse if the request was successful, else an ErrorResponse.
"""
_LOGGER.debug("Send message to %s", room_id)
# Determine whether to ping room members or not
_msgtype = "m.notice" if notice else "m.text"
        _content = {
            "msgtype": _msgtype,
            "body": message,
        }
        if markdown_convert:
            # "format" is only valid alongside "formatted_body" in the Matrix spec.
            _content["format"] = "org.matrix.custom.html"
            _content["formatted_body"] = markdown(message)
if reply_to_event_id:
_content["m.relates_to"] = {
"m.in_reply_to": {"event_id": reply_to_event_id}
}
try:
_response = await self._client.room_send(
room_id, "m.room.message", _content, ignore_unverified_devices=True,
)
if not isinstance(_response, nio.RoomSendResponse):
_LOGGER.error("Unable to send message response: %s", str(_response))
return False
_LOGGER.debug("Response: %s", str(_response))
except nio.SendRetryError:
_LOGGER.error("Unable to send message response to %s", str(room_id))
async def send_image_to_rooms(self, room_ids, image):
"""Process image.
Arguments:
---------
room_id : list of room_ids
image : str
file name of image from --image argument
caption : str of the caption text
This is a working example for a JPG image.
It can be viewed or downloaded from:
https://matrix.example.com/_matrix/media/r0/download/
example.com/SomeStrangeUriKey
{
"type": "m.room.message",
"sender": "@someuser:example.com",
"content": {
"body": "someimage.jpg",
"info": {
"size": 5420,
"mimetype": "image/jpeg",
"thumbnail_info": {
"w": 100,
"h": 100,
"mimetype": "image/jpeg",
"size": 2106
},
"w": 100,
"h": 100,
"thumbnail_url": "mxc://example.com/SomeStrangeThumbnailUriKey"
},
"msgtype": "m.image",
"url": "mxc://example.com/SomeStrangeUriKey"
},
"origin_server_ts": 12345678901234576,
"unsigned": {
"age": 268
},
"event_id": "$skdhGJKhgyr548654YTr765Yiy58TYR",
"room_id": "!JKHgyHGfytHGFjhgfY:example.com"
}
"""
if not room_ids:
_LOGGER.warning(
"No rooms are given. This should not happen. This image is being dropped and NOT sent."
)
return
if not os.path.isfile(image):
_LOGGER.warning(
"Image file %s is not a file. Doesn't exist or "
"is a directory."
"This image is being dropped and NOT sent.",
str(image),
)
return
# 'application/pdf' "image/jpeg"
width = None
height = None
thumbnail_info = None
thumbnail_url = None
mime_type = mimetypes.guess_type(image)[0]
if mime_type.startswith("image/"):
media_type = "image"
image_object = PIL.Image.open(image)
(
width,
height,
) = image_object.size # image_object.size returns (width,height) tuple
elif mime_type.startswith("video/"):
media_type = "video"
probe = ffmpeg.probe(image)
video_stream = next(
(
stream
for stream in probe["streams"]
if stream["codec_type"] == "video"
),
None,
)
width = int(video_stream["width"])
height = int(video_stream["height"])
thumb = f"{image}_matrix_thumbnail.jpg"
(
ffmpeg.input(image, ss=1)
.output(thumb, vframes=1)
.overwrite_output()
.run(capture_stdout=True, capture_stderr=True)
)
file_stat = await aiofiles.os.stat(thumb)
async with aiofiles.open(thumb, "r+b") as file_object:
resp, maybe_keys = await self._client.upload(
file_object,
content_type="image/jpeg", # image/jpeg
filename=os.path.basename(thumb),
filesize=file_stat.st_size,
)
thumbnail_info = {
"h": height,
"w": width,
"size": file_stat.st_size,
"mimetype": "image/jpeg",
}
thumbnail_url = resp.content_uri
os.remove(thumb)
else:
_LOGGER.warning(
"Image file %s does not have an image mime type. "
"Should be something like image/jpeg. "
"Found mime type %s. "
"This image is being dropped and NOT sent.",
str(image),
str(mime_type),
)
return
# first do an upload of image, see upload() documentation
# http://matrix-nio.readthedocs.io/en/latest/nio.html#nio.AsyncClient.upload
# then send URI of upload to room
file_stat = await aiofiles.os.stat(image)
async with aiofiles.open(image, "r+b") as file_object:
resp, maybe_keys = await self._client.upload(
file_object,
content_type=mime_type, # image/jpeg
filename=os.path.basename(image),
filesize=file_stat.st_size,
)
del maybe_keys
if isinstance(resp, nio.UploadResponse):
_LOGGER.debug(
"Image was uploaded successfully to server. " "Response is: %s",
str(resp),
)
else:
_LOGGER.warning(
"The bot failed to upload. "
"Please retry. This could be temporary issue on "
"your server. "
"Sorry."
)
_LOGGER.warning(
'file="%s"; mime_type="%s"; ' 'filessize="%s"' "Failed to upload: %s",
str(image),
str(mime_type),
str(file_stat.st_size),
                str(resp),
)
content = {
"body": os.path.basename(image), # descriptive title
"info": {
"size": file_stat.st_size,
"mimetype": mime_type,
"thumbnail_info": thumbnail_info,
"w": width, # width in pixel
"h": height, # height in pixel
"thumbnail_url": thumbnail_url,
},
"msgtype": f"m.{media_type}",
"url": resp.content_uri,
}
for _room_id in room_ids:
try:
await self._client.room_send(
_room_id, message_type="m.room.message", content=content
)
_LOGGER.debug(
'This image file was sent: "%s" to room "%s".',
str(image),
str(_room_id),
)
except nio.ProtocolError as exception:
_LOGGER.warning(
"Image send of file %s failed. Sorry. (%s)",
str(image),
str(exception),
)
class Callbacks:
"""Callbacks to handle messages from the room."""
def __init__(self, hass, client: nio.AsyncClient, commands):
"""
Callbacks to handle messages from the room.
Args:
client: nio client used to interact with matrix.
"""
_LOGGER.debug("Matrix Callbacks Class.")
self.hass = hass
self._client = client
self._expression_commands = commands["expression_commands"]
self._word_commands = commands["word_commands"]
self._conversation = commands["conversation"]
async def message(self, room: nio.MatrixRoom, event: nio.RoomMessageText) -> None:
"""
Message event Callback.
Args:
room: The room the event came from.
event: The event.
"""
# Extract the message text
_msg = event.body
_room_id = room.room_id
_LOGGER.debug("Received a message: %s in room: %s", str(_msg), str(_room_id))
# Ignore messages from ourselves
if event.sender == self._client.user:
return
if _room_id in self._conversation:
# This message need to be delivered to conversation service
_LOGGER.debug("Type conversation.")
_response = await self.hass.services.async_call(
"conversation", "process", service_data=dict(text=_msg), blocking=True
)
_LOGGER.debug("Response: %s", str(_response))
return
if _msg[0] == "!":
# Could trigger a single-word command
_LOGGER.debug("Type word: %s", str(_msg))
pieces = _msg.split(" ")
cmd = pieces[0][1:]
command = self._word_commands.get(_room_id, {}).get(cmd)
if command:
event_data = {
"command": command[haconst.CONF_NAME],
"sender": event.sender,
"room": _room_id,
"args": pieces[1:],
}
self.hass.bus.fire(EVENT_MATRIX_COMMAND, event_data)
# After single-word commands, check all regex commands in the room
for command in self._expression_commands.get(_room_id, []):
_LOGGER.debug("Type expression: %s", str(command[haconst.CONF_NAME]))
match = command[CONF_EXPRESSION].match(_msg)
if not match:
continue
event_data = {
"command": command[haconst.CONF_NAME],
"sender": event.sender,
"room": _room_id,
"args": match.groupdict(),
}
self.hass.bus.fire(EVENT_MATRIX_COMMAND, event_data)
| StarcoderdataPython |