id | text | dataset_id
---|---|---|
5156797
|
"""
j2lsnek, a python-based list server for Jazz Jackrabbit 2
By Stijn (https://stijn.chat)
Thanks to DJazz for a reference implementation and zepect for some misc tips.
"""
import urllib.request
import urllib.error
import subprocess
import importlib
import logging
import sqlite3
import socket
import json
import time
import sys
import os
from logging.handlers import RotatingFileHandler
import config
import helpers.servernet
import helpers.functions
import helpers.listener
import helpers.interact
import helpers.serverpinger
import helpers.webhooks
import helpers.jj2
class listserver:
"""
Main list server thread
Sets up port listeners and broadcasts data to connected mirror list servers
"""
looping = True # if False, will exit
sockets = {} # sockets the server is listening on
mirrors = [] # ServerNet connections
last_ping = 0 # last time this list server has sent a ping to ServerNet
last_sync = 0 # last time this list server asked for a full sync
reboot_mode = "quit" # "quit" (default), "restart" (reload everything), or "reboot" (restart complete list server)
banlist = {}
def __init__(self):
"""
Sets up the database connection, logging, and starts port listeners
"""
self.start = int(time.time())
self.address = socket.gethostname()
# initialise logger
self.log = logging.getLogger("j2lsnek")
self.log.setLevel(logging.INFO)
# first handler: output to console, only show warnings (i.e. noteworthy messages)
console = logging.StreamHandler()
console.setLevel(logging.WARNING)
console.setFormatter(logging.Formatter("%(asctime)-15s | %(message)s", "%d-%m-%Y %H:%M:%S"))
self.log.addHandler(console)
# second handler: rotating log file, max 5MB big, log all messages
handler = RotatingFileHandler("j2lsnek.log", maxBytes=5242880, backupCount=1)
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter("%(asctime)-15s | %(message)s", "%d-%m-%Y %H:%M:%S"))
self.log.addHandler(handler)
# third and fourth handlers (optional): webhook handlers
if config.WEBHOOK_DISCORD:
handler = helpers.webhooks.DiscordLogHandler(config.WEBHOOK_DISCORD, self.address)
handler.setLevel(logging.ERROR)
self.log.addHandler(handler)
if config.WEBHOOK_SLACK:
handler = helpers.webhooks.SlackLogHandler(config.WEBHOOK_SLACK, self.address)
handler.setLevel(logging.WARN)
self.log.addHandler(handler)
# try to get own IP
try:
self.ip = json.loads(str(urllib.request.urlopen("http://httpbin.org/ip", timeout=5).read().decode("ascii", "ignore")))["origin"]
except (ValueError, urllib.error.URLError, socket.timeout) as e:
self.log.info("Could not retrieve own IP via online API - guessing")
self.ip = helpers.functions.get_own_ip() # may be wrong, but best we got
# say hello
os.system("cls" if os.name == "nt" else "clear") # clear screen
print("\n .-=-. .--.")
print(" __ .' s n '. / \" )")
print(" _ .' '. / l .-. e \ / .-'\\")
print(" ( \ / .-. \ / 2 / \ k \ / / |\\ ssssssssssssss")
print(" \ `-` / \ `-' j / \ `-` /")
print(" `-.-` '.____.' `.____.'\n")
self.log.warning("Starting list server! This one's name is: %s (%s)" % (self.address, self.ip))
print("Current time: %s" % time.strftime("%d-%M-%Y %H:%M:%S"))
print("Enter 'q' to quit (q + enter).")
print("")
self.prepare_database()
# let other list servers know we're live and ask them for the latest
self.broadcast(action="request", data=[{"from": self.address}])
# only listen on port 10059 if auth mechanism is available
# check if certificates are available for auth and encryption of port 10059 traffic
can_auth = os.path.isfile(config.CERTFILE) and os.path.isfile(config.CERTKEY) and os.path.isfile(
config.CERTCHAIN)
ports = [10053, 10054, 10055, 10056, 10057, 10058, 10059]
if not can_auth:
ports.remove(10059)
self.log.warning("Not listening on port 10059 as SSL certificate authentication is not available")
# "restart" to begin with, then assume the script will quit afterwards. Value may be modified back to
# "restart" in the meantime, which will cause all port listeners to re-initialise when listen_to finishes
self.reboot_mode = "restart"
while self.reboot_mode == "restart":
self.reboot_mode = "quit"
self.looping = True
self.listen_to(ports)
# restart script if that mode was chosen
if self.reboot_mode == "reboot":
if os.name == "nt":
from subprocess import Popen
import signal
p = Popen([sys.executable] + sys.argv)
signal.signal(signal.SIGINT, signal.SIG_IGN)
p.wait()
sys.exit(p.returncode)
else:
interpreter = sys.executable.split("/")[-1]
os.execvp(sys.executable, [interpreter] + sys.argv)
def listen_to(self, ports):
"""
Set up threaded listeners at given ports
:param ports: A list of ports to listen at
:return: Nothing
"""
self.log.info("Opening port listeners...")
for port in ports:
self.sockets[port] = helpers.listener.port_listener(port=port, ls=self)
self.sockets[port].start()
self.log.info("Listening.")
print("Port listeners started.")
# have a separate thread wait for input so this one can go on sending pings every so often
poller = helpers.interact.key_poller(ls=self)
poller.start()
# have a separate thread ping servers every so often
pinger = helpers.serverpinger.pinger(ls=self)
pinger.start()
while self.looping:
current_time = int(time.time())
if self.last_ping < current_time - 120:
# let other servers know we're still alive
self.broadcast(action="ping", data=[{"from": self.address}])
self.last_ping = current_time
if self.last_sync < current_time - 900:
# ask for sync from all servers - in case we missed any servers being listed
self.broadcast(action="request", data=[{"from": self.address, "fragment": "servers"}])
self.last_sync = current_time
time.sleep(config.MICROSLEEP)
self.log.warning("Waiting for listeners to finish...")
for port in self.sockets:
self.sockets[port].halt()
for port in self.sockets:
self.sockets[port].join()
pinger.halt()
pinger.join()
self.log.info("j2lsnek succesfully shut down.")
print("Bye!")
return
def broadcast(self, action, data, recipients=None, ignore=None):
"""
Send data to servers connected via ServerNET
:param action: Action with which to call the API
:param data: Data to send
:param recipients: List of IPs to send to, will default to all known mirrors
:param ignore: List of IPs *not* to send to
:return: Nothing
"""
if not self.looping:
return False # shutting down
data = json.dumps({"action": action, "data": data, "origin": self.address})
if not recipients:
recipients = helpers.functions.all_mirrors()
if ignore is None:
ignore = []
for ignored in ignore:
if ignored in recipients:
recipients.remove(ignored)
transmitters = {}
for mirror in recipients:
if mirror == "localhost" or mirror == "127.0.0.1" or mirror == self.ip:
continue # may be a mirror but should never be sent to because it risks infinite loops
transmitters[mirror] = helpers.servernet.broadcaster(ip=mirror, data=data, ls=self)
transmitters[mirror].start()
return
def halt(self):
"""
Halt program execution
Sets self.looping to False, which ends the main loop and allows the thread to start halting other threads.
:return:
"""
self.looping = False
def prepare_database(self):
"""
Creates database tables if they don't exist yet
No lock is required for the database actions since no other database shenanigans should be going on at this point,
as this runs before the threads get started
:return: result of connection.commit()
"""
dbconn = sqlite3.connect(config.DATABASE)
dbconn.row_factory = sqlite3.Row
db = dbconn.cursor()
# servers is emptied on restart, so no harm in recreating the table (just in case any columns were added/changed)
db.execute("DROP TABLE IF EXISTS servers")
db.execute(
"CREATE TABLE servers (id TEXT UNIQUE, ip TEXT, port INTEGER, created INTEGER DEFAULT 0, lifesign INTEGER DEFAULT 0, last_ping INTEGER DEFAULT 0, private INTEGER DEFAULT 0, remote INTEGER DEFAULT 0, origin TEXT, version TEXT DEFAULT '1.00', plusonly INTEGER DEFAULT 0, mode TEXT DEFAULT 'unknown', players INTEGER DEFAULT 0, max INTEGER DEFAULT 0, name TEXT, prefer INTEGER DEFAULT 0)")
try:
db.execute("SELECT * FROM settings")
except sqlite3.OperationalError:
self.log.info("Table 'settings' does not exist yet, creating and populating.")
db.execute("CREATE TABLE settings (item TEXT UNIQUE, value TEXT)")
db.execute("INSERT INTO settings (item, value) VALUES (?, ?), (?, ?), (?, ?)", ("motd", "", "motd-updated", "0", "motd-expires", int(time.time()) + (3 * 86400)))
# was not a setting initially, so may need to add entry
setting = db.execute("SELECT * FROM settings WHERE item = ?", ("motd-expires", )).fetchone()
if not setting:
db.execute("INSERT INTO settings (item, value) VALUES (?, ?)", ("motd-expires", int(time.time()) + (3 * 86400)))
try:
db.execute("SELECT * FROM banlist").fetchall()
except sqlite3.OperationalError:
self.log.info("Table 'banlist' does not exist yet, creating.")
db.execute("CREATE TABLE banlist (address TEXT, type TEXT, note TEXT, origin TEXT, reserved TEXT DEFAULT '')")
try:
db.execute("SELECT reserved FROM banlist")
except sqlite3.OperationalError:
db.execute("ALTER TABLE banlist ADD COLUMN reserved TEXT DEFAULT ''")
try:
db.execute("SELECT * FROM mirrors")
except sqlite3.OperationalError:
self.log.info("Table 'mirrors' does not exist yet, creating.")
db.execute("CREATE TABLE mirrors (name TEXT, address TEXT, lifesign INTEGER DEFAULT 0)")
try:
master_fqdn = "list.jj2.plus"
master = socket.gethostbyname(master_fqdn)
if master != self.address: # don't add if *this* server has that hostname
self.log.info("Adding %s as mirror" % master_fqdn)
db.execute("INSERT INTO mirrors (name, address) VALUES (?, ?)", (master_fqdn, master))
except socket.gaierror:
self.log.error("Could not retrieve IP for %s - no master list server available!" % master_fqdn)
# if this method is run, it means the list server is restarted, which breaks all open connections, so clear all
# servers and such - banlist will be synced upon restart
db.execute("DELETE FROM banlist WHERE origin != ?", (self.address, ))
db.execute("DELETE FROM servers")
result = dbconn.commit()
db.close()
dbconn.close()
return result
def reload(self, mode=1):
"""
Reload list server
Depending on the mode, the configuration is reloaded; modules are re-imported; or the list server is shut down and
completely restarted.
:param mode: 1 (reload configuration only), 2 (re-import modules and restart listeners), 3 (reboot the complete list server)
:return:
"""
if mode == 2 or mode == 3:
self.log.warning("Pulling latest code from github...")
subprocess.call("git reset HEAD --hard".split(" "))
subprocess.call("git pull origin master".split(" "))
if mode == 2:
self.log.warning("Reloading modules...")
importlib.reload(helpers.servernet)
importlib.reload(helpers.functions)
importlib.reload(helpers.listener)
importlib.reload(helpers.jj2)
self.reboot_mode = "restart"
self.halt()
elif mode == 3:
self.log.warning("Restarting list server...")
self.reboot_mode = "reboot"
self.halt()
else:
self.log.warning("Reloading configuration...")
importlib.reload(config)
def bridge(self):
"""
Mirror server data from another list
For testing purposes only
:return:
"""
listserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listserver.settimeout(4)
listserver.connect(("172.16.31.10", 10057))
buffer = ""
while True:
try:
add = listserver.recv(1024).decode("ascii", "ignore")
except UnicodeDecodeError:
break
if not add or add == "":
break
buffer += add
servers = buffer.split("\n")
payload = []
for server in servers:
try:
bits = server.split(" ")
if len(bits) < 9:
continue
key = bits[0]
ip = bits[0].split(":")[0]
port = bits[0].split(":")[1]
private = 1 if bits[2] == "private" else 0
mode = bits[3]
version = bits[4]
rest = " ".join(bits[7:]) if bits[7] != " " else " ".join(bits[8:])
bits = rest.split(" ")
created = int(time.time()) - int(bits[0])
players = int(bits[1][1:-1].split("/")[0])
max_players = int(bits[1][1:-1].split("/")[1])
name = " ".join(bits[2:]).strip()
data = {"id": key, "ip": ip, "port": port, "created": created, "lifesign": int(time.time()),
"private": private, "remote": 1, "origin": self.address, "version": version, "mode": mode,
"players": players, "max": max_players, "name": name}
payload.append(data)
srv = helpers.jj2.jj2server(key)
for item in data:
if item != "id":
srv.set(item, data[item])
except ValueError:
continue
self.broadcast(action="server", data=payload)
self.log.warning("Retrieved server data from external list")
listserver.shutdown(socket.SHUT_RDWR)
listserver.close()
listserver() # all systems go
|
StarcoderdataPython
|
101011
|
<filename>lab_exercise_05/lab_exercise_05_solution.py
# LAB EXERCISE 05
print('Lab Exercise 05 \n')
# SETUP
pop_tv_shows = [
{"Title": "WandaVision", "Creator": ["<NAME>"], "Rating": 8.2, "Genre": "Action"},
{"Title": "Attack on Titan", "Creator": ["<NAME>"], "Rating": 8.9, "Genre": "Animation"},
{"Title": "Bridgerton", "Creator": ["<NAME>"], "Rating": 7.3, "Genre": "Drama"},
{"Title": "Game of Thrones", "Creator": ["<NAME>", "<NAME>"], "Rating": 9.3, "Genre": "Action"},
{"Title": "The Mandalorian", "Creator": ["<NAME>"], "Rating": 8.8, "Genre": "Action"},
{"Title": "The Queen's Gambit", "Creator": ["<NAME>", "<NAME>"], "Rating": 8.6, "Genre": "Drama"},
{"Title": "Schitt's Creek", "Creator": ["<NAME>", "<NAME>"], "Rating": 8.5, "Genre": "Comedy"},
{"Title": "The Equalizer", "Creator": ["<NAME>", "<NAME>"], "Rating": 4.3, "Genre": "Action"},
{"Title": "Your Honor", "Creator": ["<NAME>"], "Rating": 7.9, "Genre": "Crime"},
{"Title": "<NAME>", "Creator": ["<NAME>", "<NAME>", "<NAME>"] , "Rating": 8.6, "Genre": "Action"}
]
# END SETUP
# Problem 01 (4 points)
print('\nProblem 01')
action_shows = []
for show in pop_tv_shows:
if show['Genre'] == 'Action':
action_shows.append(show['Title'])
print(f'Action show list:{action_shows}')
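# Equivalent one-liner using a list comprehension (not part of the original solution):
# action_shows = [show['Title'] for show in pop_tv_shows if show['Genre'] == 'Action']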
# Problem 02 (4 points)
print('\nProblem 02')
high_rating = 0
highest_rated_show = None
for show in pop_tv_shows:
if show["Rating"] > high_rating:
high_rating = show["Rating"]
highest_rated_show = show["Title"]
print(f'Highest rated show is {highest_rated_show} with a rating of {high_rating}')
# Problem 03 (4 points)
print('\nProblem 03')
low_rating = 10
lowest_rated_show = None
for show in pop_tv_shows:
if show["Rating"] < low_rating and show['Genre'] != "Action":
low_rating = show["Rating"]
lowest_rated_show = show["Title"]
print(f'Lowest rated non-action show is {lowest_rated_show} with a rating of {low_rating}')
# Problem 04 (4 points)
print('\nProblem 04')
multiple_creators = []
for show in pop_tv_shows:
if len(show["Creator"]) > 1:
multiple_creators.append(show["Title"])
print(f'Shows with multiple creators: {multiple_creators}')
# Problem 05 (4 points)
print('\nProblem 05')
show_genre = []
for show in pop_tv_shows:
if show['Genre'] not in ["Action", "Drama"] or show["Rating"] >= 9:
item = {'Title': show['Title'], 'Genre': show['Genre']}
show_genre.append(item)
print(f'Show and genre: {show_genre}')
|
StarcoderdataPython
|
6649485
|
from numpy import arange, setdiff1d
from numpy.random import choice
class batchSampler:
def __init__(self, indices, train_keys, bsize, shuffle = False):
self.indices = indices
self.train_keys = train_keys
self.bsize = bsize
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
indices = choice(self.indices, size = len(self.indices), replace = False)
else:
indices = self.indices
minibatch_idx = []
bool_idx = []
for idx in indices:
minibatch_idx.append(idx)
bool_idx.append(sum([int(idx >= x) for x in self.train_keys]))
if len(minibatch_idx) >= self.bsize:
yield minibatch_idx, bool_idx
minibatch_idx, bool_idx = [], []
if minibatch_idx:
yield minibatch_idx, bool_idx
def __len__(self):
return len(self.indices)
def build_trainSamplers(adata, n_train, bsize = 128, val_frac = 0.1):
num_val = round(val_frac * len(adata))
assert num_val >= 1
idx = arange(len(adata))
val_idx = choice(idx, num_val, replace = False)
train_indices, val_indices = setdiff1d(idx, val_idx).tolist(), val_idx.tolist()
train_sampler = batchSampler(train_indices, n_train, bsize, shuffle = True)
val_sampler = batchSampler(val_indices, n_train, bsize)
return train_sampler, val_sampler
def build_testSampler(adata, train_keys, bsize = 128):
indices = list(range(len(adata)))
return batchSampler(indices, train_keys, bsize)
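# Minimal usage sketch (toy numbers, not from the original code): iterate a sampler over ten indices
# with hypothetical train keys [3, 7] and a batch size of 4.
if __name__ == "__main__":
    demo = batchSampler(list(range(10)), train_keys=[3, 7], bsize=4, shuffle=True)
    for minibatch_idx, bool_idx in demo:
        print(minibatch_idx, bool_idx)  # batches of indices plus their train-key bucket counts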
|
StarcoderdataPython
|
6457364
|
from sys import executable
from subprocess import check_output, CalledProcessError
from os import listdir
def run_script(script_path, input_str):
file_ext = script_path.suffix
script_runner = script_types.get(file_ext, None)
if script_runner is None:
raise ValueError('Not a valid filetype: ' + '"' + file_ext + '"')
else:
return script_runner(script_path, input_str)
def run_python(script_path, input_str):
output = check_output([executable, str(script_path.resolve())],
input=input_str,
universal_newlines=True)
return output
def run_cpp(cpp_path, input_str):
# Compile using gcc
absolute_path = cpp_path.resolve()
try:
# Match Kattis configuration
check_output(['g++', str(absolute_path), '-O2', '--std=gnu++17', '-o', str(cpp_path.parent / 'problem.exe')])
except CalledProcessError as e:
raise ValueError(e.output)
# Run binary with parameters
output = check_output([str(cpp_path.parent / 'problem.exe')],
input=input_str,
universal_newlines=True)
(cpp_path.parent / 'problem.exe').unlink()
return output
def script_paths(root, problem_names):
"""
Returns a dict mapping each problem_name to a list of matching script paths
:param root: A path object directing to root
:param problem_names: An array of problem names to fetch the path of
:return:
"""
problem_files = {}
p = root.glob('**/*')
dir_files = [str(x) for x in p if x.is_file()]
for problem_name in problem_names:
for filename in dir_files:
if problem_name in filename:
problem_path = root / filename
if problem_path.suffix in script_types.keys():
problem_files[problem_name] = problem_files.get(problem_name, []) + [problem_path]
return problem_files
script_types = {
'.py': run_python,
'.cpp': run_cpp
}
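# Usage sketch (hypothetical paths; assumes the referenced solution files exist on disk):
# from pathlib import Path
# print(run_script(Path('solutions/problem_a.py'), input_str='1 2\n'))
# print(script_paths(Path('solutions'), ['problem_a', 'problem_b']))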
|
StarcoderdataPython
|
4820427
|
# -*- coding: utf-8 -*-
# @Time : 2019/3/30 9:57 PM
# @Author : 章光辉
# @FileName: amap.py
# @Software: PyCharm
from multiprocessing import Manager
from multiprocessing.pool import ThreadPool
from time import sleep
import logging
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from urllib3.exceptions import HTTPError
import numpy as np
import pandas as pd
session = requests.session()
retry = Retry(connect=3, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
logger = logging.getLogger(__name__)
class AMAPCrawler:
def __init__(self, keys: (list, tuple, str) = None):
if keys is None:
self.keys = ['87a08092f3e9c212e6f06e6327d9f385']
else:
if isinstance(keys, str):
keys = [keys]
self.keys = keys
def geocode(self, address):
"""
Geocode a single address
:param address: the address, as a string
:return: coordinates
"""
result = {
'location': None,
'province': None,
'city': None,
'district': None,
'count': 0,
'address': address,
'lng': None,
'lat': None,
}
try:
url = 'https://restapi.amap.com/v3/geocode/geo'
params = {
'address': address,
'key': np.random.choice(self.keys),
}
res = _request(url, params)
count = int(res.get('count'))
if count:
first_poi = res.get('geocodes')[0]
location = tuple([float(i) for i in first_poi.get('location').split(',')])
province = first_poi.get('province') or ''
city = first_poi.get('city') or ''
district = first_poi.get('district') or ''
result.update({
'location': location,
'province': province,
'city': city,
'district': district,
'count': count,
'lng': location[0],
'lat': location[1],
})
else:
logger.warning(f'geocode: AMAP API returned no result for: {address}')
except Exception as e:
logger.warning('geocode failed for %s: %s', address, e)
return result
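# Usage sketch (assumes the key baked into the class above is still valid; the address is illustrative):
# crawler = AMAPCrawler()
# info = crawler.geocode('北京市朝阳区阜通东大街6号')
# print(info['lng'], info['lat'], info['province'])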
def regeocode(self, location_or_lng: (tuple, list, str, float, int), lat: (float, int) = None):
"""
Reverse-geocode a single coordinate
:param location_or_lng: the coordinate (a "lng,lat" string, a (lng, lat) pair, or just the longitude)
:param lat: the latitude, when location_or_lng is only the longitude
:return: coordinates
"""
if lat is None:
if isinstance(location_or_lng, (list, tuple)):
location = location_or_lng
elif isinstance(location_or_lng, str):
location = tuple([float(i) for i in location_or_lng.split(',')])
else:
raise ValueError('Invalid coordinate type')
else:
location = (location_or_lng, lat)
result = {
'address': None,
'province': None,
'city': None,
'district': None,
'lng': None,
'lat': None,
'location': None,
'adcode': None,
}
try:
result.update({
'lng': location[0],
'lat': location[1],
'location': location,
})
url = 'https://restapi.amap.com/v3/geocode/regeo'
params = {
'location': f'{location[0]},{location[1]}',
'key': np.random.choice(self.keys),
}
res = _request(url, params)
regeo_codes = res.get('regeocode')
if regeo_codes:
address = regeo_codes.get('formatted_address')
addressComponent = regeo_codes.get('addressComponent', {})
province = addressComponent.get('province')
city = addressComponent.get('city') or province
district = addressComponent.get('district')
adcode = addressComponent.get('adcode')
result.update({
'address': address,
'province': province,
'city': city,
'district': district,
'adcode': adcode,
})
else:
logger.error(f'regeocode: AMAP API returned no result for: {location}')
except Exception as e:
logger.error('regeocode failed for %s: %s', location, e)
return result
def __batch_geocode_new(self, location_list):
"""
Batch reverse-geocode Mars-datum (GCJ-02) coordinates into addresses
:param location_list: list of coordinates, each element a string
:return: list of addresses
"""
address_list = run_thread_pool(location_list, self.regeocode, 10, split_params=False)
columns_rename = {
'province': 'provinces',
'city': 'cities',
'district': 'districts',
'address': 'addresses',
'adcode': 'adcodes',
}
results = pd.DataFrame(address_list).rename(columns=columns_rename).to_dict('list')
return results
def __batch_geocode_new_new(self, list_location_str, pool_size=10, step=400):
"""
Batch reverse-geocode coordinates (the coordinates must all be valid); every 400 coordinates are merged into one set of parameters
:param list_location_str: list of coordinate strings
:param pool_size: thread pool size
:param step: upper limit of the batch API
:return:
"""
list_params = [list_location_str[i:(i + step)] for i in range(0, len(list_location_str), step)]
results = run_thread_pool(list_params, self.__batch_400_regeocode, pool_size, split_params=False)
adcodes, provinces, cities, districts, addresses = list(zip(*results))
adcodes = sum(adcodes, [])
provinces = sum(provinces, [])
cities = sum(cities, [])
districts = sum(districts, [])
addresses = sum(addresses, [])
return adcodes, provinces, cities, districts, addresses
def __batch_400_regeocode(self, list_location_str, step=20):
"""
Call the AMAP batch API to reverse-geocode coordinates, at most 20*20 coordinate strings at a time
:param list_location_str:
:param step: upper limit of the batch API
:return:
"""
assert len(list_location_str) <= 400
# build the URL for each sub-request
def get_url(locs):
return f'/v3/geocode/regeo?key={key}&batch=true&location=' + '|'.join(locs)
# assemble the URL and params needed for the request
key = np.random.choice(self.keys)
url_batch = f'https://restapi.amap.com/v3/batch?key={key}'
locations = [list_location_str[i:(i + step)] for i in range(0, len(list_location_str), step)]
params = {
"ops": [{'url': get_url(ls)} for ls in locations]
}
def request_success(r, index, num):
return sum([i['body']['infocode'] != '10000' for i in r]) == 0 or index > num
# the API sometimes times out yet still returns data, so validate here and request again whenever infocode is not '10000'
try_num = 5
try_index = 0
while True:
results = _request(url_batch, params, 'post')
try:
if request_success(results, try_index, try_num):
break
except (TypeError, KeyError) as e:
logger.error(f'Parsing failed, retrying: {e}')
try_index += 1
sleep(1)
# extract data from the results
def get_result(r, key1=None, key2=None):
rs = sum([[j[key1] if key2 is None else j[key1][key2] for j in i['body']['regeocodes']] for i in r], [])
return [r or None for r in rs]
# extract the data; if any coordinate falls outside China its data cannot be extracted, and this group has to be split up.
try:
adcodes = get_result(results, 'addressComponent', 'adcode')
provinces = get_result(results, 'addressComponent', 'province')
cities = get_result(results, 'addressComponent', 'city')
districts = get_result(results, 'addressComponent', 'district')
addresses = get_result(results, 'formatted_address')
except KeyError:
dict_result = self.__batch_geocode_new(list_location_str)
adcodes = [r or None for r in dict_result['adcodes']]
provinces = [r or None for r in dict_result['provinces']]
cities = [r or None for r in dict_result['cities']]
districts = [r or None for r in dict_result['districts']]
addresses = [r or None for r in dict_result['addresses']]
return adcodes, provinces, cities, districts, addresses
def batch_process_regeocode(self, list_location, pool_size=1):
"""
批量坐标逆解析(包含对无效坐标的处理)
:param list_location: 坐标字符串构成的列表
:param pool_size: 线程池大小
:return:
"""
def get_location_str(lnglat):
try:
if isinstance(lnglat, str):
lng, lat = list(map(float, lnglat.split(',')))
else:
lng, lat = lnglat
if in_china(lng, lat):
location_str = f'{lng:.6f},{lat:.6f}'
else:
location_str = None
except (ValueError, TypeError):
location_str = None
return location_str
list_location_str = [get_location_str(lnglat) for lnglat in list_location]
df = pd.DataFrame(list_location_str, columns=['location_str'])
rows = df['location_str'].notnull()
adcodes, provinces, cities, districts, addresses = self.__batch_geocode_new_new(list(df[rows]['location_str']),
pool_size)
df.loc[rows, 'adcodes'] = [i or np.nan for i in adcodes]
df.loc[rows, 'provinces'] = [i or np.nan for i in provinces]
df.loc[rows, 'cities'] = [i or np.nan for i in cities]
df.loc[rows, 'districts'] = [i or np.nan for i in districts]
df.loc[rows, 'addresses'] = [i or np.nan for i in addresses]
# fix municipalities (province name only, no city name): use the province name as the city name
rows = df['provinces'].fillna('').str.endswith('市') & df['cities'].replace('nan', np.nan).isnull()
df.loc[rows, 'cities'] = df.loc[rows, 'provinces']
# when the province name does not end with "市" (a regular province or other case), fill the missing city with the district name
rows = ~df['provinces'].fillna('').str.endswith('市') & df['cities'].replace('nan', np.nan).isnull()
df.loc[rows, 'cities'] = df.loc[rows, 'districts']
# post-process the data and merge it into a single dict
df = df.drop(['location_str'], axis=1)
dict_result = dict((k, [i if str(i) != 'nan' else None for i in v])
for k, v in df.to_dict('list').items())
return dict_result
def _batch_200_geocode(self, list_address_str, step=10):
"""
Call the AMAP batch API to geocode addresses, at most 20*10 addresses at a time
:param list_address_str:
:param step: upper limit of the batch API
:return:
"""
assert len(list_address_str) <= 200
# addresses must not be too long
list_address_str = [add[:20] for add in list_address_str]
# build the URL for each sub-request
def get_url(adds):
return f'/v3/geocode/geo?key={key}&batch=true&address=' + '|'.join(adds)
# assemble the URL and params needed for the request
key = np.random.choice(self.keys)
url_batch = f'https://restapi.amap.com/v3/batch?key={key}'
addresses = [list_address_str[i:(i + step)] for i in range(0, len(list_address_str), step)]
params = {
"ops": [{'url': get_url(ls)} for ls in addresses]
}
# the API sometimes times out yet still returns data; validate when infocode is not '10000' and request again
results = _request(url_batch, params, 'post')
# extract data from the results
def get_result(r, key1=None, key2=None):
rs = sum([[j[key1] if key2 is None else j[key1][key2] for j in i['body']['geocodes']] for i in r], [])
return [r or None for r in rs]
# extract the data
adcodes = get_result(results, 'adcode')
provinces = get_result(results, 'province')
cities = get_result(results, 'city')
districts = get_result(results, 'district')
locations = get_result(results, 'location')
return adcodes, provinces, cities, districts, locations
def _batch_geocode_new(self, list_address_str, pool_size=3, step=200):
"""
Batch geocode addresses (addresses may be invalid); every 200 addresses are merged into one set of parameters
:param list_address_str: list of address strings
:param pool_size: thread pool size
:param step: upper limit of the batch API
:return:
"""
list_params = [list_address_str[i:(i + step)] for i in range(0, len(list_address_str), step)]
results = run_thread_pool(list_params, self._batch_200_geocode, pool_size, split_params=False)
adcodes, provinces, cities, districts, locations = list(zip(*results))
adcodes = sum(adcodes, [])
provinces = sum(provinces, [])
cities = sum(cities, [])
districts = sum(districts, [])
locations = sum(locations, [])
return adcodes, provinces, cities, districts, locations
def batch_process_geocode(self, list_address_str, pool_size=1):
"""
Batch geocoding (including handling of invalid data)
:param list_address_str: list of address strings
:param pool_size: thread pool size
:return:
"""
def get_address_str(address_str):
if isinstance(address_str, str):
return address_str
return None
df = pd.DataFrame(list_address_str, columns=['address'])
df['address_str'] = df['address'].apply(get_address_str)
rows = df['address_str'].notnull()
adcodes, provinces, cities, districts, locations = self._batch_geocode_new(list(df[rows]['address_str']),
pool_size)
df.loc[rows, 'adcodes'] = adcodes
df.loc[rows, 'provinces'] = provinces
df.loc[rows, 'cities'] = cities
df.loc[rows, 'districts'] = districts
df.loc[rows, 'locations'] = locations
# fix city names:
# when the province name ends with "市" (a municipality), fill the missing city with the province name
rows = df['provinces'].fillna('').str.endswith('市') & df['cities'].isnull()
df.loc[rows, 'cities'] = df.loc[rows, 'provinces']
# when the province name does not end with "市" (a regular province or other case), fill the missing city with the district name
rows = ~df['provinces'].fillna('').str.endswith('市') & df['cities'].isnull()
df.loc[rows, 'cities'] = df.loc[rows, 'districts']
# post-process the data and merge it into a single dict
df = df.drop(['address', 'address_str'], axis=1)
dict_result = dict((k, [i if str(i) != 'nan' else None for i in v])
for k, v in df.to_dict('list').items())
return dict_result
def in_china(lng, lat):
"""
Roughly check whether a coordinate lies inside China
:param lng: longitude
:param lat: latitude
:return:
"""
return (73 <= lng <= 136) & (18 <= lat <= 54)
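# Quick sanity check (illustrative values): in_china(116.40, 39.90) -> True (roughly Beijing); in_china(2.35, 48.85) -> False (Paris)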
def foo_process(p):
param, job, key, return_dict, split_params, total_num = p
if split_params:
if isinstance(param, (list, tuple)) and len(param) > 0:
r = job(*param)
else:
r = job()
else:
r = job(param)
return_dict[key] = r
def run_thread_pool(params, job, pool_size=50, split_params=True):
"""
Process tasks with a thread pool
:param params: list of the parameters required by each thread
:param job: the task function
:param pool_size: thread pool size, default 50
:param split_params: whether to unpack the parameters
:return: list of results
"""
manager = Manager()
return_dict = manager.dict()
total_num = len(params)
def get_params(p):
for key, param in enumerate(p):
yield (param, job, key, return_dict, split_params, total_num)
pool_size = min(pool_size, total_num)
pool = ThreadPool(processes=pool_size)
pool.map(foo_process, get_params(params))
pool.close()
list_result = [return_dict.get(i) for i in range(len(params))]
return list_result
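# Usage sketch (toy job; the inputs and pool size are illustrative):
# squares = run_thread_pool(params=[1, 2, 3], job=lambda x: x * x, pool_size=2, split_params=False)
# # -> [1, 4, 9]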
def _request(url, params=None, method='get', max_tries=10, **kwargs):
"""
Send a GET or POST request to the server
:param url: request URL, as a string
:param params: parameters, as a dict
:param method: request type, 'get' or 'post'
:param max_tries: maximum number of attempts
:return: result as a dict
"""
try_num = 0
while try_num < max_tries:
try:
headers = {
'Content-Type': 'application/json',
}
if method == 'get':
res = session.get(url, params=params, headers=headers, timeout=10, **kwargs).json()
else:
res = session.post(url, json=params, headers=headers, timeout=10, **kwargs).json()
return res
except HTTPError:
try_num += 1
logger.debug(f'Attempt {try_num}')
# sleep(1 * try_num)
if try_num == max_tries:
logger.debug(f'Unable to connect to {url}')
return {}
|
StarcoderdataPython
|
11227797
|
# RUN: %PYTHON %s | npcomp-opt -split-input-file -npcomp-cpa-type-inference -canonicalize | FileCheck %s --dump-input=fail
import numpy as np
from npcomp.compiler import test_config
from npcomp.compiler.frontend import EmittedError
import_global = test_config.create_import_dump_decorator()
global_data = (np.zeros((2, 3)) + [1.0, 2.0, 3.0] * np.reshape([1.0, 2.0],
(2, 1)))
a = np.asarray([1.0, 2.0])
b = np.asarray([3.0, 4.0])
# Test the basic flow of invoking a ufunc call with constants captured from
# a global using explicit function syntax (np.add(a, b)).
# CHECK-LABEL: func @global_add
# CHECK-SAME: -> !numpy.ndarray<*:f64>
@import_global
def global_add():
# CHECK-NOT: UnknownType
# CHECK: numpy.builtin_ufunc_call<"numpy.multiply"> ({{.*}}, {{.*}}) : (tensor<2xf64>, tensor<2xf64>) -> tensor<*xf64>
# CHECK: numpy.builtin_ufunc_call<"numpy.add"> ({{.*}}, {{.*}}) : (tensor<2xf64>, tensor<*xf64>) -> tensor<*xf64>
# CHECK-NOT: UnknownType
return np.add(a, np.multiply(a, b))
|
StarcoderdataPython
|
9702711
|
import os
import imageList
from PythonCard import util
# this is just a copy and paste of the list in samples.rsrc.py
# and should be updated as new samples are added
SAMPLES = ['addresses', 'chat', 'companies', 'conversions', 'custdb', 'dbBrowser', \
'dialogs', 'doodle', 'flatfileDatabase', 'gadflyDatabase', \
'hopalong', 'jabberChat', 'life', 'minimal', 'minimalStandalone', 'noresource', \
'pictureViewer', 'proof', 'pysshed', 'radioclient', 'redemo', 'rpn', \
'samples', 'saveClipboardBitmap', 'searchexplorer', \
'simpleBrowser', 'simpleIEBrowser', 'slideshow', 'sounds', 'SourceForgeTracker', \
'spirograph', 'stockprice', 'textIndexer', 'textRouter', \
'tictactoe', 'turtle', 'webgrabber', 'webserver', 'widgets', 'worldclock']
# used to auto-generate a batch file that can be
# run to upload all the newly generated files
UPLOAD_COMMAND = 'c:\cvshome\pscp %s.html k<EMAIL>:/home/groups/p/py/pythoncard/htdocs/samples/%s.html\n'
# this could be done more easily
# with a regular expression
# suggestions welcome
def expandUrls(text):
newtext = ''
for s in text.splitlines():
stripped = s.strip()
if stripped.startswith('http://'):
url = ' <a href="%s">%s</a>' % (stripped, stripped)
newtext += url + "\n"
else:
newtext += s + "\n"
return newtext
def readFile(path):
fp = open(path)
data = fp.read()
fp.close()
return data
def writeFile(path, data):
fp = open(path, 'w')
fp.write(data)
fp.close()
def main():
html_template = readFile(os.path.join('templates', 'sample_template.html'))
contents_template = readFile(os.path.join('templates', 'contents_template.html'))
samplesDir = os.path.join('..', '..', '..', 'samples')
batch = ''
contents = '<b>Samples</b><br>\n'
max = len(SAMPLES) - 1
for i in range(len(SAMPLES)):
name = SAMPLES[i]
contents += '<a href="%s.html">%s</a><br>\n' % (name, name)
for i in range(len(SAMPLES)):
name = SAMPLES[i]
if name == 'samples':
path = os.path.join(samplesDir, 'readme.txt')
else:
path = os.path.join(samplesDir, name, 'readme.txt')
readme = readFile(path)
html = html_template
html = html.replace('[title]', name)
if i == 0:
previousSample = SAMPLES[-1]
nextSample = SAMPLES[i + 1]
elif i == max:
previousSample = SAMPLES[i - 1]
nextSample = SAMPLES[0]
else:
previousSample = SAMPLES[i - 1]
nextSample = SAMPLES[i + 1]
data = ''
template = ''
try:
images = imageList.images[name]
for i in range(len(images)):
figure, url = images[i]
if figure == '':
figure = "Figure %d" % (i + 1)
else:
figure = "Figure %d: %s" % (i + 1, figure)
if not url.startswith('http:'):
url = imageList.BASE_IMAGE_URL + url
template += '<p><IMG SRC="%s" BORDER=0></p>\n<b>%s</b><br>\n' % (url, figure)
except:
pass
html = html.replace('[contents]', contents)
html = html.replace('[images]', template)
html = html.replace('[previous_sample]', previousSample)
html = html.replace('[next_sample]', nextSample)
# using a slightly longer wrap hopefully
# avoids problems when the readme.txt has its
# own line feeds
readme = expandUrls(util.wordwrap(readme, 86))
html = html.replace('[readme.txt]', readme)
writeFile(name + '.html', html)
#contents += '<a href="%s.html">%s</a><br>\n' % (name, name)
batch += UPLOAD_COMMAND % (name, name)
i += 1
contents_template = contents_template.replace('[contents]', contents)
writeFile('index.html', contents_template)
batch += UPLOAD_COMMAND % ('index', 'index')
writeFile('upload.bat', batch)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
117183
|
"""
This code is taken from PyPi stream_service package
https://github.com/BR1py/stream_service
We publish here all the relevant classes for Users
"""
from __future__ import absolute_import
from .server import StreamChannelServer_Process
from .client import StreamChannelClient_Thread
from .lib import *
from .lib.frame import *
from .lib.buffer import RING_BUFFER_FULL,RAISE_BUFFER_FULL,SKIP_BUFFER_FULL,CLEAR_BUFFER_FULL,WAIT_BUFFER_FULL
|
StarcoderdataPython
|
6496745
|
<filename>Validation/HGCalValidation/scripts/makeHGCalValidationPlots.py<gh_stars>1-10
#!/usr/bin/env python
from __future__ import print_function
import os
import argparse
from Validation.RecoTrack.plotting.validation import SimpleValidation, SimpleSample
import Validation.HGCalValidation.hgcalPlots as hgcalPlots
import Validation.RecoTrack.plotting.plotting as plotting
def main(opts):
drawArgs={}
if opts.no_ratio:
drawArgs["ratio"] = False
if opts.separate:
drawArgs["separate"] = True
if opts.png:
drawArgs["saveFormat"] = ".png"
if opts.verbose:
plotting.verbose = True
filenames = [(f, f.replace(".root", "")) for f in opts.files]
sample = SimpleSample(opts.subdirprefix[0], opts.html_sample, filenames)
val = SimpleValidation([sample], opts.outputDir[0])
htmlReport = val.createHtmlReport(validationName=opts.html_validation_name[0])
if opts.collection=="hgcalLayerClusters":
hgclayclus = [hgcalPlots.hgcalLayerClustersPlotter]
val.doPlots(hgclayclus, plotterDrawArgs=drawArgs)
elif opts.collection in ["hgcalMultiClusters", "multiClustersFromTrackstersMIP", "multiClustersFromTrackstersTrk", "multiClustersFromTrackstersEM", "multiClustersFromTrackstersHAD"]:
hgcmulticlus = [hgcalPlots.hgcalMultiClustersPlotter]
val.doPlots(hgcmulticlus, plotterDrawArgs=drawArgs)
elif opts.collection=="hitValidation":
hgchit = [hgcalPlots.hgcalHitPlotter]
val.doPlots(hgchit, plotterDrawArgs=drawArgs)
elif opts.collection=="hitCalibration":
hgchitcalib = [hgcalPlots.hgcalHitCalibPlotter]
val.doPlots(hgchitcalib, plotterDrawArgs=drawArgs)
else :
#In case of all you have to keep a specific order in one to one
#correspondence between subdirprefix and collections and validation names
#layer clusters
hgclayclus = [hgcalPlots.hgcalLayerClustersPlotter]
val.doPlots(hgclayclus, plotterDrawArgs=drawArgs)
#multiclusters
sample = SimpleSample(opts.subdirprefix[1], opts.html_sample, filenames)
val = SimpleValidation([sample], opts.outputDir[1])
htmlReport_2 = val.createHtmlReport(validationName=opts.html_validation_name[1])
hgcmulticlus = [hgcalPlots.hgcalMultiClustersPlotter]
val.doPlots(hgcmulticlus, plotterDrawArgs=drawArgs)
#hits
sample = SimpleSample(opts.subdirprefix[2], opts.html_sample, filenames)
val = SimpleValidation([sample], opts.outputDir[2])
htmlReport_3 = val.createHtmlReport(validationName=opts.html_validation_name[2])
hgchit = [hgcalPlots.hgcalHitPlotter]
val.doPlots(hgchit, plotterDrawArgs=drawArgs)
#calib
sample = SimpleSample(opts.subdirprefix[3], opts.html_sample, filenames)
val = SimpleValidation([sample], opts.outputDir[3])
htmlReport_4 = val.createHtmlReport(validationName=opts.html_validation_name[3])
hgchitcalib = [hgcalPlots.hgcalHitCalibPlotter]
val.doPlots(hgchitcalib, plotterDrawArgs=drawArgs)
if opts.no_html:
print("Plots created into directory '%s'." % opts.outputDir)
else:
htmlReport.write()
if(opts.collection=="all"):
htmlReport_2.write()
htmlReport_3.write()
htmlReport_4.write()
print("Plots and HTML report created into directory '%s'. You can just move it to some www area and access the pages via web browser" % (','.join(opts.outputDir)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create set of HGCal validation plots from one or more DQM files.")
parser.add_argument("files", metavar="file", type=str, nargs="+",
default = "DQM_V0001_R000000001__Global__CMSSW_X_Y_Z__RECO.root",
help="DQM file to plot the validation plots from")
parser.add_argument("-o", "--outputDir", type=str, default=["plots1","plots2"], nargs="+",
help="Plot output directories (default: 'plots1'")
parser.add_argument("--subdirprefix", type=str, default=["plots1","plots2"], nargs="+",
help="Prefix for subdirectories inside outputDir (default: 'plots1')")
parser.add_argument("--no-ratio", action="store_true", default = False,
help="Disable ratio pads")
parser.add_argument("--separate", action="store_true", default = False,
help="Save all plots separately instead of grouping them")
parser.add_argument("--png", action="store_true",
help="Save plots in PNG instead of PDF")
parser.add_argument("--no-html", action="store_true", default = False,
help="Disable HTML page generation")
parser.add_argument("--html-sample", default="Sample",
help="Sample name for HTML page generation (default 'Sample')")
parser.add_argument("--html-validation-name", type=str, default=["",""], nargs="+",
help="Validation name for HTML page generation (enters to <title> element) (default '')")
parser.add_argument("--verbose", action="store_true", default = False,
help="Be verbose")
parser.add_argument("--collection", choices=["hgcalLayerClusters", "hgcalMultiClusters", "multiClustersFromTrackstersMIP", "multiClustersFromTrackstersTrk", "multiClustersFromTrackstersEM", "multiClustersFromTrackstersHAD", "hitValidation", "hitCalibration", "all"], default="hgcalLayerClusters",
help="Choose output plots collections: hgcalLayerCluster, hgcalMultiClusters, multiClustersFromTrackstersMIP, multiClustersFromTrackstersTrk, multiClustersFromTrackstersEM, multiClustersFromTrackstersHAD, hitValidation, hitCalibration, all")
opts = parser.parse_args()
if opts.collection == "all" and len(opts.outputDir)==1:
raise RuntimeError("need to assign names for all dirrectories")
for f in opts.files:
if not os.path.exists(f):
parser.error("DQM file %s does not exist" % f)
main(opts)
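# Example invocation sketch (file and directory names are illustrative):
#   makeHGCalValidationPlots.py DQM_V0001_R000000001__Global__CMSSW_X_Y_Z__RECO.root \
#       -o plots_layerclusters --subdirprefix plots --collection hgcalLayerClusters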
|
StarcoderdataPython
|
8150508
|
<gh_stars>0
from wasol import logger
from wasol import core
from wasol import daemon
|
StarcoderdataPython
|
3342250
|
try:
from zcrmsdk.src.com.zoho.crm.api.exception import SDKException
from zcrmsdk.src.com.zoho.crm.api.util import Constants
except Exception:
from ..exception import SDKException
from ..util import Constants
class Options(object):
def __init__(self):
"""Creates an instance of Options"""
pass
|
StarcoderdataPython
|
3264832
|
<filename>py/storage.py
class A(object):
def __init__(self, name):
self.name = name
print '%s is inited' % self.name
def __del__(self):
print '%s is killed' % self.name
a = A('A')
b = A('B')
c = A('C')
b.x = c
c.x = b
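# Note: b and c now reference each other through .x, forming a cycle; because both define __del__,
# Python 2's cyclic collector will not reclaim them, so "B is killed" / "C is killed" may never print.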
|
StarcoderdataPython
|
3589577
|
<reponame>yuyasugano/jpyc-rinkeby<filename>tests/test_approve.py
#!/usr/bin/python3
import pytest
@pytest.mark.parametrize("idx", range(5))
def test_initial_approval_is_zero(token, accounts, idx):
assert token.allowance(accounts[0], accounts[idx]) == 0
def test_approve(token, accounts):
amount = 1000 * 1e18
token.approve(accounts[1], amount, {'from': accounts[0]})
assert token.allowance(accounts[0], accounts[1]) == amount
def test_modify_approve(token, accounts):
amount = 1000 * 1e18
modified = 1234
token.approve(accounts[1], amount, {'from': accounts[0]})
token.approve(accounts[1], modified, {'from': accounts[0]})
assert token.allowance(accounts[0], accounts[1]) == modified
def test_revoke_approve(token, accounts):
amount = 1000 * 1e18
token.approve(accounts[1], amount, {'from': accounts[0]})
token.approve(accounts[1], 0, {'from': accounts[0]})
assert token.allowance(accounts[0], accounts[1]) == 0
def test_approve_self(token, accounts):
amount = 1000 * 1e18
token.approve(accounts[0], amount, {'from': accounts[0]})
assert token.allowance(accounts[0], accounts[0]) == amount
def test_only_affects_target(token, accounts):
amount = 1000 * 1e18
token.approve(accounts[1], amount, {'from': accounts[0]})
assert token.allowance(accounts[1], accounts[0]) == 0
def test_returns_true(token, accounts):
amount = 1000 * 1e18
tx = token.approve(accounts[1], amount, {'from': accounts[0]})
assert tx.return_value is True
def test_approval_event_fires(accounts, token):
amount = 1000 * 1e18
tx = token.approve(accounts[1], amount, {'from': accounts[0]})
assert len(tx.events) == 1
assert tx.events["Approval"].values() == [accounts[0], accounts[1], amount]
|
StarcoderdataPython
|
8060007
|
# https://codeforces.com/problemset/problem/977/B
n = int(input())
s = input()
d = {}
for i in range(n-1):
two_gram = f"{s[i]}{s[i+1]}"
if two_gram in d:
d[two_gram] += 1
else:
d[two_gram] = 1
print(max(d, key=d.get))
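# Equivalent sketch with the standard library (not part of the submitted solution):
# from collections import Counter
# print(Counter(s[i:i + 2] for i in range(n - 1)).most_common(1)[0][0])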
|
StarcoderdataPython
|
337777
|
<gh_stars>0
'''
Created on Oct 28, 2021
@author: mballance
'''
import os
import jinja2
class TemplateLoader(jinja2.BaseLoader):
def get_source(self, environment, template):
path = template
if not os.path.exists(path):
raise jinja2.TemplateNotFound(template)
mtime = os.path.getmtime(path)
f = open(path, "r")
try:
source = f.read()
except:
print("Error reading file \"" + path + "\"");
f.close()
return source, path, lambda: mtime == os.path.getmtime(path)
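# Usage sketch (the template path is hypothetical):
# env = jinja2.Environment(loader=TemplateLoader())
# print(env.get_template("templates/example.j2").render(name="demo"))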
|
StarcoderdataPython
|
262839
|
# Copyright 2017 Suomi Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import abc
class TransactionHandler(object, metaclass=abc.ABCMeta):
"""
TransactionHandler is the Abstract Base Class that defines the business
logic for a new transaction family.
The family_name, family_versions, and namespaces properties are
used by the processor to route processing requests to the handler.
"""
@abc.abstractproperty
def family_name(self):
"""
family_name should return the name of the transaction family that this
handler can process, e.g. "intkey"
"""
pass
@abc.abstractproperty
def family_versions(self):
"""
family_versions should return a list of versions this transaction
family handler can process, e.g. ["1.0"]
"""
pass
@abc.abstractproperty
def namespaces(self):
"""
namespaces should return a list containing all the handler's
namespaces, e.g. ["abcdef"]
"""
pass
@abc.abstractmethod
def apply(self, transaction, context):
"""
Apply is the single method where all the business logic for a
transaction family is defined. The method will be called by the
transaction processor upon receiving a TpProcessRequest that the
handler understands and will pass in the TpProcessRequest and an
initialized instance of the Context type.
"""
pass
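# Minimal concrete-handler sketch (the family name, version, and namespace below are made up for illustration):
class NoopHandler(TransactionHandler):
    @property
    def family_name(self):
        return "noop"
    @property
    def family_versions(self):
        return ["1.0"]
    @property
    def namespaces(self):
        return ["abcdef"]
    def apply(self, transaction, context):
        pass  # a real handler would read and write state through `context` here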
|
StarcoderdataPython
|
5073537
|
# Add a floor below a sphere and plot it.
#
import pyvista
pl = pyvista.Plotter()
actor = pl.add_mesh(pyvista.Sphere())
actor = pl.add_floor()
pl.show()
|
StarcoderdataPython
|
6594346
|
<filename>code/BOBO_hypterparameter_search.py<gh_stars>0
from argparse import Namespace
from psycopg2.extensions import connection
import torch
from torch import nn
import numpy as np
import ax
from ax.plot.contour import plot_contour
from ax.plot.trace import optimization_trace_single_method
from ax.service.managed_loop import optimize
from ax.utils.notebook.plotting import render, init_notebook_plotting
from typing import Dict
import time
from utils.dataloader_provider import get_dataloaders
from utils.postgres_functions import table_row_sql, insert_row, update_row, make_sure_table_exist
from utils.consts import optimizer_dict, loss_dict, data_compositions
from selector import calc_metrics
from utils.model_manipulator import manipulateModel
conn=None
cur = None
args = None
ss = None
data_composition_key = None
model_key = None
def train(
model: torch.nn.Module,
train_data_loader: torch.utils.data.DataLoader,
parameters: Dict,
device: torch.device,
dtype: torch.dtype,
) -> torch.nn.Module:
model.to(device=device,dtype=dtype)
model.train()
criterion = loss_dict[parameters.get("criterion","MSELoss")]
optimizer = optimizer_dict[parameters.get("optimizer","Adam")](model.parameters(), lr=parameters.get("lr",1e-3),weight_decay=parameters.get("weight_decay",1e-5))
running_loss = 0
correct= 0
total=0
softmax = torch.nn.Softmax(dim=1)
tp = 0
fn = 0
fp = 0
tn = 0
tp_c = 0
fp_c = 0
tn_c = 0
fn_c = 0
num_epochs = parameters.get("num_epochs", 20)
print(num_epochs)
for e in range(num_epochs):
for step,data in enumerate(train_data_loader):
tmp_batch_size = len(data["labels"])
lbl_onehot = torch.FloatTensor(tmp_batch_size,2).to(device=device,dtype=dtype)
# =============datapreprocessing=================
img = torch.FloatTensor(data["imagery"].float()).to(device=device,dtype=dtype)
lbl_onehot.zero_()
lbl_onehot = lbl_onehot.scatter(1,data["labels"].to(device=device,dtype=torch.long),1).to(device=device,dtype=dtype)
# ===================forward=====================
output = model(img)
loss = criterion(output, lbl_onehot)
# ===================backward====================
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss+=(loss.item()*tmp_batch_size)
#determine acc
out_softmax = softmax(output)
confidence, predicted = torch.max(out_softmax.data, 1)
total += tmp_batch_size
labels = data["labels"].view(tmp_batch_size)
pred_cpu = predicted.cpu()
correct += (pred_cpu == labels).sum().item()
label_ones_idx = labels.nonzero()
label_zeroes_idx = (labels==0).nonzero()
tp_idx = pred_cpu[label_ones_idx]==labels[label_ones_idx]
tp += (tp_idx).sum().item()
fp_idx = pred_cpu[label_ones_idx]!=labels[label_ones_idx]
fp += (fp_idx).sum().item()
tn_idx = pred_cpu[label_zeroes_idx]==labels[label_zeroes_idx]
tn += (tn_idx).sum().item()
fn_idx = pred_cpu[label_zeroes_idx]!=labels[label_zeroes_idx]
fn += (fn_idx).sum().item()
tp_c += confidence[tp_idx].sum().item()
fp_c += confidence[fp_idx].sum().item()
tn_c += confidence[tn_idx].sum().item()
fn_c += confidence[fn_idx].sum().item()
metrics = {"acc":correct/total, "loss":running_loss/total,"TP":tp,"FN":fn,"FP":fp,"TN":tn,"TPC":tp_c/total,"FPC":fp_c/total,"TNC":tn_c/total,"FNC":fn_c/total}
return model,metrics
def evaluate(
model: torch.nn.Module,
eval_data_loader: torch.utils.data.DataLoader,
parameters: Dict,
device: torch.device,
dtype: torch.dtype
) -> Dict:
model.to(device=device,dtype=dtype)
model.eval()
criterion = loss_dict[parameters.get("criterion","MSELoss")]
correct = 0
total = 0
running_loss=0
softmax = torch.nn.Softmax(dim=1)
tp = 0
fn = 0
fp = 0
tn = 0
tp_c = 0
fp_c = 0
tn_c = 0
fn_c = 0
with torch.no_grad():
for data in eval_data_loader:
tmp_batch_size = len(data["labels"])
lbl_onehot = torch.FloatTensor(tmp_batch_size,2).to(device=device,dtype=dtype)
# =============datapreprocessing=================
img = torch.FloatTensor(data["imagery"]).to(device=device,dtype=dtype)
# ===================forward=====================
output = model(img)
out_softmax = softmax(output)
lbl_onehot.zero_()
lbl_onehot = lbl_onehot.scatter(1, data["labels"].to(device=device, dtype=torch.long), 1)
loss = criterion(output, lbl_onehot)
running_loss+=(loss.item()*tmp_batch_size)
confidence, predicted = torch.max(out_softmax.data, 1)
total += tmp_batch_size
labels = data["labels"].view(tmp_batch_size)
pred_cpu = predicted.cpu()
correct += (pred_cpu == labels).sum().item()
label_ones_idx = labels.nonzero()
label_zeroes_idx = (labels==0).nonzero()
tp_idx = pred_cpu[label_ones_idx]==labels[label_ones_idx]
tp += (tp_idx).sum().item()
fp_idx = pred_cpu[label_ones_idx]!=labels[label_ones_idx]
fp += (fp_idx).sum().item()
tn_idx = pred_cpu[label_zeroes_idx]==labels[label_zeroes_idx]
tn += (tn_idx).sum().item()
fn_idx = pred_cpu[label_zeroes_idx]!=labels[label_zeroes_idx]
fn += (fn_idx).sum().item()
tp_c += confidence[tp_idx].sum().item()
fp_c += confidence[fp_idx].sum().item()
tn_c += confidence[tn_idx].sum().item()
fn_c += confidence[fn_idx].sum().item()
metrics = {"acc":correct/total, "loss":running_loss/total,"TP":tp,"FN":fn,"FP":fp,"TN":tn,"TPC":tp_c/total,"FPC":fp_c/total,"TNC":tn_c/total,"FNC":fn_c/total}
return metrics
def objective(parameters):
train_data_loader, valid_data_loader, test_data_loader = get_dataloaders(args,ss,data_composition_key, model_key)
model = manipulateModel(model_key,args.is_feature_extraction,data_compositions[data_composition_key])
criterion = parameters.get("criterion")
optimizer = parameters.get("optimizer")(model.parameters(), lr=parameters.get("lr"),weight_decay=parameters.get("weight_decay"))
for epoch in range(args.epochs+1):
start = time.time()
model,train_metrics = train(model,train_data_loader,criterion,optimizer,args.batch_size)
valid_metrics = evaluate(model,valid_data_loader,criterion,optimizer,args.batch_size)
train_metrics = calc_metrics(train_metrics)
valid_metrics = calc_metrics(valid_metrics)
curr_exec_time = time.time()-start
train_metrics["exec_time"] = curr_exec_time
if valid_metrics["acc"] > best_acc:
best_acc = valid_metrics["acc"]
best_loss = valid_metrics["loss"]
update=True
elif valid_metrics["acc"] == best_acc and best_loss < valid_metrics["loss"]:
best_loss = valid_metrics["loss"]
update=True
elif valid_metrics["acc"] == best_acc and best_loss == valid_metrics["loss"] and curr_exec_time<best_exec_time:
update=True
if update:
best_acc_curr_iteration = valid_metrics["acc"]
best_loss_curr_iteration = valid_metrics["loss"]
no_improve_it = 0
best_exec_time = curr_exec_time
valid_metrics["exec_time"]=best_exec_time
torch.save({"epoch":epoch,"model_state_dict":model.state_dict(),"optimizer_state_dict":optimizer.state_dict()}, best_checkpoint_path)
cur.execute(update_row(args.best_validation_results_table_name,task,iteration,epoch,valid_metrics))
conn.commit()
update=False
elif valid_metrics["acc"] > best_acc_curr_iteration or valid_metrics["loss"] < best_loss_curr_iteration:
best_acc_curr_iteration = valid_metrics["acc"]
best_loss_curr_iteration = valid_metrics["loss"]
no_improve_it = 0
else:
no_improve_it+=1
torch.save({"epoch":epoch,"model_state_dict":model.state_dict(),"optimizer_state_dict":optimizer.state_dict()}, state_checkpoint_path)
cur.execute(insert_row(args.states_current_task_table_name,args, task,iteration,epoch,timestamp=time.time(),m1=valid_metrics,m2=train_metrics))
conn.commit()
print('epoch [{}/{}], loss:{:.4f}, {:.4f}%, time: {}'.format(epoch, args.epochs, valid_metrics["loss"],valid_metrics["acc"]*100, curr_exec_time))
if no_improve_it == args.earlystopping_it:
break
# except Exception as e:
# print(f"Exception occured in iteration {iteration}, epoch {epoch}",e)
#TODO load best model ;)
try:
os.remove(state_checkpoint_path)
except Exception as e:
print("Deleting state dict failed",e)
model = manipulateModel(model_key,args.is_feature_extraction,data_compositions[data_composition_key])
if not os.path.isfile(best_checkpoint_path):
print("Best checkpoint file does not exist!!!")
return True
best_checkpoint = torch.load(best_checkpoint_path)
model.load_state_dict(best_checkpoint["model_state_dict"])
optimizer.load_state_dict(best_checkpoint["optimizer_state_dict"])
start = time.time()
test_metrics = test(model,test_data_loader,criterion,optimizer,args.batch_size)
test_metrics = calc_metrics(test_metrics)
test_metrics["exec_time"] = time.time()-start
cur.execute(insert_row(args.best_test_results_table_name, args, task, iteration, -1, timestamp=time.time(),m1=test_metrics))
conn.commit()
except KeyboardInterrupt as e:
print(e)
print("GOODBY :)))")
return False
return True
def hyperparameter_optimization(a:Namespace,c:connection,task:str):
dtype = torch.float
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
global cur
cur = c.cursor()
global conn
conn = c
global args
args = a
global ss
global data_composition_key
global model_key
_,ss,data_composition_key,model_key=task.split(":")
make_sure_table_exist(args, conn, cur, args.states_current_task_table_name)
make_sure_table_exist(args, conn, cur, args.best_validation_results_table_name)
make_sure_table_exist(args, conn, cur, args.best_test_results_table_name)
range_lr = ax.RangeParameter(name="lr",lower=1e-7,upper=0.5,parameter_type=ax.ParameterType.FLOAT)
range_weight_decay = ax.RangeParameter(name="weight_decay",lower=1e-8,upper=0.5,parameter_type=ax.ParameterType.FLOAT)
choice_optimizer = ax.ChoiceParameter(name="optimizer", values=["Adadelta","Adagrad","Adam","AdamW","Adamax","ASGD","RMSprop","Rprop","SGD"], parameter_type=ax.ParameterType.STRING)
choice_criterion = ax.ChoiceParameter(name="criterion",values=["BCELoss","MSELoss"],parameter_type=ax.ParameterType.STRING)
search_space = ax.SearchSpace(parameters=[range_lr, range_weight_decay,choice_optimizer,choice_criterion])
experiment = ax.Experiment(name="experiment_building_blocks",search_space=search_space)
sobol = ax.Models.SOBOL(search_space=experiment.search_space)
generator_run = sobol.gen(1)
return True
|
StarcoderdataPython
|
6521577
|
<reponame>Siddhant021295/Gesture-Recognation-IPN-Hands<filename>Googleopts.py
import argparse
def parse_opts():
# Offline means not real time
parser = argparse.ArgumentParser()
parser.add_argument('--root_path',default='',type=str,help='Root directory path of data')
parser.add_argument('--video_path',default='dataset',type=str,help='Directory path of Videos')
parser.add_argument('--annotation_path',default='annotation/IPN.json',type=str,help='Annotation file path')
parser.add_argument('--result_path',default='results',type=str,help='Result directory path')
parser.add_argument('--store_name', default='model', type=str, help='Name to store checkpoints')
parser.add_argument('--modality', default='RGB', type=str, help='Modality of input data. RGB, Depth, or RGB-D')
parser.add_argument('--dataset',default='kinetics',type=str,help='Used dataset (activitynet | kinetics | ucf101 | hmdb51)')
parser.add_argument('--n_classes',default=13,type=int,help='Number of classes (activitynet: 200, kinetics: 400, ucf101: 101, hmdb51: 51)')
parser.add_argument('--n_finetune_classes',default=13,type=int,help='Number of classes for fine-tuning. n_classes is set to the number when pretraining.')
parser.add_argument('--sample_size',default=112,type=int,help='Height and width of inputs')
parser.add_argument('--sample_duration',default=16,type=int,help='Temporal duration of inputs')
parser.add_argument('--initial_scale',default=1.0,type=float,help='Initial scale for multiscale cropping')
parser.add_argument('--n_scales',default=5,type=int,help='Number of scales for multiscale cropping')
parser.add_argument('--scale_step',default=0.84089641525,type=float,help='Scale step for multiscale cropping')
parser.add_argument('--train_crop',default='random',type=str,help='Spatial cropping method in training. random is uniform. corner is selection from 4 corners and 1 center. (random | corner | center)')
parser.add_argument('--train_temporal',default='random',type=str,help='Temporal transformation method in training. (random | ranpad)')
parser.add_argument('--temporal_pad',default=0,type=int,help='Pad number for temporal transformation method (ranpad)')
parser.add_argument('--learning_rate',default=0.001,type=float,help='Initial learning rate (divided by 10 while training by lr scheduler)')
parser.add_argument('--lr_steps', default=[10, 25, 50, 80, 100], type=float, nargs="+", metavar='LRSteps', help='epochs to decay learning rate by 10')
parser.add_argument('--momentum', default=0.9, type=float, help='Momentum')
parser.add_argument('--dampening', default=0.9, type=float,help='dampening of SGD')
parser.add_argument('--weight_decay', default=1e-3, type=float, help='Weight Decay')
parser.add_argument('--mean_dataset',default='activitynet',type=str,help='dataset for mean values of mean subtraction (activitynet | kinetics)')
parser.add_argument('--no_mean_norm',action='store_true',help='If true, inputs are not normalized by mean.')
parser.add_argument('--std_norm',action='store_true',help='If true, inputs are normalized by standard deviation.')
parser.add_argument('--nesterov', action='store_true', help='Nesterov momentum')
    parser.add_argument('--optimizer',default='sgd',type=str,help='Currently only SGD is supported')
parser.add_argument('--lr_patience',default=10,type=int,help='Patience of LR scheduler. See documentation of ReduceLROnPlateau.')
parser.add_argument('--batch_size', default=32, type=int, help='Batch Size')
parser.add_argument('--n_epochs',default=20,type=int,help='Number of total epochs to run')
parser.add_argument('--begin_epoch',default=1,type=int,help='Training begins at this epoch. Previous trained model indicated by resume_path is loaded.')
parser.add_argument('--n_val_samples',default=3,type=int,help='Number of validation samples for each activity')
parser.add_argument('--resume_path',default='',type=str,help='Save data (.pth) of previous training')
parser.add_argument('--pretrain_path', default='', type=str, help='Pretrained model (.pth)')
parser.add_argument('--pretrain_dataset', default='', type=str, help='dataset from pretrained model')
parser.add_argument('--ft_begin_index',default=0,type=int,help='Begin block index of fine-tuning')
parser.add_argument('--no_train',default= False ,action='store_true',help='If true, training is not performed.')
parser.add_argument('--fine_tuning',action='store_true',help='If true, fine-tuning starts from epoch 1.')
parser.add_argument('--no_val',action='store_true',help='If true, validation is not performed.')
    parser.add_argument('--true_valid',action='store_true',help='If true, average recognition per clip is performed (not only temporal center crop)')
    parser.add_argument('--adap_temp',action='store_true',help='If true, inputs with more than x frames are reduced (adaptive temporal cropping)')
parser.add_argument('--test', action='store_true',help='If true, test is performed.')
parser.add_argument('--test_subset',default='val',type=str,help='Used subset in test (val | test)')
    parser.add_argument('--train_validate', action='store_true', help='If true, validation is also performed during training.')
parser.add_argument('--scale_in_test',default=1.0,type=float,help='Spatial scale in test')
parser.add_argument('--crop_position_in_test',default='c',type=str,help='Cropping method (c | tl | tr | bl | br) in test')
parser.add_argument('--no_softmax_in_test',action='store_true',help='If true, output for each clip is not normalized using softmax.')
    parser.add_argument('--no_scrop',action='store_true',help='If true, denso images are not cropped by subject')
parser.add_argument('--no_cuda', action='store_true', help='If true, cuda is not used.')
parser.add_argument('--n_threads',default=4,type=int,help='Number of threads for multi-thread loading')
parser.add_argument('--checkpoint',default=10,type=int,help='Trained model is saved at every this epochs.')
    parser.add_argument('--no_hflip',action='store_true',help='If true, horizontal flipping is not performed.')
parser.add_argument('--norm_value',default=1,type=int,help='If 1, range of inputs is [0-255]. If 255, range of inputs is [0-1].')
    parser.add_argument('--model',default='resnet',type=str,help='(resnet | preresnet | wideresnet | resnext | densenet)')
parser.add_argument('--model_depth',default=18,type=int,help='Depth of resnet (10 | 18 | 34 | 50 | 101)')
    parser.add_argument('--no_first_lay', action='store_true', help='If true, the first conv layer is not changed.')
parser.add_argument('--resnet_shortcut',default='B',type=str,help='Shortcut type of resnet (A | B)')
parser.add_argument('--wide_resnet_k', default=2, type=int,help='Wide resnet k')
parser.add_argument('--resnext_cardinality',default=32,type=int,help='ResNeXt cardinality')
parser.add_argument('--manual_seed', default=1, type=int, help='Manually set random seed')
parser.add_argument('--weighted', action='store_true', help='If true, loss is weighted')
parser.add_argument('--clf_threshold', default=0.1, type=float, help='Threshold to predict none gestures')
parser.add_argument('--width_mult', default=1.0, type=float, help='The applied width multiplier to scale number of filters')
parser.set_defaults(no_mean_norm=False)
parser.set_defaults(weighted=False)
parser.set_defaults(no_first_lay=False)
parser.set_defaults(no_hflip=False)
parser.set_defaults(no_cuda=False)
parser.set_defaults(no_scrop=False)
parser.set_defaults(no_softmax_in_test=False)
parser.set_defaults(train_validate=False)
parser.set_defaults(test=False)
parser.set_defaults(adap_temp=False)
parser.set_defaults(true_valid=False)
parser.set_defaults(no_val=False)
parser.set_defaults(fine_tuning=False)
parser.set_defaults(no_train=False)
parser.set_defaults(nesterov=False)
parser.set_defaults(std_norm=False)
args = parser.parse_args()
return args
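# A minimal usage sketch (the flag values below are only examples, and the calling script
# name is assumed):
#   python train.py --video_path dataset --annotation_path annotation/IPN.json --model resnet --model_depth 18
if __name__ == '__main__':
    opt = parse_opts()
    print(opt.video_path, opt.annotation_path, opt.model, opt.model_depth, opt.batch_size)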
|
StarcoderdataPython
|
1862916
|
# File: PyPoll/main.py
import os
import csv
csvpath = os.path.join("PyPoll.csv")
# Create lists and variables
candidates = []
total_votes = 0
candidate_votes = []
election_data = ['1', '2']
# Open CSV
with open(csvpath) as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
    header = next(csvreader, None)  # skip the header row
# Iterate through
for line in csvreader:
# Determine vote count and candidate count
total_votes = total_votes +1
candidate = line[2]
# Assign votes to candidates
if candidate in candidates:
candidate_index = candidates.index(candidate)
candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1
# Append votes to candidate and total votes
else:
candidates.append(candidate)
candidate_votes.append(1)
# Variables percent of votes per candidate
percentages = []
max_votes = candidate_votes[0]
max_index = 0
#Work out percentages and winner (in a For Loop)
for count in range(len(candidates)):
vote_percentage = candidate_votes[count]/total_votes*100
percentages.append(vote_percentage)
if candidate_votes[count] > max_votes:
max_votes = candidate_votes[count]
print(max_votes)
max_index = count
winner = candidates[max_index]
percentages = [round(i,2) for i in percentages]
# Summary print test of election results
print("Election Results")
print("--------------------------")
print(f"Total Votes: {total_votes}")
print("--------------------------")
for count in range(len(candidates)):
print(f"{candidates[count]}: {percentages[count]}% ({candidate_votes[count]})")
print("--------------------------")
print(f"Winner: {winner}")
print("--------------------------")
#Export file name and open as text file
output_file = csvpath[0:-4]
write_csvpath = f"{output_file}PyPoll_results.txt"
filewriter = open(write_csvpath, mode = 'w')
# Write results to export text file
filewriter.write("Election Results\n")
filewriter.write("-----------------------------\n")
filewriter.write(f"Total Votes: {total_votes}\n")
filewriter.write("-----------------------------\n")
for count in range(len(candidates)):
filewriter.write(f"{candidates[count]}: {percentages[count]}% ({candidate_votes[count]})\n")
filewriter.write("-----------------------------\n")
filewriter.write(f"Winner: {winner}\n")
filewriter.write("-----------------------------\n")
# Close the text file
filewriter.close()
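# Quick local check (hypothetical data, not part of the original assignment): the script
# expects a PyPoll.csv in the working directory with one header row and the candidate name
# in the third column. A throwaway sample could be generated like this before running it:
#
#   import csv
#   with open("PyPoll.csv", "w", newline="") as f:
#       writer = csv.writer(f)
#       writer.writerow(["Ballot ID", "County", "Candidate"])
#       writer.writerows([[1, "Denver", "Khan"], [2, "Denver", "Correy"], [3, "Denver", "Khan"]])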
|
StarcoderdataPython
|
396610
|
# Repo: heijp06/AoC-2021
def part1(rows):
return go(rows, 1)
def part2(rows):
return go(rows, 2)
def go(rows, part):
position = 0
depth = 0
aim = 0
for row in rows:
command, arg = row.split()
match command, int(arg):
case "forward", value:
position += value
depth += aim * value
case "down", value:
aim += value
case "up", value:
aim -= value
return aim * position if part == 1 else depth * position
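# A small self-check using the example course from the Advent of Code 2021 day 2 puzzle
# statement (the expected values 150 and 900 are recalled from the puzzle, not taken from
# this repository):
if __name__ == "__main__":
    example = ["forward 5", "down 5", "forward 8", "up 3", "down 8", "forward 2"]
    print(part1(example))  # expected: 150
    print(part2(example))  # expected: 900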
|
StarcoderdataPython
|
6422071
|
import matplotlib.pyplot as plt
import numpy as np
class Visualizer:
def __init__(self):
print("Opening visualizer")
def plotPlane(self, X, Y, plane):
fig = plt.figure()
        ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in newer Matplotlib
ax.plot_surface(X, Y, plane,
cmap=plt.cm.coolwarm,
linewidth=0,
antialiased=True)
ax.set_xlabel('x space')
ax.set_ylabel('y space')
ax.set_zlabel('Gaussians representing target sensor returns')
plt.show()
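# A hedged usage sketch with synthetic data (not from the original project): plot a single
# unit-variance Gaussian bump over a small grid to exercise plotPlane().
if __name__ == "__main__":
    xs = np.linspace(-3, 3, 60)
    ys = np.linspace(-3, 3, 60)
    X, Y = np.meshgrid(xs, ys)
    plane = np.exp(-(X ** 2 + Y ** 2) / 2.0)  # Gaussian centred at the origin
    Visualizer().plotPlane(X, Y, plane)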
|
StarcoderdataPython
|
11289624
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-05-02 18:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hives', '0028_inspectionbox_inspection'),
]
operations = [
migrations.RemoveField(
model_name='inspectionbox',
name='timestamp',
),
]
|
StarcoderdataPython
|
6439671
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0015-3Sum.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-16
=================================================================="""
import sys
import time
from typing import List
"""
LeetCode - 0015 - (Medium) - 3Sum
https://leetcode.com/problems/3sum/
Description & Requirement:
Given an integer array nums, return all the triplets [nums[i], nums[j], nums[k]]
such that i != j, i != k, and j != k, and nums[i] + nums[j] + nums[k] == 0.
Notice that the solution set must not contain duplicate triplets.
Example 1:
Input: nums = [-1,0,1,2,-1,-4]
Output: [[-1,-1,2],[-1,0,1]]
Example 2:
Input: nums = []
Output: []
Example 3:
Input: nums = [0]
Output: []
Constraints:
0 <= nums.length <= 3000
-10^5 <= nums[i] <= 10^5
"""
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
# exception case
if not isinstance(nums, list) or len(nums) <= 2:
return []
if len(nums) == 3:
return [nums] if sum(nums) == 0 else []
# main method: (sort + 2Sum (two pointers))
return self._threeSum(nums)
def _threeSum(self, nums: List[int]) -> List[List[int]]:
len_nums = len(nums)
assert len_nums > 3
res = []
nums.sort()
for first_index in range(len_nums):
if first_index > 0 and nums[first_index] == nums[first_index - 1]:
continue # skip adjacent same number
third_index = len_nums - 1 # set the third number as the last one
two_sum = 0 - nums[first_index] # the target sum of the second number and the third number
            # scan all valid choices of the second number
for second_index in range(first_index + 1, len_nums):
if second_index > first_index + 1 and nums[second_index] == nums[second_index - 1]:
continue # skip adjacent same number
while second_index < third_index and nums[second_index] + nums[third_index] > two_sum:
third_index -= 1 # move third index till nums[second_index] + nums[third_index] <= two_sum
if second_index >= third_index:
break # pointer cross, break
if nums[second_index] + nums[third_index] == two_sum: # find a valid combo
res.append([nums[first_index], nums[second_index], nums[third_index]])
return res
def main():
# Example 1: Output: [[-1,-1,2],[-1,0,1]]
nums = [-1, 0, 1, 2, -1, -4]
# Example 2: Output: []
# nums = []
# Example 3: Output: []
# nums = [0]
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.threeSum(nums)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
9682911
|
# Repo: SankarST/RepoOpenAcadTraining
# -*- coding: utf-8 -*-
from datetime import timedelta
from odoo import fields, models, api, exceptions
class Course(models.Model):
_name = 'oa.course'
_description = 'Course OA'
name = fields.Char(string='Title', required=True)
description = fields.Text()
responsible_id = fields.Many2one('res.users', string="Responsible", help="Need not be the Instructor")
session_ids = fields.One2many('oa.session', 'course_id', string="Sessions")
level = fields.Selection([('1', 'Easy'), ('2', 'Medium'), ('3', 'Hard')], string="Difficulty Level")
session_count = fields.Integer(compute="_compute_session_count")
attendee_count = fields.Integer(compute="_compute_attendee_count")
_sql_constraints = [
('name_description_check',
'CHECK(name != description)',
"The title of the course should not be the description"),
('name_unique',
'UNIQUE(name)',
"The course title must be unique"),
]
def copy(self, default=None):
default = dict(default or {})
copied_count = self.search_count(
[('name', '=like', u"Copy of {}%".format(self.name))])
if not copied_count:
new_name = u"Copy of {}".format(self.name)
else:
new_name = u"Copy of {} ({})".format(self.name, copied_count)
default['name'] = new_name
return super(Course, self).copy(default)
def open_attendees(self):
self.ensure_one()
attendee_ids = self.session_ids.mapped('attendee_ids')
return {
'name': 'Attendees of %s' % (self.name),
'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'tree,form',
'view_type': 'form',
'domain': [('id', 'in', attendee_ids.ids)],
}
@api.depends('session_ids')
def _compute_session_count(self):
for course in self:
course.session_count = len(course.session_ids)
@api.depends('session_ids.attendees_count')
def _compute_attendee_count(self):
for course in self:
course.attendee_count = len(course.mapped('session_ids.attendee_ids'))
class Session(models.Model):
_name = 'oa.session'
_inherit = ['mail.thread']
_order = 'name'
_description = 'Session OA'
name = fields.Char(required=True)
description = fields.Html()
active = fields.Boolean(default=True)
state = fields.Selection([('draft', "Draft"), ('confirmed', "Confirmed"), ('done', "Done")], default='draft')
level = fields.Selection(related='course_id.level', readonly=True)
responsible_id = fields.Many2one(related='course_id.responsible_id', readonly=True, store=True)
start_date = fields.Date(default=fields.Date.context_today)
end_date = fields.Date(string='End Date', store=True, compute='_get_end_date', inverse='_set_end_date')
duration = fields.Float(digits=(6, 2), help="Duration in days", default=1)
instructor_id = fields.Many2one('res.partner', string="Instructor")
course_id = fields.Many2one('oa.course', ondelete='cascade', string="Course", required=True)
attendee_ids = fields.Many2many('res.partner', string="Attendees")
attendees_count = fields.Integer(compute='_get_attendees_count', store=True)
seats = fields.Integer()
## Using computed fields
taken_seats = fields.Float(compute='_compute_taken_seats', store=True)
percentage_per_day = fields.Integer("%", default=100)
def _warning(self, title, message):
return {'warning': {
'title': title,
'message': message,
}}
@api.depends('seats', 'attendee_ids')
def _compute_taken_seats(self):
for session in self:
if not session.seats:
session.taken_seats = 0.0
else:
session.taken_seats = 100.0 * len(session.attendee_ids) / session.seats
@api.depends('attendee_ids')
def _get_attendees_count(self):
for session in self:
session.attendees_count = len(session.attendee_ids)
@api.onchange('seats', 'attendee_ids')
def _verify_valid_seats(self):
if self.seats < 0:
return self._warning("Incorrect 'seats' value", "The number of available seats may not be negative")
if self.seats < len(self.attendee_ids):
return self._warning("Too many attendees", "Increase seats or remove excess attendees")
@api.constrains('instructor_id', 'attendee_ids')
def _check_instructor_not_in_attendees(self):
for session in self:
if session.instructor_id and session.instructor_id in session.attendee_ids:
raise exceptions.ValidationError("A session's instructor can't be an attendee")
@api.depends('start_date', 'duration')
def _get_end_date(self):
for session in self:
if not (session.start_date and session.duration):
session.end_date = session.start_date
else:
# Add duration to start_date, but: Monday + 5 days = Saturday,
# so subtract one second to get on Friday instead
start = fields.Datetime.from_string(session.start_date)
duration = timedelta(days=session.duration, seconds=-1)
session.end_date = str(start + duration)
def _set_end_date(self):
for session in self:
if session.start_date and session.end_date:
# Compute the difference between dates, but: Friday - Monday = 4
# days, so add one day to get 5 days instead
start_date = fields.Datetime.from_string(session.start_date)
end_date = fields.Datetime.from_string(session.end_date)
session.duration = (end_date - start_date).days + 1
###
## using onchange
###
## done by _verify_valid_seats
# @api.onchange('seats', 'attendee_ids')
# def _change_taken_seats(self):
# if self.taken_seats > 100:
# return {'warning': {
# 'title': 'Too many attendees',
# 'message': 'The room has %s available seats and there is %s attendees registered' % (self.seats, len(self.attendee_ids))
# }}
###
## using python constrains
###
## done by _verify_valid_seats
# @api.constrains('seats', 'attendee_ids')
# def _check_taken_seats(self):
# for session in self:
# if session.taken_seats > 100:
# raise exceptions.ValidationError('The room capacity is %s seats and there already %s attendees registered' % (session.seats, len(session.attendee_ids)))
def action_draft(self):
for rec in self:
rec.state = 'draft'
rec.message_post(body="Session %s of the course %s reset to draft" % (rec.name, rec.course_id.name))
def action_confirm(self):
for rec in self:
rec.state = 'confirmed'
rec.message_post(body="Session %s of the course %s confirmed" % (rec.name, rec.course_id.name))
def action_done(self):
for rec in self:
rec.state = 'done'
rec.message_post(body="Session %s of the course %s done" % (rec.name, rec.course_id.name))
def _auto_transition(self):
for rec in self:
if rec.taken_seats >= 50.0 and rec.state == 'draft':
rec.action_confirm()
def write(self, vals):
res = super(Session, self).write(vals)
for rec in self:
rec._auto_transition()
if vals.get('instructor_id'):
self.message_subscribe([vals['instructor_id']])
return res
@api.model
def create(self, vals):
res = super(Session, self).create(vals)
res._auto_transition()
if vals.get('instructor_id'):
res.message_subscribe([vals['instructor_id']])
return res
## done in get , set end date
# @api.onchange('start_date', 'end_date')
# def _compute_duration(self):
# if not (self.start_date and self.end_date):
# return
# if self.end_date < self.start_date:
# return {'warning': {
# 'title': "Incorrect date value",
# 'message': "End date is earlier then start date",
# }}
# delta = fields.Date.from_string(self.end_date) - fields.Date.from_string(self.start_date)
# self.duration = delta.days + 1
###
## using SQL constrains
###
# _sql_constraints = [
# # possible only if taken_seats is stored
# ('session_full', 'CHECK(taken_seats <= 100)', 'The room is full'),
# ]
|
StarcoderdataPython
|
3203714
|
# Repo: dgerod/ariac-2018_gear
#!/usr/bin/env python
from __future__ import print_function
import sys
import time
from test_example_node import ExampleNodeTester
from ariac_example import ariac_example
import rospy
import rostest
class GripperTester(ExampleNodeTester):
def test(self):
self.comp_class = ariac_example.MyCompetitionClass()
ariac_example.connect_callbacks(self.comp_class)
time.sleep(1.0)
# Pre-defined initial pose because sometimes the arm starts "droopy"
self._send_arm_to_initial_pose()
# Pre-defined pose that puts the gripper in contact with a product.
self._send_arm_to_product()
# Enable the gripper so that it picks up the product.
self._test_enable_gripper()
# Move the product over the shipping box using a pre-defined sequence of poses.
self._send_arm_to_shipping_box()
self.assertTrue(
self.comp_class.current_gripper_state.enabled, 'Gripper no longer enabled')
self.assertTrue(
self.comp_class.current_gripper_state.attached, 'Product no longer attached')
# Disable the gripper so that it drops the product.
self._test_disable_gripper()
time.sleep(1.0)
def _test_enable_gripper(self):
success = self._enable_gripper()
self.assertTrue(success, 'Gripper not successfully controlled')
time.sleep(1.0)
self.assertTrue(
self.comp_class.current_gripper_state.enabled, 'Gripper not successfully enabled')
self.assertTrue(
self.comp_class.current_gripper_state.attached, 'Product not successfully attached')
def _enable_gripper(self):
success = ariac_example.control_gripper(True)
time.sleep(0.5)
return success
def _test_disable_gripper(self):
success = self._disable_gripper()
self.assertTrue(success, 'Gripper not successfully controlled')
time.sleep(1.0)
self.assertFalse(
self.comp_class.current_gripper_state.enabled, 'Gripper not successfully disabled')
self.assertFalse(
            self.comp_class.current_gripper_state.attached, 'Product not successfully detached')
def _disable_gripper(self):
success = ariac_example.control_gripper(False)
time.sleep(0.5)
return success
def _send_arm_to_product(self):
trajectory = [
[-1.272, -1.102, 0.050, 1.112, -1.329, 1.360, 0.902, -0.663],
[0.444, -1.885, -1.726, 1.945, -0.941, 1.754, -2.380, -0.018],
[0.025, -1.484, -2.085, 0.046, -1.041, 1.317, -2.134, 0.259],
[0.100, -1.751, -2.046, 0.010, -1.11, 1.312, -2.088, 0.190],
]
for positions in trajectory:
self.comp_class.send_arm_to_state(positions)
time.sleep(1.5)
def _send_arm_to_shipping_box(self):
trajectory = [
[0.216, -1.672, -2.10, 0.584, -1.140, 1.574, -2.380, 0.150],
[0.678, -2.060, -2.031, 1.876, -1.107, 1.914, -3.020, 0.294],
[1.601, -1.893, -2.465, 0.800, -0.893, 1.919, -2.572, 0.887],
[2.795, -2.009, -2.316, 0.556, -0.746, 1.745, -1.215, 0.206],
]
for positions in trajectory:
self.comp_class.send_arm_to_state(positions)
time.sleep(1.0)
if __name__ == '__main__':
rospy.init_node('test_gripper', anonymous=True)
# Wait until /clock is being published; this can take an unpredictable
# amount of time when we're downloading models.
while rospy.Time.now().to_sec() == 0.0:
print('Waiting for Gazebo to start...')
time.sleep(1.0)
# Take an extra nap, to allow plugins to be loaded
time.sleep(12.0)
print('OK, starting test.')
rostest.run('test_ariac', 'test_gripper', GripperTester, sys.argv)
|
StarcoderdataPython
|
6500769
|
# Repo: mightymercado/PythonPosit
from numpy import exp, array, random, dot
class NeuralNetwork():
def __init__(self):
# Seed the random number generator, so it generates the same numbers
# every time the program runs.
random.seed(1)
# We model a single neuron, with 3 input connections and 1 output connection.
# We assign random weights to a 3 x 1 matrix, with values in the range -1 to 1
# and mean 0.
self.synaptic_weights = 2 * random.random((3, 1)) - 1
# The Sigmoid function, which describes an S shaped curve.
# We pass the weighted sum of the inputs through this function to
# normalise them between 0 and 1.
def __sigmoid(self, x):
# print("A", x)
#print("B", 1 / (1 + exp(-x)))
return 1 / (1 + exp(-x))
# The derivative of the Sigmoid function.
# This is the gradient of the Sigmoid curve.
# It indicates how confident we are about the existing weight.
def __sigmoid_derivative(self, x):
return x * (1 - x)
# We train the neural network through a process of trial and error.
# Adjusting the synaptic weights each time.
def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
        for iteration in range(number_of_training_iterations):
# Pass the training set through our neural network (a single neuron).
output = self.think(training_set_inputs)
# Calculate the error (The difference between the desired output
# and the predicted output).
error = training_set_outputs - output
# Multiply the error by the input and again by the gradient of the Sigmoid curve.
# This means less confident weights are adjusted more.
# This means inputs, which are zero, do not cause changes to the weights.
#print("Z", self.__sigmoid_derivative(output))
adjustment = dot(training_set_inputs.T, error * self.__sigmoid_derivative(output))
# Adjust the weights.
self.synaptic_weights += adjustment
# The neural network thinks.
def think(self, inputs):
# Pass inputs through our neural network (our single neuron).
return self.__sigmoid(dot(inputs, self.synaptic_weights))
if __name__ == "__main__":
#Intialise a single neuron neural network.
neural_network = NeuralNetwork()
print "Random starting synaptic weights: "
print neural_network.synaptic_weights
# The training set. We have 4 examples, each consisting of 3 input values
# and 1 output value.
training_set_inputs = array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
training_set_outputs = array([[0, 1, 1, 0]]).T
# Train the neural network using a training set.
# Do it 10,000 times and make small adjustments each time.
neural_network.train(training_set_inputs, training_set_outputs, 10000)
print "New synaptic weights after training: "
print neural_network.synaptic_weights
# Test the neural network with a new situation.
print "Considering new situation [1, 0, 0] -> ?: "
print(neural_network.think(array([1, 0, 0])))
print(neural_network.think(array([0, 0, 0])))
print(neural_network.think(array([1, 1, 0])))
print(neural_network.think(array([0, 1, 0])))
|
StarcoderdataPython
|
4977546
|
# Repo: beaupreda/cnn-rgbir | File: stereo/testing.py
'''
script used to execute multiple runs of testing
simply calls corresponding lua script with chosen arguments
David-<NAME>
'''
import os
import subprocess
from utils import InputParser, YamlReader
def prepare_test(gpu_id=None,
test_nb=None,
data_root=None,
testing=None,
tb=None,
psz=None,
half_range=None,
fold=None,
weights=None,
bn=None):
params = locals()
call = list()
call.append('th')
call.append('test.lua')
for key in params.keys():
if params[key] is not None:
call.append('--' + key)
call.append(params[key])
return call
def test():
input_parser = InputParser()
input_parser.add_arguments('--fold', '1', 'Fold to test data')
input_parser.add_arguments('--config', '/home/travail/dabeaq/litiv/masters/pbvs2019/cnn-rgbir/shared/config.yml', 'Path to the configuration file')
args = input_parser.get_arguments()
yml = YamlReader(args.config)
config = yml.parse()
data_root = config['output_dataset']
if int(args.fold) == config['fold1']['id']:
data_root = os.path.join(data_root, config['fold1']['dataset'])
weights_file = config['fold1']['weights']
bn_file = config['fold1']['bn']
test_file = config['fold1']['test']
test_nb = str(config['fold1']['test_nb'])
elif int(args.fold) == config['fold2']['id']:
data_root = os.path.join(data_root, config['fold2']['dataset'])
weights_file = config['fold2']['weights']
bn_file = config['fold2']['bn']
test_file = config['fold2']['test']
test_nb = str(config['fold2']['test_nb'])
elif int(args.fold) == config['fold3']['id']:
data_root = os.path.join(data_root, config['fold3']['dataset'])
weights_file = config['fold3']['weights']
bn_file = config['fold3']['bn']
test_file = config['fold3']['test']
test_nb = str(config['fold3']['test_nb'])
else:
data_root = os.path.join(data_root, config['custom']['dataset'])
weights_file = config['custom']['weights']
bn_file = config['custom']['bn']
test_file = config['custom']['test']
test_nb = str(config['custom']['test_nb'])
disp_root = config['disp_root']
    param_root = None  # only set below for the three predefined folds; a custom fold would need its own value
if int(args.fold) == 1 or int(args.fold) == 2 or int(args.fold) == 3:
disp_root = config['pretrain_disp_root']
param_root = config['pretrain_param_root']
gpu_id = str(config['gpu_id'])
tb = str(config['tb'])
patch_size = str(config['half_width'])
half_range = str(config['half_range'])
testing = os.path.join(disp_root, test_file)
weights = os.path.join(param_root, weights_file)
bn = os.path.join(param_root, bn_file)
run = prepare_test(gpu_id=gpu_id,
test_nb=test_nb,
data_root=data_root,
testing=testing,
tb=tb,
psz=patch_size,
half_range=half_range,
fold=args.fold,
weights=weights,
bn=bn)
subprocess.call(run)
if __name__ == '__main__':
test()
|
StarcoderdataPython
|
67721
|
# Repo: itzmeanjan/tgnize
#!/usr/bin/python3
from __future__ import annotations
from typing import List, Tuple
from functools import reduce
from subprocess import run
from sys import argv
from os import mkdir
from os.path import abspath, exists, join
from datetime import timedelta
from .util import parseChat
from .plotting_scripts.minuteBasedAccumulatedTraffic import (
extractMinuteBasedTraffic,
extractMinuteBasedTrafficByUser,
plotAnimatedGraphForAccumulatedTrafficByMinuteFor24HourSpan,
calculateChatTrafficPercentageInPartOfDay
)
from .plotting_scripts.activeParticipantsOverTime import (
getTopXParticipantsAlongWithContribution,
getTopXParticipantsFromMessageRangeAlongWithContribution,
plotAnimatedGraphShowingTopXActiveParticipantsOverTime
)
'''
Given sink directory path and target file name,
it joins them into a single component & returns
sink file path ( absolute )
'''
def _getSinkFilePath(dirName: str, fileName: str) -> str:
return join(abspath(dirName), fileName)
'''
Checks presence of sink directory on current machine,
if doesn't exists, it builds so.
'''
def _sinkDirBuilder(targetPath: str):
_tmp = abspath(targetPath)
if not exists(_tmp):
mkdir(_tmp)
'''
Displays a simple banner, depicting usage of script,
along with author name & repository address
'''
def _displayBanner():
print('\x1b[1;6;36;49m[+]tgnize v0.1.3 - How about another Telegram Chat Analyzer ?\x1b[0m\n\n\t\x1b[3;30;47m$ tgnize `path-to-exported-chat-dir` `path-to-sink-dir`\x1b[0m\n\n[+]Author: <NAME><<EMAIL>>\n[+]Source: \x1b[4mhttps://github.com/itzmeanjan/tgnize\x1b[0m ( MIT Licensed )\n')
'''
Returns source directory path ( holding exported telegram chat data set ) &
sink directory ( where we'll store generated plots )
'''
def _handleCMDInput() -> Tuple[str, str]:
return tuple(argv[1:len(argv)]) if len(argv) == 3 else (None, None)
'''
Escapes troublesome special characters present in chat participant's
names, which might cause some issue, if we put it in generated plots ( animated )
name
'''
def _getEscapedName(proposedName: str) -> str:
return proposedName.translate(
proposedName.maketrans(
{'/': r'_',
'\\': r'_',
' ': r'_'
}
)
)
'''
Calculates rate of success of execution of this script on
exported chat data
'''
def __calculateSuccess__(data: List[bool]) -> float:
return 0.0 if not data else reduce(lambda acc, cur: (acc + 1) if cur else acc, data, 0) / len(data) * 100
def _choiceHandler(ch: int, chat: Chat):
if ch == -1 or ch == 12:
print('\n[!]Terminated')
exit(0)
elif ch == 0:
print('\n\x1b[5;31;49m[!]Invalid choice\x1b[0m')
elif ch == 1:
print('\nFound \x1b[1;31;49m{}\x1b[0m participants in Chat'.format(chat.userCount))
elif ch == 2:
print('\nFound \x1b[1;31;49m{}\x1b[0m messages in Chat'.format(chat.totalMessageCount))
elif ch == 3:
print('\nFound \x1b[1;31;49m{}\x1b[0m events in Chat'.format(chat.totalEventCount))
elif ch == 4:
print('\nFound \x1b[1;31;49m{}\x1b[0m activities in Chat ( in total )'.format(chat.activityCount))
elif ch == 5:
try:
print('\n`X` > ', end='')
print('{}'.format(''.join(['\n{} - \x1b[1;3;34;50m{}\x1b[0m ( {:.4f} % )'.format(i + 1, k, v) for i, (k, v) in enumerate(getTopXParticipantsAlongWithContribution(int(input()), chat).items())])))
except Exception:
print('\n[!]Bad Input')
elif ch == 6:
_from, _to = chat.getChatTimeRange()
print('\nFrom \x1b[3;31;50m{}\x1b[0m to \x1b[3;31;50m{}\x1b[0m\nSpans over : \x1b[3;33;50m{}\x1b[0m'.format(_from, _to, _to - _from))
elif ch == 7:
_tmp = chat.getUserCountWhoUsedBot()
print('\nFound \x1b[1;31;49m{}\x1b[0m ( {:.4f} % ) participants who sent message via Bot'.format(_tmp, _tmp * 100 / chat.userCount))
elif ch == 8:
_tmp = chat.getUserCountWhoDidNotUseBot()
print('\nFound \x1b[1;31;49m{}\x1b[0m ( {:.4f} % ) participants who didn\'t send message via Bot'.format(_tmp, _tmp * 100 / chat.userCount))
elif ch == 9:
print(''.join(['\n\x1b[1;3;34;50m{}\x1b[0m ( {:.4f} % )'.format(k, v) for k, v in calculateChatTrafficPercentageInPartOfDay(chat).items()]))
elif ch == 10:
print('\nMinimum Delay : \x1b[3;33;50m{}\x1b[0m\nMaximum Delay : \x1b[3;33;50m{}\x1b[0m\nAverage Delay : \x1b[3;33;50m{}\x1b[0m'.format(*chat.delayInMessagesWithInTimeRange(chat.getChatTimeRange())))
elif ch == 11:
try:
print('\nUser > ', end='')
print(''.join(['\n{}'.format(i) for i in chat.findUserWildCard(input())]))
except Exception:
print('\n[!]Bad Input')
else:
print('\n\x1b[1;6;36;49m\_(^-^)_/\x1b[0m')
def _menu() -> int:
try:
print("\n[+]Options ::\n\n\t1 > Get Chat Participant Count\n\t2 > Get Message Count in Chat\n\t3 > Get Event Count in Chat\n\t4 > Get Total Activity Count in Chat\n\t5 > Get Top `X` Chat Participant(s)\n\t6 > Get Time Range of Chat\n\t7 > Get participant count, who sent message via Bot\n\t8 > Get participant count, who didn't send message via Bot\n\t9 > Accumulated Chat traffic in parts of Day\n\t10 > Delay in between messages sent ( including min, max & avg )\n\t11 > Find User\n\t12 > Exit\n\n\x1b[1;1;32;50mtgnize >> \x1b[0m", end="")
return int(input())
except EOFError:
return -1
except Exception:
return 0
'''
Main entry point of script
'''
def main() -> float:
run('clear')
_result = []
try:
source, sink = _handleCMDInput()
if not source or not sink or not exists(source):
_displayBanner()
raise Exception('Improper Invocation of `tgnize`')
_sinkDirBuilder(sink)
_displayBanner()
print('[*]Preparing ...')
# a reusable reference, which will be used, over lifetime of this script,
chat = parseChat(source)
# holding full chat, currently under consideration
while(1):
_choiceHandler(_menu(), chat)
'''
_result.append(
plotAnimatedGraphForAccumulatedTrafficByMinuteFor24HourSpan(
extractMinuteBasedTraffic(chat),
'Accumulated Chat Traffic by Minute',
_getSinkFilePath(sink, 'accumulatedChatTrafficByMinute.gif')
)
)
for i in chat.getTopXParticipants(5):
_result.append(
plotAnimatedGraphForAccumulatedTrafficByMinuteFor24HourSpan(
extractMinuteBasedTrafficByUser(chat, i),
'Accumulated Chat Traffic by Minute for {}'.format(i),
_getSinkFilePath(sink, 'accumulatedChatTrafficByMinuteFor{}.gif'.format(
_getEscapedName(i)))
)
)
_result.append(
plotAnimatedGraphShowingTopXActiveParticipantsOverTime(
[('Top 5 Active Participants from {} to {}'.format(i[0].strftime('%b %d %Y, %I:%M:%S %p'), i[1].strftime('%b %d %Y, %I:%M:%S %p')), \
getTopXParticipantsFromMessageRangeAlongWithContribution(5, chat, chat.findActivitiesWithInTimeRange(i))) \
for i in chat.splitTimeRangeWithGap(chat.getChatTimeRange(), timedelta(days=30))],
_getSinkFilePath(sink, 'topXActiveChatParticipantsOverTime.gif')
)
)
'''
'''
for i in chat.users:
_result.append(
plotAnimatedGraphForAccumulatedTrafficByMinuteFor24HourSpan(
extractMinuteBasedTrafficByUser(chat, i.name),
'Accumulated Chat Traffic by Minute for {}'.format(
i.name[:8] + '...' if len(i.name) > 10 else i.name),
'./plots/accumulatedChatTrafficByMinuteFor{}.gif'.format(
_getEscapedName(i.name))
)
)
'''
except KeyboardInterrupt:
print('[!]Terminated')
except Exception as e:
print('[!]{}'.format(e))
'''
finally:
print('[+]Success : {:.2f} %'.format(__calculateSuccess__(_result)))
'''
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\n[!]Terminated')
finally:
exit(0)
|
StarcoderdataPython
|
8121095
|
# Repo: stefantaubert/tts-preparation
import shutil
from logging import getLogger
from pathlib import Path
from typing import List, Set, Tuple
from speech_dataset_preprocessing import load_final_ds
from speech_dataset_preprocessing import FinalDsEntryList
from text_utils import SpeakersDict
from text_utils.types import Speaker, Symbol
from tts_preparation.app.io import (get_merged_dir,
load_merged_symbol_converter,
save_merged_symbol_converter)
from tts_preparation.core.merge_ds import (DsName, PreparedDataList, merge,
remove_unwanted_symbols)
from general_utils import load_obj, save_obj
_merge_data_csv = "data.pkl"
_merge_speakers_json = "speakers.json"
def load_merged_data(merge_dir: Path) -> PreparedDataList:
path = merge_dir / _merge_data_csv
return load_obj(path)
def save_merged_data(merge_dir: Path, result: PreparedDataList) -> None:
path = merge_dir / _merge_data_csv
save_obj(result, path)
def load_merged_speakers_json(merge_dir: Path) -> SpeakersDict:
path = merge_dir / _merge_speakers_json
return SpeakersDict.load(path)
def save_merged_speakers_json(merge_dir: Path, speakers: SpeakersDict) -> None:
path = merge_dir / _merge_speakers_json
speakers.save(path)
def merge_ds(base_dir: Path, sdp_dir: Path, merge_name: str, ds_speakers: List[Tuple[DsName, Speaker]], ds_final_name: List[Tuple[DsName, str]], overwrite: bool = True) -> None:
logger = getLogger(__name__)
logger.info(f"Merging dataset: {merge_name}...")
dest_merge_dir = get_merged_dir(base_dir, merge_name)
if dest_merge_dir.is_dir() and dest_merge_dir.exists() and not overwrite:
logger.info("Already created.")
return
datasets: List[Tuple[DsName, FinalDsEntryList]] = []
for ds_name, final_name in set(ds_final_name):
final_data_list = load_final_ds(
base_dir=sdp_dir,
ds_name=ds_name,
final_name=final_name,
)
datasets.append((ds_name, final_data_list))
dest_data, dest_symbol_ids_dict, dest_speaker_ids_dict = merge(
datasets=datasets,
ds_speakers=ds_speakers,
)
assert overwrite
if dest_merge_dir.is_dir():
shutil.rmtree(dest_merge_dir)
dest_merge_dir.mkdir(parents=True, exist_ok=False)
save_merged_data(dest_merge_dir, dest_data)
save_merged_symbol_converter(dest_merge_dir, dest_symbol_ids_dict)
save_merged_speakers_json(dest_merge_dir, dest_speaker_ids_dict)
logger.info("Done.")
def ds_filter_symbols(base_dir: Path, orig_merge_name: str, dest_merge_name: str, allowed_symbols: Set[Symbol], overwrite: bool = True) -> None:
logger = getLogger(__name__)
dest_merge_dir = get_merged_dir(base_dir, dest_merge_name)
if dest_merge_dir.is_dir() and dest_merge_dir.exists() and not overwrite:
logger.info("Already created.")
return
orig_merge_dir = get_merged_dir(base_dir, orig_merge_name)
orig_data = load_merged_data(orig_merge_dir)
result = remove_unwanted_symbols(
data=orig_data,
allowed_symbols=allowed_symbols,
)
if result is None:
dest_data = orig_data
dest_symbol_ids_dict = load_merged_symbol_converter(orig_merge_dir)
        dest_speaker_ids_dict = load_merged_speakers_json(orig_merge_dir)  # speaker mapping, not the symbol converter
else:
dest_data, dest_symbol_ids_dict, dest_speaker_ids_dict = result
assert overwrite
if dest_merge_dir.is_dir():
shutil.rmtree(dest_merge_dir)
dest_merge_dir.mkdir(parents=True, exist_ok=False)
save_merged_data(dest_merge_dir, dest_data)
save_merged_symbol_converter(dest_merge_dir, dest_symbol_ids_dict)
save_merged_speakers_json(dest_merge_dir, dest_speaker_ids_dict)
logger.info("Done.")
|
StarcoderdataPython
|
6464990
|
# BUG: RangeIndex.where throws AssertionError #43240
import numpy as np
import pandas as pd
print(pd.__version__)
idx = pd.RangeIndex(0, 5)
v = np.array([False, False, True, True, True])
result = idx.where(v, 10)
print(result)
expected = pd.Index([10, 10, 2, 3, 4], dtype="int64")
pd.testing.assert_index_equal(result, expected)
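# On pandas versions affected by GH 43240, one possible workaround (an assumption, not an
# official fix) is to cast the RangeIndex to a plain integer index before calling where():
#   result = idx.astype("int64").where(v, 10)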
|
StarcoderdataPython
|
12813597
|
# Copyright 2022 <NAME>
# See LICENSE file for licensing details.
#
# Learn more about testing at: https://juju.is/docs/sdk/testing
import unittest
from ops.model import ActiveStatus, MaintenanceStatus
from ops.testing import Harness
from charm import DeferWithDispatchCharm
class TestCharm(unittest.TestCase):
def setUp(self):
self.harness = Harness(DeferWithDispatchCharm)
self.addCleanup(self.harness.cleanup)
def test_defer_with_dispatch(self):
# Verify that our core charm logic works. The invocation of defer_with_dispatch
# won't do anything in the test, because it spawns a child process. We'll test it
# in an integration test.
self.harness.begin_with_initial_hooks()
self.assertEqual(self.harness.model.unit.status, MaintenanceStatus("Deferred"))
# Simulate second run.
self.harness.charm.on.config_changed.emit()
self.assertEqual(self.harness.model.unit.status, ActiveStatus())
|
StarcoderdataPython
|
9676722
|
import requests
import time
from dotenv import load_dotenv
import os
load_dotenv() # loads .env
PASTERY_API_KEY = os.getenv("PASTERY_API_KEY")
# POST https://www.pastery.net/api/paste/
# ?api_key=<api_key>
# &duration=<duration>
# &title=<title>
# &language=<language>
# &max_views=<max_views>
# GET "https://www.pastery.net/api/paste/?api_key=" + PASTERY_API_KEY
# send_headers = {'Accept': 'application/json'}
response = requests.get('https://www.pastery.net/api/paste/?api_key='
+ PASTERY_API_KEY)
response.encoding = 'utf-8' # Optional: requests infers this internally
print(response.json())
def get_joke():
"""
    Gets a joke from the web API.
"""
# print(response.content)
# print(response.text)
# print(response.headers)
# print(response.json()) # This is the JSON response
response_json = response.json()
joke = response_json['joke']
# print(joke)
joke_split = joke.split("? ")
print(joke_split[0])
print("--------------------------------------------------------")
time.sleep(1.5) # Sleep for 1.5 seconds
print("3")
time.sleep(1.5) # Sleep for 1.5 seconds
print("2")
time.sleep(1.5) # Sleep for 1.5 seconds
print("1")
time.sleep(1.5) # Sleep for 1.5 seconds
print(joke_split[1])
# get_joke()
|
StarcoderdataPython
|
3261830
|
"""Verify that instantiation and operation of the CNN/CLDNN do not crash.
.. note::
This doesn't actually check if the attack "works" because "works" is pretty
subjective (what "adversarial success" is "good"?). Therefore, whether it "works"
is better left to human analysis of experiment outputs but this unit test would
catch silly mistakes that cause the code to not at least complete.
"""
__author__ = "<NAME> <<EMAIL>>"
# External Includes
import numpy as np
import os
import torch
from typing import Tuple
from unittest import TestCase
# Internal Includes
from rfml.attack import fgsm, compute_signed_gradient, scale_perturbation
from rfml.nn.F import energy
from rfml.nn.model import CLDNN, CNN
class TestFGSM(TestCase):
def _test_fgsm(self, model_xtor, batch_size: int):
sps = 8
input_samples = 128
n_classes = 10
model = model_xtor(input_samples=input_samples, n_classes=n_classes)
# Fake up some data to ensure we don't depend on our modem implementations
x, y = _generate_fake_data(
input_samples=input_samples, batch_size=batch_size, n_classes=n_classes
)
for spr in np.linspace(0.0, 20.0, num=20 + 1):
_x = fgsm(x=x, y=y, input_size=input_samples, net=model, spr=spr, sps=sps)
p = x - _x
perturbation_power = energy(p, sps=sps).detach().numpy()
# The code base assumes that Es is 1, therefore we can compute spr without
# a correctly crafted input signal and ensure it still works
spr_estimate = 10.0 * np.log10(1.0 / np.mean(perturbation_power))
            # Ensure the error is within 1/10 dB
self.assertLessEqual(np.abs(spr - spr_estimate), 0.1)
def _test_compute_signed_gradient(self, model_xtor, batch_size: int):
input_samples = 128
n_classes = 10
model = model_xtor(input_samples=input_samples, n_classes=n_classes)
# Fake up some data to ensure we don't depend on our modem implementations
x, y = _generate_fake_data(
input_samples=input_samples, batch_size=batch_size, n_classes=n_classes
)
for spr in np.linspace(0.0, 20.0, num=20 + 1):
_sg = compute_signed_gradient(x=x, y=y, input_size=input_samples, net=model)
# Ensure the perturbation size matches the input size (since they're adding)
for i, (x_shape, sg_shape) in enumerate(zip(x.size(), _sg.size())):
self.assertEqual(x_shape, sg_shape)
# Taking absolute value ensures that all of the values should be ~1.0
_x = torch.abs(_sg)
_x = _x.detach().numpy()
# We can then compare the extremes (max/min) to the 1.0 to ensure all match
self.assertAlmostEqual(np.max(_x), 1.0, places=4)
self.assertAlmostEqual(np.min(_x), 1.0, places=4)
def test_scale_perturbation(self):
sps = 8
# +/- 1 values
sg = (np.random.randint(low=0, high=1, size=(100, 1, 2, 128)) * 2) - 1
sg = torch.from_numpy(sg).float()
for spr in np.linspace(0.0, 20.0, num=20 + 1):
p = scale_perturbation(sg=sg, spr=spr, sps=sps)
perturbation_power = energy(p, sps=sps).detach().numpy()
# The code base assumes that Es is 1, therefore we can compute spr without
# a correctly crafted input signal and ensure it still works
spr_estimate = 10.0 * np.log10(1.0 / np.mean(perturbation_power))
            # Ensure the error is within 1/10 dB
self.assertLessEqual(np.abs(spr - spr_estimate), 0.1)
def test_CNN(self):
self._test_fgsm(CNN, batch_size=1)
self._test_fgsm(CNN, batch_size=256)
self._test_compute_signed_gradient(CNN, batch_size=1)
self._test_compute_signed_gradient(CNN, batch_size=256)
def test_CLDNN(self):
self._test_fgsm(CLDNN, batch_size=1)
self._test_fgsm(CLDNN, batch_size=256)
self._test_compute_signed_gradient(CLDNN, batch_size=1)
self._test_compute_signed_gradient(CLDNN, batch_size=256)
def _generate_fake_data(
input_samples: int, batch_size: int, n_classes: int
) -> Tuple[torch.Tensor, torch.Tensor]:
x = np.random.normal(loc=0.0, scale=1.0, size=(batch_size, 1, 2, input_samples))
y = np.random.randint(low=0, high=n_classes, size=batch_size)
x = torch.from_numpy(x)
y = torch.from_numpy(y)
return x.float(), y.long()
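# A convenience entry point (not part of the original test module) so the checks can be run
# directly with `python <this_file>.py`; normally they would be collected by unittest/pytest.
if __name__ == "__main__":
    import unittest
    unittest.main()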
|
StarcoderdataPython
|
8030016
|
import matplotlib.pyplot as plt
#from kneed import KneeLocator
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
enc_df = pd.read_csv("enc_tok.csv", header=0,names = ['label', 'features'])
features = enc_df['features'].values
labels = enc_df['label'].values
labels = np.array(labels)
X = []
for feature in features:
temp = feature[1:-1].split()
X.append([float(temp[0]), float(temp[1])])
#print(X)
wcss=[]
#this loop will fit the k-means algorithm to our data and
#second we will compute the within cluster sum of squares and #appended to our wcss list.
for i in range(1,11):
kmeans = KMeans(n_clusters=i, init ='k-means++', max_iter=300, n_init=10,random_state=0 )
kmeans.fit(X)
wcss.append(kmeans.inertia_)
#kmeans inertia_ attribute is: Sum of squared distances of samples #to their closest cluster center.
#4.Plot the elbow graph
# plt.plot(range(1,11),wcss)
# plt.title('The Elbow Method Graph')
# plt.xlabel('Number of clusters')
# plt.ylabel('WCSS')
# plt.show()
kmeans = KMeans(n_clusters=5, init ='k-means++', max_iter=300, n_init=10,random_state=0 )
# We are going to use the fit predict method that returns for each #observation which cluster it belongs to. The cluster to which #client belongs and it will return this cluster numbers into a #single vector that is called y K-means
y_kmeans = kmeans.fit_predict(X)
X = np.array(X)
indices1 = (y_kmeans==0).nonzero()[0]
indices2 = (y_kmeans==1).nonzero()[0]
indices3 = (y_kmeans==2).nonzero()[0]
indices4 = (y_kmeans==3).nonzero()[0]
indices5 = (y_kmeans==4).nonzero()[0]
plt.scatter(X[indices1, 0], X[indices1, 1], s=10, c='red', label ='Cluster 1')
for idx in indices1:  # plt.text() needs scalar coordinates, so annotate cluster-1 points one by one
    plt.text(X[idx, 0] + 0.3, X[idx, 1] + 0.3, labels[idx], fontsize=9)
plt.scatter(X[indices2, 0], X[indices2, 1], s=10, c='blue', label ='Cluster 2')
plt.scatter(X[indices3, 0], X[indices3, 1], s=10, c='green', label ='Cluster 3')
plt.scatter(X[indices4, 0], X[indices4, 1], s=10, c='cyan', label ='Cluster 4')
plt.scatter(X[indices5, 0], X[indices5, 1], s=10, c='magenta', label ='Cluster 5')
#Plot the centroid. This time we're going to use the cluster centres #attribute that returns here the coordinates of the centroid.
# plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='yellow', label = 'Centroids')
plt.title('Clusters of Customers')
plt.xlabel('Annual Income(k$)')
plt.ylabel('Spending Score (1-100)')
plt.show()
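# silhouette_score is imported above but never used; a hedged sketch of how it could
# complement the elbow plot when choosing the number of clusters:
for k in range(2, 11):
    k_labels = KMeans(n_clusters=k, init='k-means++', max_iter=300, n_init=10, random_state=0).fit_predict(X)
    print(k, silhouette_score(X, k_labels))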
|
StarcoderdataPython
|
12819154
|
# Repo: project-origin/example-backend
"""
Asynchronous tasks for refreshing access tokens which
are close to expiring.
"""
from datetime import datetime, timezone
from celery import group
from originexample import logger
from originexample.db import inject_session, atomic
from originexample.auth import UserQuery, AuthBackend
from originexample.tasks import celery_app
# Settings
RETRY_DELAY = 10
MAX_RETRIES = (60 * 15) / RETRY_DELAY
# Services
backend = AuthBackend()
def start_refresh_expiring_tokens_pipeline():
"""
Starts a pipeline which refreshes all tokens that are
soon to expire.
:rtype: celery.Task
"""
return get_soon_to_expire_tokens \
.s() \
.apply_async()
def start_refresh_token_for_subject_pipeline(subject):
"""
Starts a pipeline which refreshes token for a specific subject.
:rtype: celery.Task
"""
return refresh_token \
.s(subject=subject) \
.apply_async()
@celery_app.task(
name='refresh_token.get_soon_to_expire_tokens',
default_retry_delay=RETRY_DELAY,
max_retries=MAX_RETRIES,
)
@logger.wrap_task(
title='Getting soon-to-expire tokens',
pipeline='refresh_token',
task='get_soon_to_expire_tokens',
)
@inject_session
def get_soon_to_expire_tokens(session):
"""
:param Session session:
"""
users = UserQuery(session) \
.is_active() \
.should_refresh_token()
tasks = [refresh_token.si(subject=user.sub) for user in users]
group(*tasks).apply_async()
@celery_app.task(
name='refresh_token.refresh_token_for_user',
default_retry_delay=RETRY_DELAY,
max_retries=MAX_RETRIES,
)
@logger.wrap_task(
title='Refreshing user\'s access token',
pipeline='refresh_token',
task='refresh_token',
)
@atomic
def refresh_token(subject, session):
"""
:param str subject:
:param Session session:
"""
user = UserQuery(session) \
.is_active() \
.has_sub(subject) \
.one()
token = backend.refresh_token(user.refresh_token)
user.access_token = token['access_token']
user.refresh_token = token['refresh_token']
user.token_expire = datetime \
.fromtimestamp(token['expires_at']) \
.replace(tzinfo=timezone.utc)
|
StarcoderdataPython
|
8077325
|
# Problem: https://www.hackerrank.com/challenges/itertools-combinations-with-replacement/problem
# Score: 10
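# A minimal sketch of one possible solution (input format assumed from the linked problem
# statement: a single line containing a string S and an integer k):
from itertools import combinations_with_replacement
s, k = input().split()
for combo in combinations_with_replacement(sorted(s), int(k)):
    print("".join(combo))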
|
StarcoderdataPython
|
4841041
|
# File: tests/entities/test_arxiv_document.py
"""Tests the arxiv model
"""
import uuid
from webminer.entities import arxiv_document as ad
test_url = "https://arxiv.org/abs/1801.06605"
test_title = "A Collaborative Filtering Recommender System"
test_abstract = "The use of relevant metrics of software systems " + \
"could improve various software engineering tasks, but"
test_authors = ["<NAME>", "<NAME>"]
test_publish_date = "Sat, 20 Jan 2018 00:11:42"
test_pdf_url = "https://arxiv.org/pdf/1801.06605"
def test_arxiv_doc_model_init():
"""Tests a successful creation of a arxiv doc model
"""
code = uuid.uuid4()
arxiv_doc = ad.ArxivDocument(
doc_id=code,
url=test_url,
title=test_title,
abstract=test_abstract,
authors=test_authors,
publish_date=test_publish_date,
pdf_url=test_pdf_url,
)
assert_equal(arxiv_doc.doc_id, code)
assert_equal(arxiv_doc.url, test_url)
assert_equal(arxiv_doc.title, test_title)
assert_equal(arxiv_doc.abstract, test_abstract)
assert_equal(arxiv_doc.authors, test_authors)
assert_equal(arxiv_doc.publish_date, test_publish_date)
assert_equal(arxiv_doc.pdf_url, test_pdf_url)
def test_arxiv_doc_model_from_dict():
"""Tests a successful creation of a arxiv doc model from a
dictionary object
"""
code = uuid.uuid4()
arxiv_doc = ad.ArxivDocument.from_dict(
{
"doc_id": code,
"url": test_url,
"title": test_title,
"summary": test_abstract,
"authors": [{"name": "<NAME>"}, {"name": "<NAME>"}],
"published": test_publish_date,
"links": [{"title": "pdf", "href": "https://arxiv.org/pdf/1801.06605"}],
}
)
assert_equal(arxiv_doc.doc_id, code)
assert_equal(arxiv_doc.url, test_url)
assert_equal(arxiv_doc.title, test_title)
assert_equal(arxiv_doc.abstract, test_abstract)
assert_equal(arxiv_doc.authors, test_authors)
assert_equal(arxiv_doc.publish_date, test_publish_date)
assert_equal(arxiv_doc.pdf_url, test_pdf_url)
def assert_equal(arg1, arg2):
if arg1 != arg2:
raise AssertionError("Assert equal failed - values are not equal")
|
StarcoderdataPython
|
1676459
|
import sys
from django.shortcuts import render
from PIL import Image, ImageFilter
# Create your views here.
from django.http import HttpResponse
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
def handle_image(request):
try:
im = Image.open(request)
im = im.filter(ImageFilter.CONTOUR)
response = HttpResponse(content_type="image/jpeg")
im.save(response, "JPEG")
return response
except IOError:
red = Image.new('RGB', (50, 50), (255,0,0))
response = HttpResponse(content_type="image/jpeg")
red.save(response, "JPEG")
return response
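# Hypothetical URL wiring for these views (module and route names are assumptions, not
# taken from the original project):
#   # urls.py
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path("", views.index, name="index"),
#       path("contour/", views.handle_image, name="handle_image"),
#   ]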
|
StarcoderdataPython
|
5184820
|
from django.contrib import admin
from boating.models import HirePoint, OpeningTimes, Boat, Booking
class OpeningTimesInline(admin.TabularInline):
model = OpeningTimes
extra = 0
class HirePointBoatInline(admin.TabularInline):
model = Boat
extra = 0
class HirePointAdmin(admin.ModelAdmin):
model = HirePoint
inlines = [HirePointBoatInline, OpeningTimesInline]
admin.site.register(HirePoint, HirePointAdmin)
class BookingAdmin(admin.ModelAdmin):
model = Booking
list_filter = ('hire_point',)
admin.site.register(Booking, BookingAdmin)
|
StarcoderdataPython
|
4968013
|
import discord
import io
import textwrap
import traceback
from contextlib import redirect_stdout
from discord.ext import commands
import datetime
class Owner(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self._last_result = None
self.owner = bot.get_user(341295279674228737)
self.startup_extensions = ['overwatch', 'information', 'general', 'fun', 'admin', 'owner']
    def cleanup_code(self, content):
        if content.startswith('```') and content.endswith('```'):
            return '\n'.join(content.split('\n')[1:-1])
        return content.strip('` \n')  # fall back to the raw content so the eval command never receives None
@commands.group(name='activity')
async def activity(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send('You forgot listening playing watching etc whatever')
@activity.command()
async def playing(self, ctx, *, vgame: str):
if ctx.author != self.owner:
return
await self.bot.change_presence(activity=discord.Game(name=vgame))
await ctx.send('Done.')
@activity.command()
async def listening(self, ctx, *, thing: str):
if ctx.author != self.owner:
return
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=thing))
await ctx.send('Done.')
@activity.command()
async def watching(self, ctx, *, movie: str):
if ctx.author != self.owner:
return
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=movie))
await ctx.send('Done.')
@commands.command()
async def annoy(self, ctx, annoyee: discord.Member):
msg = ctx.message
await msg.delete()
ping = await ctx.send(f'''{annoyee.mention}, you're such a noob.''')
await ping.delete()
@commands.command()
async def editusr(self, ctx, newusr: str):
await self.bot.user.edit(username=newusr)
await ctx.send("Done.")
@commands.command(name="eval")
async def _eval(self, ctx, *, body: str):
if ctx.author == self.owner:
env = {
'bot': self.bot,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'message': ctx.message,
'_': self._last_result
}
env.update(globals())
body = self.cleanup_code(body)
stdout = io.StringIO()
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as e:
return await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
await ctx.message.add_reaction('\u2705')
if ret is None:
if value:
await ctx.send(f'```py\n{value}\n```')
else:
self._last_result = ret
await ctx.send(f'```py\n{value}{ret}\n```')
else:
return
@commands.command()
async def shutdown(self, ctx):
if ctx.author != self.owner:
return
await ctx.send('Shutting down... ')
await self.bot.logout()
await self.bot.close()
@commands.command()
async def broadcast(self, ctx, *, body: str):
if ctx.author != self.owner:
return
for guild in self.bot.guilds:
incharge = guild.owner
await incharge.send(content=body)
await ctx.send("Done")
@commands.command()
async def cogreload(self, ctx, extension_name: str):
self.bot.reload_extension(extension_name)
await ctx.send(f"{extension_name} reloaded.")
def setup(bot):
bot.add_cog(Owner(bot))
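# Hedged usage sketch (not part of the original cog): how this file would typically be
# wired into a bot. The prefix and the commented-out token are placeholders; in a real
# project the cog would usually be loaded via bot.load_extension() with its module path.
if __name__ == "__main__":
    bot = commands.Bot(command_prefix="!")
    bot.add_cog(Owner(bot))
    # bot.run("YOUR_BOT_TOKEN")  # hypothetical token; supply a real one to actually start the bot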
|
StarcoderdataPython
|
1993458
|
<reponame>igapon50/training
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# @file const.py
# @version 1.0.0
# @author <NAME>(HN:igapon)
# @date 2021/04/26
# @brief Defines constants shared across the project
# @details Defines constants shared across the project
# @warning
# @note
__author__ = "<NAME>(HN:igapon)"
__copyright__ = "Copyright (c) 2021 igapon"
__credits__ = ["<NAME>"]
__license__ = "MIT License"
__version__ = "1.0.0"
__maintainer__ = "igapon"
__email__ = "<EMAIL>"
__status__ = "Development" # "Prototype" or "Development" or "Production"
msg_error_exit = 'エラー終了します。'  # "Exiting with an error."
HEADERS_DIC = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"}
DEFAULT_TARGET_URL = 'https://www.hot-ishikawa.jp/photo/'
RESULT_FILE_PATH = './result.txt'  # File to which the title and the list of download URLs are written
OUTPUT_FOLDER_PATH = '.\\folder01'  # Folder where the downloaded files are saved
# img_css_select = 'html body section.entry-content img'
# img_attr = 'src'
# img_css_select = 'html body main div.content img.content-img'
# img_attr = 'src'
# img_css_select = 'html body section.entry-content img.alignnone'
# img_attr = 'src'
# img_css_select = 'html body section.entry-content img'
# img_attr = 'src'
# img_css_select = 'img[data-src]'
# img_attr = 'data-src'
# img_css_select = 'html body div.kijibox p a'
# img_attr = 'href'
img_css_select = 'html body noscript img.list-img'
img_attr = 'src'
# img_css_select = 'html body div .content a'
# img_attr = 'href'
# img_css_select = 'html body main noscript img.vimg'
# img_attr = 'src'
# img_css_select = 'html body div .photoItem img'
# img_attr = 'src'
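# Hedged usage sketch (not part of the original file): these constants would typically be
# consumed by a scraper roughly like this, assuming `requests` and `beautifulsoup4` are
# installed. The selector/attribute pair picks the image URLs out of the target page.
if __name__ == '__main__':
    import requests
    from bs4 import BeautifulSoup
    response = requests.get(DEFAULT_TARGET_URL, headers=HEADERS_DIC)
    soup = BeautifulSoup(response.text, 'html.parser')
    image_urls = [tag.get(img_attr) for tag in soup.select(img_css_select) if tag.get(img_attr)]
    print('{} image URLs found'.format(len(image_urls)))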
|
StarcoderdataPython
|
3555641
|
<reponame>2z1c/Community-document
'''
File: Photoresistor.py
Project: adc
File Created: Thursday, 24th December 2020 5:44:08 pm
Author: chengzhu.zhou
-----
Last Modified: Wednesday, 30th December 2020 10:10:33 am
Modified By: chengzhu.zhou
-----
Copyright 2020 - 2020 quectel
'''
from misc import ADC
import utime as time
import _thread
# unit as Ω
def Voltage_to_Resistance(Volt):
#
Va = 2 * Volt
resistance = (2 * 4700 * 40200 * Va)/(2 * 4700 * (3300 - Va) - (40200 * Va))
return resistance
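# Worked example (hedged; assumes the ADC reading is passed in millivolts):
# Voltage_to_Resistance(300) gives Va = 600, so
# (2*4700*40200*600) / (2*4700*(3300-600) - 40200*600) ≈ 179943 Ω (about 180 kΩ).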
def Photoresistor_thread(delay, retryCount):
    # create an ADC device
AdcDevice = ADC()
while retryCount:
retryCount = retryCount - 1
# get ADC.ADC0 value
adcvalue = AdcDevice.read(ADC.ADC0)
print("get ADC.ADC0 Voltage value as {0}mv".format(adcvalue))
# Converted to resistance
resistance = Voltage_to_Resistance(adcvalue)
print("Photoresistor resistance as {0}Ω".format(resistance))
time.sleep(delay)
pass
if __name__ == "__main__":
    # create a thread that reads the ADC and converts the reading to a voltage
    _thread.start_new_thread(Photoresistor_thread, (1, 10))
    print("current main thread has exited")
|
StarcoderdataPython
|
32486
|
<filename>cheatsheets/py/pyython.6a.keras.py
from keras.datasets import imdb
top_words = 10000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=top_words)
#imdb.get_word_index()
word_dict = imdb.get_word_index()
word_dict = { key:(value + 3) for key, value in word_dict.items() }
word_dict[''] = 0 # Padding
word_dict['>'] = 1 # Start
word_dict['?'] = 2 # Unknown word
reverse_word_dict = { value:key for key, value in word_dict.items() }
print(' '.join(reverse_word_dict[id] for id in x_train[0]))
from keras.preprocessing import sequence
max_review_length = 500
x_train = sequence.pad_sequences(x_train, maxlen=max_review_length)
x_test = sequence.pad_sequences(x_test, maxlen=max_review_length)
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.embeddings import Embedding
from keras.layers import Flatten
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
print(model.summary())
hist = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5, batch_size=128)
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
sns.set()
acc = hist.history['acc']
val = hist.history['val_acc']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, '-', label='Training accuracy')
plt.plot(epochs, val, ':', label='Validation accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.plot()
scores = model.evaluate(x_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
import string
import numpy as np
def analyze(text):
# Prepare the input by removing punctuation characters, converting
# characters to lower case, and removing words containing numbers
translator = str.maketrans('', '', string.punctuation)
text = text.translate(translator)
text = text.lower().split(' ')
text = [word for word in text if word.isalpha()]
# Generate an input tensor
input = [1]
for word in text:
if word in word_dict and word_dict[word] < top_words:
input.append(word_dict[word])
else:
input.append(2)
padded_input = sequence.pad_sequences([input], maxlen=max_review_length)
# Invoke the model and return the result
    # padded_input is already a 2-D array from pad_sequences, so it can be passed to predict directly
    result = model.predict(padded_input)[0][0]
return result
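# Hedged usage sketch: score an assumed sample review with the trained model.
# Values close to 1.0 suggest positive sentiment, close to 0.0 negative.
sample_review = "An easy movie to watch with plenty of laughs"  # hypothetical input
print(analyze(sample_review))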
|
StarcoderdataPython
|
3578230
|
from typing import Dict, Union
from news_scraper import config, ProjectVariables
from functools import wraps
from news_scraper.enums.site import Site
logging = ProjectVariables.root_logger
def accepts_dict(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if isinstance(args[1], dict):
return fn(*args, **kwargs)
else:
logging.error(f'{fn.__name__} received None or other type')
return True
return wrapper
class AtomicDict(dict):
@accepts_dict
def add(self, obj: Dict[str, Union[str, dict]], site: Site, yesterday_data=None) -> bool:
if yesterday_data is None:
yesterday_data = AtomicDict()
title = obj['title']
values = obj['values']
yesterday_article = yesterday_data.get(title, None)
if title in self.keys() or (yesterday_article and site.value == yesterday_article["site"]):
return True
self[title] = values
return False
@accepts_dict
def add_all(self, objs: Dict[str, Union[str, dict]], site: Site, yesterday_data=None) -> bool:
if yesterday_data is None:
yesterday_data = AtomicDict()
max_collisions = int(config.get('Settings', 'MaxCollisions'))
for title, values in objs.items():
res = self.add({
'title': title,
'values': values
}, site, yesterday_data=yesterday_data)
if res:
max_collisions -= 1
if max_collisions == 0:
return False
return True
@accepts_dict
def add_additional_data(self, additional_data: Dict[str, Union[str, dict]]) -> None:
title = additional_data['title']
new_values = additional_data['values']
        if self.get(title) is None:
            logging.error(f'title {title} was not found in data list')
return
for key in new_values.keys():
self[title][key] = new_values[key]
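# Hedged usage sketch (not part of the original module): store one scraped article and
# check the collision flag. The Site member is taken generically because the enum's
# actual values are defined elsewhere in news_scraper.
if __name__ == '__main__':
    articles = AtomicDict()
    some_site = list(Site)[0]
    collided = articles.add({'title': 'Example headline', 'values': {'url': 'https://example.com'}}, some_site)
    print(collided, dict(articles))  # False on first insert; the title is now a key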
|
StarcoderdataPython
|
1899993
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Unit-e developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC call related to the runstringcommand command.
Test corresponds to code in rpc/misc.cpp.
"""
from test_framework.test_framework import UnitETestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, Decimal
from test_framework.regtest_mnemonics import regtest_mnemonics
class RunstringcommandTest(UnitETestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [['-wallet=w1', '-wallet=w2'], []]
def run_test(self):
        self._test_runstringcommand()
    def _test_runstringcommand(self):
assert_raises_rpc_error(-8, 'Invalid method',
self.nodes[0].runstringcommand, 'runstringcommand', 'generate', 'w1', '1')
assert_raises_rpc_error(-8, 'Parameters must all be strings',
self.nodes[0].runstringcommand, 'generate', 'w1', 101)
# Default wallet
assert_equal(Decimal(0), self.nodes[1].runstringcommand('getbalance', ''))
# Explicitly specified wallet
self.nodes[0].runstringcommand('importmasterkey', 'w1', regtest_mnemonics[0]['mnemonics'])
self.nodes[0].initial_balance = regtest_mnemonics[0]['balance']
self.nodes[0].runstringcommand(
'sendtoaddress',
'w1',
self.nodes[1].getnewaddress(),
'10', # amount
'', # comment
'', # comment_to
'true' # subtractfeefromamount
)
assert_equal(self.nodes[0].initial_balance - Decimal(10), self.nodes[0].runstringcommand('getbalance', 'w1'))
assert_equal(Decimal(0), self.nodes[0].runstringcommand('getbalance', 'w2'))
if __name__ == '__main__':
RunstringcommandTest().main()
|
StarcoderdataPython
|
1755042
|
<reponame>cdeck3r/3DScanner<filename>src/homie-nodes/homie-apparatus/node_recentimages.py<gh_stars>1-10
import base64
import datetime
import hashlib
import json
import logging
import os
import pathlib
import re
import shutil
import subprocess
import tempfile
import time
from homie.node.node_base import Node_Base
from homie.node.property.property_datetime import Property_DateTime
from homie.node.property.property_enum import Property_Enum
from homie.node.property.property_integer import Property_Integer
logger = logging.getLogger(__name__)
class Node_RecentImages(Node_Base):
"""The recent image taken by the camnode's camera
scanner/apparatus/recent-images/...
scanner/apparatus/recent-images/save-all
scanner/apparatus/recent-images/last-saved
scanner/apparatus/recent-images/image-count
"""
# allowed function states
RUN_STATES = "run,idle"
def __init__(
self,
device,
id="recent-images",
name="All recent images",
type_="file",
retain=True,
qos=1,
):
super().__init__(device, id, name, type_, retain, qos)
# important function we need
assert self.save_all
assert Node_RecentImages.RUN_STATES
self.device = device
# create image storage location must exist
self.img_dir = os.path.expanduser(self.device.device_settings['img_dir'])
os.makedirs(self.img_dir, mode=0o755, exist_ok=True)
assert os.path.isdir(self.img_dir)
# temp directory to store retrieved images in json format
self.img_tmp_dir = os.path.expanduser(self.device.device_settings['img_tmp_dir'])
os.makedirs(self.img_tmp_dir, mode=0o755, exist_ok=True)
assert os.path.isdir(self.img_tmp_dir)
"""scanner/apparatus/recent-images/save-all"""
# function's default state is 'idle'
self.prop_save_all = Property_Enum(
node=self,
id="save-all",
name="Save all images",
data_format=Node_RecentImages.RUN_STATES,
set_value=self.save_all,
value='idle',
)
self.current_run_state = self.prop_save_all.value
self.add_property(self.prop_save_all)
"""
scanner/apparatus/recent-images/last-saved
scanner/apparatus/recent-images/image-count
"""
self.last_saved = Property_DateTime(
node=self, id='last-saved', name='Most recent image date', data_format='%Y-%m-%dT%H:%M:%S.000',
)
self.image_count = Property_Integer(
node=self, id='image-count', name='Count of saved images', settable=False
)
self.add_property(self.image_count)
self.add_property(self.last_saved)
def __str__(self):
return str(self.__class__.__name__)
def count_dirs_and_files(self, path):
"""Counts the dir and files in path directory"""
dircount, filecount = 0, 0
for _, d, f in os.walk(path):
dircount += len(d)
filecount += len(f)
return (dircount, filecount)
def save_all(self, action):
"""Collects recent images from all camera nodes and stores them"""
def finalize(tmpdir):
# Delete tmpdir and inform clients
try:
shutil.rmtree(tmpdir)
except OSError as e:
                logger.error("Error deleting {} : {}".format(tmpdir, e.strerror))
self.current_run_state = 'idle'
self.prop_save_all.value = self.current_run_state
if action != 'run':
return
if self.current_run_state == 'run':
return
self.current_run_state = 'run'
self.prop_save_all.value = self.current_run_state
# retrieve recent-image json with b64 encoded images in tmp dir
# decode images
# filter out the ones which are too old (e.g. by camnode defect)
# store them in IMAGE_DIR
# update properties
# Explain: decode is before filter,
# because we need to load the complete json file anyway to filter,
# so we can decode the b64 data
# retrieve images
tmpdir = tempfile.mkdtemp(dir=self.img_tmp_dir)
try:
imgs = self.retrieve_images_from_camnodes(tmpdir)
except Exception as e:
logger.error(
                'Download of recent images failed unexpectedly: {}'.format(e)
)
finalize(tmpdir)
return
# images paths in imgs list: <tmp>/camnode-<hwaddr>_recent-image.json
# decode
imgs = self.decode_images(imgs)
if len(imgs) == 0:
logger.info('No decoded images found.')
finalize(tmpdir)
return
# filter
imgs = self.filter_images(imgs)
# copy & store
img_dir = ImageDir(self.img_dir)
img_dir.copy_files_from(imgs)
# update properties
self.last_saved.value = img_dir.mtime
self.image_count.value = len(img_dir.files)
# cleanup
finalize(tmpdir)
def retrieve_images_from_camnodes(self, tmpdir):
"""Download all b64 encoded images and return list of file paths"""
# download b64 data in json structure in tmp directoy
# collect all names and return
dest = str(tmpdir)
broker = self.device.mqtt_settings['MQTT_BROKER']
port = str(self.device.mqtt_settings['MQTT_PORT'])
# remember: when the homie service starts it sets the cwd
# to the script's directoy
process = subprocess.run(
[
str(os.getcwd()) + '/' + 'node_recentimages_download.sh',
broker,
port,
dest,
],
capture_output=True,
universal_newlines=True,
check=True,
)
return [
os.path.join(tmpdir, filename)
for filename in os.listdir(tmpdir)
if os.path.getsize(os.path.join(tmpdir, filename)) > 0
]
def decode_images(self, imgs):
"""B64 decode all images found in given list"""
decoded_imgs = []
for img in imgs:
try:
with open(os.path.abspath(img), 'r') as jsonfile:
j = json.load(jsonfile)
except Exception as e:
                logger.warning('Could not load file {}: {}'.format(img, e))
continue
try:
filename = os.path.basename(j['file'])
filedir = os.path.dirname(img)
decoded_img = base64.b64decode(j['b64file'])
decoded_img_path = os.path.join(filedir, filename)
with open(decoded_img_path, 'wb+') as f:
f.write(decoded_img)
except Exception as e:
                logger.warning('Could not b64decode {}: {}'.format(img, e))
                continue
            decoded_imgs.append(decoded_img_path)
return decoded_imgs
def filter_images(self, imgs, minutes_back=10):
"""Filter images which do not match the criteria and return new list"""
# expected camnode filename format: <tmp>/yyyymmddhhmmss_<hwaddr>.png
imgs_filename = [os.path.basename(os.path.abspath(f)) for f in imgs]
imgs_tmpdir = os.path.dirname(os.path.abspath(imgs[-1]))
pattern = re.compile(r'^\d\d\d\d\d\d\d\d\d\d\d\d\d\d_')
camnode_imgs = [
i for i in imgs_filename if re.search(pattern, i) and i.endswith('.png')
]
# extract youngest one; sub couple of minutes to form a threshold
# sort and filter all out which are below the threshold
youngest_img = sorted(camnode_imgs, key=str.lower)[-1]
ts_str = re.split(r'_', youngest_img)[0]
ts = datetime.datetime.strptime(ts_str, '%Y%m%d%H%M%S')
threshold = ts - datetime.timedelta(minutes=minutes_back)
threshold_str = threshold.strftime('%Y%m%d%H%M%S')
filtered_imgs = [
os.path.join(imgs_tmpdir, f)
for f in sorted(camnode_imgs, key=str.lower)
if f > threshold_str
]
return filtered_imgs
class ImageDir(object):
"""Represents all information to store images"""
def __init__(self, root_img_dir):
"""Creates a new directoy within root_img_dir"""
assert os.path.isdir(root_img_dir)
# format is yyyymmdd-HHmmss
now = datetime.datetime.now()
imgdirname = now.strftime('%Y%m%d-%H%M%S')
self.img_dir = os.path.join(root_img_dir, imgdirname)
os.mkdir(self.img_dir)
assert os.path.isdir(self.img_dir)
@property
def path(self):
return os.path.abspath(self.img_dir)
@property
def mtime(self):
modTimesinceEpoc = os.path.getmtime(self.path)
return time.strftime('%Y-%m-%dT%H:%M:%S.000', time.localtime(modTimesinceEpoc))
@property
def files(self):
"""Returns a file list from directoy"""
return os.listdir(self.path)
def copy_to(self, dest_dir):
"""Copy the content from image dir to destination directory"""
# TODO: not pythonic :-(
raise NotImplementedError
def copy_from(self, src_dir):
"""Copy all files from src_dir to image directory"""
self.copy_files_from(os.listdir(src_dir))
def copy_files_from(self, src_files):
"""Copy files from list to image directory"""
for f in src_files:
try:
shutil.copy2(f, self.path)
except Exception:
                logger.warning('File copy failed: {}'.format(f))
|
StarcoderdataPython
|
6505745
|
<reponame>Savasw/ansible-provider
#!/usr/bin/python
# Copyright (c) 2017 Alibaba Group Holding Limited. <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: alicloud_bucket_object
version_added: "1.0.9"
short_description: Manage object in OSS
description:
- This module allows the user to manage OSS objects within bucket. Includes support for uploading and downloading
objects, retrieving object keys.
deprecated:
removed_in: "1.5.0"
why: Alibaba Cloud module name prefix "ali" will be more concise.
alternative: Use M(ali_oss_object) instead.
options:
mode:
description:
- Switches the module behaviour between put (upload), get (download), list (list objects) and delete (delete object).
required: true
choices: ['get', 'put', 'delete', 'list']
bucket:
description:
- Bucket name.
required: true
permission:
description:
- This option lets the user set the canned permissions on the objects that are put. The permissions that
can be set are 'private', 'public-read', 'public-read-write'.
default: 'private'
choices: [ 'private', 'public-read', 'public-read-write' ]
aliases: [ 'acl' ]
headers:
description:
- Custom headers for PUT or GET operation, as a dictionary of 'key=value' and 'key=value,key=value'.
overwrite:
description:
- Force overwrite specified object content when putting object.
        If it is true/false, the object will be normal/appendable. An appendable object can be converted to normal by
        setting overwrite to true, but not the other way around.
default: False
type: bool
content:
description:
      - The object content that will be uploaded. It conflicts with 'file_name' when mode is 'put'.
file_name:
description:
      - The name of the file used to upload or download the object.
aliases: [ "file" ]
object:
description:
      - Name of the object after it is uploaded to the bucket.
required: true
aliases: [ 'key', 'object_name' ]
byte_range:
description:
      - The range of object content to download.
        Its format is like 1-100, which indicates bytes one through one hundred of the object.
aliases: [ 'range' ]
requirements:
- "python >= 2.6"
- "footmark >= 1.1.16"
extends_documentation_fragment:
- alicloud
author:
- "<NAME> (@xiaozhu36)"
'''
EXAMPLES = '''
# basic provisioning example to upload a content
- name: simple upload to bucket
hosts: localhost
connection: local
vars:
alicloud_access_key: <your-alicloud-access-key-id>
alicloud_secret_key: <your-alicloud-access-secret-key>
alicloud_region: cn-hangzhou
mode: put
bucket: bucketname
content: 'Hello world! I come from alicloud.'
object: 'remote_file.txt'
headers:
Content-Type: 'text/html'
Content-Encoding: md5
tasks:
- name: simple upload to bucket
alicloud_bucket_object:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
mode: '{{ mode }}'
bucket: '{{ bucket }}'
content: '{{ content }}'
headers: '{{ headers }}'
register: result
- debug: var=result
# basic provisioning example to upload a file
- name: simple upload to bucket
hosts: localhost
connection: local
vars:
alicloud_access_key: <your-alicloud-access-key-id>
alicloud_secret_key: <your-alicloud-access-secret-key>
alicloud_region: cn-hangzhou
mode: put
bucket: bucketname
file_name: 'test_oss.yml'
object: 'remote_file.txt'
headers:
Content-Type: 'text/html'
Content-Encoding: md5
tasks:
- name: simple upload to bucket
alicloud_bucket_object:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
mode: '{{ mode }}'
file_name: '{{ file_name }}'
content: '{{ content }}'
headers: '{{ headers }}'
register: result
- debug: var=result
# basic provisioning example to download a object
- name: simple upload to bucket
hosts: localhost
connection: local
vars:
alicloud_access_key: <your-alicloud-access-key-id>
alicloud_secret_key: <your-alicloud-access-secret-key>
alicloud_region: cn-hangzhou
mode: get
bucket: bucketname
download: 'my_test.json'
byte_range: 0-100
object: 'remote_file.txt'
headers:
Content-Type: 'text/html'
Content-Encoding: md5
tasks:
- name: simple upload to bucket
alicloud_bucket_object:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
mode: '{{ mode }}'
file_name: '{{ download }}'
byte_range: '{{ byte_range }}'
content: '{{ content }}'
headers: '{{ headers }}'
register: result
- debug: var=result
# basic provisioning example to list bucket objects
- name: list bucket objects
hosts: localhost
connection: local
vars:
alicloud_access_key: <your-alicloud-access-key-id>
alicloud_secret_key: <your-alicloud-access-secret-key>
alicloud_region: cn-hangzhou
mode: list
bucket: bucketname
tasks:
- name: list bucket objects
alicloud_bucket_object:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
mode: '{{ mode }}'
bucket: '{{ bucket }}'
register: list_result
- debug: var=list_result
# basic provisioning example to delete bucket object
- name: delete bucket objects
hosts: localhost
connection: local
vars:
alicloud_access_key: <your-alicloud-access-key-id>
alicloud_secret_key: <your-alicloud-access-secret-key>
alicloud_region: cn-hangzhou
mode: delete
bucket: bucketname
object: 'remote_file.txt'
tasks:
- name: delete bucket objects
alicloud_bucket_object:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
mode: '{{ mode }}'
bucket: '{{ bucket }}'
object: '{{ object }}'
register: delete_object_result
- debug: var=delete_object_result
'''
RETURN = '''
changed:
description: current operation whether changed the resource
returned: when success
type: bool
sample: true
key:
description: the name of oss object
    returned: except when mode is list
    type: str
    sample: newobject-2
object:
description: the object's information
returned: on put or get
type: dict
sample: {
"etag": "A57B09D4A76BCF486DDD755900000000",
"key": "newobject-2",
"last_modified": "2017-07-24 19:43:41",
"next_append_position": 11,
"size": "11 B",
"storage_class": "Standard",
"type": "Appendable"
}
objects:
description: the list all objects that has the prefix of 'object' value in the specified bucket
returned: when list
type: list
sample: [
{
"etag": "54739B1D5AEBFD38C83356D8A8A3EDFC",
"key": "newobject-1",
"last_modified": "2017-07-24 19:42:46",
"size": "2788 B",
"storage_class": "Standard",
"type": "Normal"
},
{
"etag": "EB8BDADA044D58D58CDE755900000000",
"key": "newobject-2",
"last_modified": "2017-07-24 19:48:28",
"next_append_position": 5569,
"size": "5569 B",
"storage_class": "Standard",
"type": "Appendable"
}
]
'''
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_oss import oss_bucket_argument_spec, oss_bucket_connect
import time
HAS_FOOTMARK = False
try:
from footmark.exception import ECSResponseError, OSSResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
def get_object_info(obj):
result = {'key': obj.key, 'last_modified': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(obj.last_modified)),
'etag': obj.etag, 'type': obj.type, 'size': str(obj.size) + ' B', 'storage_class': obj.storage_class}
if obj.type == 'Appendable':
result['next_append_position'] = obj.size
return result
def main():
argument_spec = oss_bucket_argument_spec()
argument_spec.update(dict(
bucket=dict(type='str', required=True),
mode=dict(type='str', required=True, choices=['put', 'get', 'list', 'delete']),
permission=dict(type='str', default='private', choices=['private', 'public-read', 'public-read-write']),
headers=dict(type='dict'),
overwrite=dict(type='bool', default=False),
content=dict(type='str'),
file_name=dict(type='str', aliases=['file']),
object=dict(type='str', aliases=['key', 'object_name']),
byte_range=dict(type='str', aliases=['range'])
)
)
module = AnsibleModule(argument_spec=argument_spec)
if HAS_FOOTMARK is False:
module.fail_json(msg="Package 'footmark' required for the module alicloud_bucket_object.")
oss_bucket = oss_bucket_connect(module)
mode = module.params['mode']
file_name = module.params['file_name']
object_key = module.params['object']
headers = module.params['headers']
changed = False
if mode == 'put':
content = module.params['content']
if content and file_name:
module.fail_json(msg="'content' and 'file_name' only one can be specified when mode is put.")
overwrite = module.params['overwrite']
permission = module.params['permission']
try:
if content:
oss_bucket.put_object(object_key, content, overwrite, headers=headers)
changed = True
elif file_name:
oss_bucket.put_object_from_file(object_key, file_name, overwrite, headers=headers)
changed = True
elif oss_bucket.is_object_exist(object_key):
if permission:
oss_bucket.put_object_acl(object_key, permission)
changed = True
if headers:
oss_bucket.update_object_headers(object_key, headers)
changed = True
module.exit_json(changed=changed, key=object_key, object=get_object_info(oss_bucket.get_object_info(object_key)))
except Exception as e:
module.fail_json(msg="Unable to upload an object {0} or "
"modify its permission and headers, and got an error: {1}".format(object_key, e))
elif mode == 'get':
byte_range = module.params['byte_range']
try:
if file_name:
oss_bucket.get_object_to_file(object_key, file_name, byte_range=byte_range, headers=headers)
else:
module.fail_json(msg="'file_name' must be specified when mode is get.")
module.exit_json(changed=changed, key=object_key, object=get_object_info(oss_bucket.get_object_info(object_key)))
except Exception as e:
module.fail_json(msg="Unable to download object {0}, and got an error: {1}".format(object_key, e))
elif mode == 'list':
objects = []
max_keys = 500
try:
while True:
results = oss_bucket.list_objects(prefix=object_key, max_keys=max_keys)
for obj in results:
objects.append(get_object_info(obj))
if len(results) < max_keys:
break
module.exit_json(changed=False, objects=objects)
except Exception as e:
module.fail_json(msg="Unable to retrieve all objects, and got an error: {0}".format(e))
else:
try:
oss_bucket.delete_object(object_key)
module.exit_json(changed=changed, key=object_key)
except Exception as e:
module.fail_json(msg="Unable to delete an object {0}, and got an error: {1}".format(object_key, e))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
8025203
|
<reponame>Ricardo191998/sistop-2020-1<filename>tareas/2/RomeroCristian_LopezUlysses/main.py
#!/usr/bin/python3
# -*-coding: utf-8 -*-x
from procesos.Proceso import Proceso
from procesos.Spn import Spn
from procesos.RRobin import RRobin
from procesos.Fcfs import Fcfs
def main():
procesos = list()
for _ in range(5):
procesos.append(Proceso([70, 600], [0, 15]))
fcfs = Fcfs(70, procesos)
spn = Spn(70, procesos)
roundr = RRobin(70, procesos)
rr4 = RRobin(70*4, procesos)
fcfs.start()
spn.start()
roundr.start()
rr4.start()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
8149428
|
from Format import NewFormat
from AbsentError import AbsentError
import psycopg2
class database:
def __init__(self):
self.conn = psycopg2.connect(
host="localhost",
database="postgres",
user="root",
password="<PASSWORD>"
)
self.conn.autocommit = True
def __del__(self):
try:
# in case no connection to the DB was made
self.conn.close()
except AttributeError:
pass
def create_table(self):
s1 = 'DROP TABLE IF EXISTS jsonobjects'
s2 = 'CREATE TABLE jsonobjects (id serial NOT NULL PRIMARY KEY,info json NOT NULL);'
cursor = self.conn.cursor()
cursor.execute(s1)
cursor.execute(s2)
def insert(self,newFormat):
if not newFormat.validateCorrectly():
raise AbsentError
json_string = newFormat.getJson()
        cursor = self.conn.cursor()
        # parameterized query: psycopg2 binds json_string safely instead of concatenating it into the SQL
        cursor.execute('INSERT INTO jsonobjects (info) VALUES (%s);', (json_string,))
def close_connection(self):
self.conn.close()
def open_connection(self):
self.conn = psycopg2.connect(
host="localhost",
database="postgres",
user="root",
password="<PASSWORD>"
)
self.conn.autocommit = True
def retrieve_all(self):
cursor = self.conn.cursor()
query = '''select * from jsonobjects'''
cursor.execute(query)
rows = cursor.fetchall()
return rows
def retrieve_object_with_id(self,id):
cursor = self.conn.cursor()
        # parameterized query: the id is bound as a value rather than concatenated into the SQL
        cursor.execute('select info from jsonobjects where id = %s', (id,))
row = cursor.fetchone()
return row
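# Hedged usage sketch (assumes a local PostgreSQL instance reachable with the credentials
# hard-coded above; nothing here depends on NewFormat).
if __name__ == '__main__':
    db = database()
    db.create_table()
    print(db.retrieve_all())  # freshly created table, so this prints an empty list
    db.close_connection()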
|
StarcoderdataPython
|
110849
|
class LCRecommendation:
TURN_LEFT = -1
TURN_RIGHT = 1
STRAIGHT_AHEAD = 0
CHANGE_TO_EITHER_WAY = 2
change_lane = True
change_to_either_way = False
recommendation = 0
def __init__(self, lane, recommendation):
self.lane = lane
self.recommendation = recommendation
if recommendation == self.TURN_RIGHT:
self.target_lane = lane - 1
elif recommendation == self.TURN_LEFT:
self.target_lane = lane + 1
elif recommendation == self.CHANGE_TO_EITHER_WAY:
self.change_to_either_way = True
elif recommendation == self.STRAIGHT_AHEAD:
self.change_lane = False
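# Hedged usage sketch: a recommendation to change from lane 2 towards lane 1.
if __name__ == '__main__':
    rec = LCRecommendation(lane=2, recommendation=LCRecommendation.TURN_RIGHT)
    print(rec.change_lane, rec.target_lane)  # True 1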
|
StarcoderdataPython
|
3293025
|
import xadmin
from enterprise_manage.apps.user_center.models import *
class UserProfileAdmin(object):
list_display = ['id', 'name', 'to_user', 'mobile_phone', 'email', 'wechat', 'qq']
list_editable = ['id', 'name', 'to_user', 'mobile_phone', 'email', 'wechat', 'qq', 'leader']
xadmin.site.register(UserProfile, UserProfileAdmin)
|
StarcoderdataPython
|
5092689
|
import json
import boto3
from botocore.exceptions import ClientError
import urllib3
region = 'us-east-1'
def lambda_handler(event, context):
try:
dictionary={}
service=0
ec2 = boto3.client('ec2', region_name=region)
ec2_response = ec2.describe_instances()
instances_full_details = ec2_response['Reservations']
for instance_detail in instances_full_details:
group_instances = instance_detail['Instances']
for instance in group_instances:
dict_count=0
string={}
try:
#Checking if instance is public or not
publicIp="The public IP of EC2 is :"+instance['PublicIpAddress']
except Exception as error:
publicIp="Instance is private"
finally:
#Instance id
dict_count+=1
string[str(dict_count)]="Your EC2 Instance Id is :"+instance['InstanceId']
#AMI description
image_id=instance['ImageId']
di_response=ec2.describe_images(ImageIds=[
image_id,
])
description=di_response['Images'][0]['Description']
#EC2 architecture and ami description
dict_count+=1
string[str(dict_count)]="The EC2 Architecture is "+instance['InstanceType']+" and AMI is "+description
#Public ip of EC2
dict_count+=1
string[str(dict_count)]=publicIp
try:
#Checking security group
sg_dict={}
sg=instance['SecurityGroups'][0]['GroupId']
securitygroup="The Security group id is "+sg
security_group_response=ec2.describe_security_groups(GroupIds=[sg])
#Checking for ports of sg
port_response=security_group_response['SecurityGroups'][0]['IpPermissions']
count=0
for i in port_response:
count+=1
try:
cidr_sg=i['UserIdGroupPairs'][0]['GroupId']
sg_dict[count]="Port No "+str(i['FromPort'])+" and CIDR "+str(cidr_sg)
except Exception as error:
sg_dict[count]="Port No "+str(i['FromPort'])+" and CIDR "+str(i['IpRanges'][0]['CidrIp'])
#Security group
dict_count+=1
string[str(dict_count)]=securitygroup
#Ports of sg
dict_count+=1
string[str(dict_count)]="The ports are : "
dict_count+=1
string[str(dict_count)]=sg_dict
except Exception as error:
securitygroup="No security group present"
dict_count+=1
string[str(dict_count)]=securitygroup
finally:
#Checking key-pair
try:
keypair="The key Pair of EC2 instance is :"+instance['KeyName']+".pem"
except Exception as error:
keypair="No keypair found"
finally:
dict_count+=1
string[str(dict_count)]=keypair
try:
#Checking apache installation
ip="http://"+instance['PublicIpAddress']
http = urllib3.PoolManager()
request1 = http.request('GET',ip)
dict_count+=1
string[str(dict_count)]="Installed Apache in the server"
try:
#Checking test.html is installed or not
ip="http://"+instance['PublicIpAddress']+"/test.html"
request2=http.request('GET',ip)
if request2.status==200:
dict_count+=1
string[str(dict_count)]="Added test.html file in the Server"
else:
dict_count+=1
string[str(dict_count)]="test.html not found in the Server"
except Exception as error:
print(error)
except Exception as error:
dict_count+=1
string[str(dict_count)]="Apache not installed in the server"
finally:
#Volume id and size
volume_id=instance['BlockDeviceMappings'][0]['Ebs']['VolumeId']
ec2_resource = boto3.resource('ec2')
volume=str(ec2_resource.Volume(volume_id).size)
dict_count+=1
string[str(dict_count)]="Root Volume Id of EC2 is "+volume_id+" and the volume size is "+volume+" GB"
service+=1
qw="EC2-"+str(service)
dictionary[qw]=string
elbv2=boto3.client('elbv2', region_name=region)
elbv2_response = elbv2.describe_load_balancers()['LoadBalancers']
res_count=0
try:
#Checking for elb
elbv2_response[0]['LoadBalancerArn']
for elb in elbv2_response:
dict_count=0
dict={}
dict_count+=1
                #Splitting the id out of the ELB DNS name
elb_id=elb['DNSName'].split('.')[0]
dict[str(dict_count)]="Your Application LB Id is :"+elb_id
dict_count+=1
#Checking for public facing or private lb
dict[str(dict_count)]="The ELB is "+elb['Scheme']
dict_count+=1
#Printing dns of elb
elb_dns=elb['DNSName']
dict[str(dict_count)]="The ELB DNS is "+elb_dns
try:
#Checking for target group
elb_arn=elb['LoadBalancerArn']
targetgroup_response=elbv2.describe_target_groups(LoadBalancerArn=elb_arn)['TargetGroups'][0]
dict_count+=1
tg_arn=targetgroup_response['TargetGroupArn']
dict[str(dict_count)]="The target group of ELB is "+tg_arn
try:
#Checking for any instance attached to the target group
targetgrouphealth_response=elbv2.describe_target_health(TargetGroupArn=tg_arn)['TargetHealthDescriptions']
# print(targetgrouphealth_response)
tg_dict={}
tg_count=0
try:
for tg in targetgrouphealth_response:
instance=tg['Target']['Id']
tg_count+=1
tg_dict[str(tg_count)]="EC2 with id : "+instance
except Exception as error:
print("No instance found")
dict_count+=1
dict[str(dict_count)]=tg_dict
except Exception as error:
                        print("Error with target group health")
finally:
                        #Printing healthy and unhealthy thresholds of the ELB
dict_count+=1
dict[str(dict_count)]="Health threshold of ELB is "+str(targetgroup_response['HealthyThresholdCount'])
dict_count+=1
dict[str(dict_count)]="Unhealth threshold of ELB is "+str(targetgroup_response['UnhealthyThresholdCount'])
try:
                            #Accessing the ELB DNS
http=urllib3.PoolManager()
ip="http://"+elb_dns
dns_req=http.request('GET',ip,timeout=1.0)
dict_count+=1
dict[str(dict_count)]="Able to access the ELB URL"
except Exception as error:
dict_count+=1
dict[str(dict_count)]="Cannot access the ELB URL"
res_count+=1
qw="ELB-"+str(res_count)
dictionary[qw]=dict
except Exception as error:
print("Error with target group")
result=json.dumps(dictionary)
return result
# return dictionary
except Exception as error:
print("No loadbalancer found")
except Exception as error:
print("Boto3 Assignment Failed")
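# Hedged local-run sketch (assumes AWS credentials for us-east-1 are configured in the
# environment; the empty event mirrors a manual "Test" invocation in the Lambda console).
if __name__ == "__main__":
    print(lambda_handler({}, None))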
|
StarcoderdataPython
|
6486108
|
<filename>src/hub/management/commands/categorize-hubs.py
from django.core.management.base import BaseCommand
from hub.models import Hub, HubCategory
class Command(BaseCommand):
def update_hub_category(self, hub_name, category_name):
"""
Updates the category of a hub.
Parameters:
hub_name (str): The name of the hub to update.
category_name (str): The name of the category we're updating to.
"""
if Hub.objects.filter(name=hub_name).exists():
hub = Hub.objects.get(name=hub_name)
category = HubCategory.objects.get(category_name=category_name)
hub.category = category
hub.save()
def handle(self, *args, **options):
categories_and_hubs = {
'Math': [
'mathematics',
'abstract algebra',
'data analysis, statistics and probability',
'number theory',
'logic',
],
'Physics': [
'physics',
'astrophysics',
'atomic physics',
'fluid dynamics',
'nuclear theory',
'optics',
'quantum physics',
],
'Computer Science': [
'computer science',
'artificial intelligence',
'distributed, parallel, and cluster computing',
'cryptography and security',
'blockchain',
'cryptocurrency',
'programming languages',
'data structures and algorithms',
'human-computer interaction',
'machine learning',
'software engineering',
],
'Biology': [
'biology',
'bioinformatics',
'biomolecules',
'biophysics',
'biochemistry',
'botany',
'cancer-biology',
'cell biology',
'developmental-biology',
'ecology',
'environmental science',
'evolutionary-biology',
'genetics',
'geology',
'microbiology',
'molecular biology',
'systems-biology',
'paleontology',
'pathology',
'pharmacology-and-toxicology',
'physiology',
'neuroscience',
'synthetic biology',
'zoology',
],
'Medicine': [
'anesthesiology',
'medicine',
'covid-19 / coronavirus',
'clinical-trials',
'dermatology',
'epidemiology',
'endocrinology',
'immunology',
'internal medicine',
'kinesiology',
'longevity',
'mental illness',
'nutrition',
],
'Chemistry': [
'chemistry',
'chemical physics',
'materials science',
],
'Engineering': [
'engineering',
'biotechnology',
'chemical engineering',
'robotics',
'emerging technologies',
'photovoltaics',
],
'Social and Behavioral Sciences': [
'sociology',
'psychology',
'political science',
'geography',
'legal',
'economics',
'general finance',
'methodology',
'metascience',
],
'Arts and Humanities': [
'art',
'design',
'philosophy',
'history',
'general literature',
'anthropology',
],
'Other': [
'other',
],
}
for category_name in categories_and_hubs:
for hub_name in categories_and_hubs[category_name]:
self.update_hub_category(hub_name, category_name)
|
StarcoderdataPython
|
1937254
|
#!/usr/bin/env python
"""End to end tests that run ArtifactCollectorFlow."""
from grr.endtoend_tests import base
from grr.lib import aff4
from grr.lib import rdfvalue
class TestDarwinPersistenceMechanisms(base.AutomatedTest):
"""Test DarwinPersistenceMechanisms."""
platforms = ["Darwin"]
flow = "ArtifactCollectorFlow"
test_output_path = "analysis/persistence/testing"
args = {"artifact_list": ["DarwinPersistenceMechanisms"],
"output": test_output_path}
def CheckFlow(self):
output_urn = self.client_id.Add(self.test_output_path)
collection = aff4.FACTORY.Open(output_urn, mode="r", token=self.token)
self.assertIsInstance(collection, aff4.RDFValueCollection)
persistence_list = list(collection)
# Make sure there are at least some results.
self.assertGreater(len(persistence_list), 5)
launchservices = "/System/Library/CoreServices/launchservicesd"
for p in persistence_list:
if p.pathspec.path == launchservices:
return
self.fail("Service listing does not contain launchservices: %s." %
launchservices)
class TestRootDiskVolumeUsage(base.AutomatedTest):
"""Test RootDiskVolumeUsage."""
platforms = ["Linux", "Darwin"]
flow = "ArtifactCollectorFlow"
test_output_path = "analysis/diskusage/testing"
args = {"artifact_list": ["RootDiskVolumeUsage"],
"output": test_output_path}
def CheckFlow(self):
output_urn = self.client_id.Add(self.test_output_path)
collection = aff4.FACTORY.Open(output_urn, mode="r", token=self.token)
self.assertIsInstance(collection, aff4.RDFValueCollection)
volume_list = list(collection)
# Make sure there are at least some results.
self.assertEqual(len(volume_list), 1)
self.assertEqual(volume_list[0].unix.mount_point, "/")
self.assertTrue(isinstance(volume_list[0].FreeSpacePercent(), float))
class TestParserDependency(base.AutomatedTest):
"""Test Artifacts complete when KB is empty."""
platforms = ["Windows"]
flow = "ArtifactCollectorFlow"
test_output_path = "analysis/testing/TestParserDependency"
args = {"artifact_list": ["WinPathEnvironmentVariable"], "dependencies":
"FETCH_NOW", "output": test_output_path}
def setUp(self):
# Set the KB to an empty object
client = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token)
self.old_kb = client.Get(client.Schema.KNOWLEDGE_BASE)
client.Set(client.Schema.KNOWLEDGE_BASE, rdfvalue.KnowledgeBase())
client.Flush()
super(TestParserDependency, self).setUp()
def CheckFlow(self):
output_urn = self.client_id.Add(self.test_output_path)
self.collection = aff4.FACTORY.Open(output_urn, mode="r", token=self.token)
self.assertIsInstance(self.collection, aff4.RDFValueCollection)
volume_list = list(self.collection)
# Make sure there are at least some results.
self.assertEqual(len(volume_list), 1)
def tearDown(self):
# Set the KB to an empty object
client = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token)
client.Set(client.Schema.KNOWLEDGE_BASE, self.old_kb)
client.Flush()
super(TestParserDependency, self).tearDown()
class TestParserDependencyWinDir(TestParserDependency):
test_output_path = "analysis/testing/TestParserDependencyWinDir"
args = {"artifact_list": ["WinDirEnvironmentVariable"], "dependencies":
"FETCH_NOW", "output": test_output_path}
class TestParserDependencyTemp(TestParserDependency):
test_output_path = "analysis/testing/TestParserDependencyTemp"
args = {"artifact_list": ["TempEnvironmentVariable"], "dependencies":
"FETCH_NOW", "output": test_output_path}
class TestParserDependencyUserShellFolders(TestParserDependency):
test_output_path = "analysis/testing/TestParserDependencyUserShellFolders"
args = {"artifact_list": ["UserShellFolders"], "dependencies": "FETCH_NOW",
"output": test_output_path}
def CheckFlow(self):
super(TestParserDependencyUserShellFolders, self).CheckFlow()
for userobj in self.collection:
self.assertTrue(userobj.appdata)
self.assertTrue(userobj.temp)
|
StarcoderdataPython
|
178142
|
<reponame>Linekio/bin<gh_stars>0
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import HtmlFormatter
class TableHtmlFormatter(HtmlFormatter):
def __init__(self, **options):
super().__init__(**options)
if options.get('linenos', False) == 'bin-table':
self.linenos = 3
def wrap(self, source, outfile):
if self.linenos == 3:
source = self._wrap_table(source)
yield from source
def _wrap_table(self, inner):
yield 0, '<table class="highlight"><tbody>'
for i, (t, l) in enumerate([*inner, (1, '')]):
yield t, f'<tr><td class="line-number" id=L{i + 1} value={i + 1}></td><td class="line-content">{l}</td></tr>\n'
yield 0, '</tbody></table>'
_html_formatter = TableHtmlFormatter(linenos='bin-table', style='monokai')
def highlight(code, language):
lexer = get_lexer_by_name(language)
return pygments.highlight(code, lexer, _html_formatter)
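# Hedged usage sketch: render a small snippet as an HTML table with per-line anchors.
if __name__ == '__main__':
    print(highlight("print('hello')", 'python'))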
|
StarcoderdataPython
|
3555236
|
<reponame>guilhermeleobas/rbc
"""https://en.cppreference.com/w/c/numeric/math
"""
from collections import namedtuple
from . import make_intrinsic
arg = namedtuple("arg", ("name", "ty"))
cmath = {
# Trigonometric
"cos": ("double", [arg(name="x", ty="double")]),
"cosf": ("float", [arg(name="x", ty="float")]),
"sin": ("double", [arg(name="x", ty="double")]),
"sinf": ("float", [arg(name="x", ty="float")]),
"tan": ("double", [arg(name="x", ty="double")]),
"tanf": ("float", [arg(name="x", ty="float")]),
"acos": ("double", [arg(name="x", ty="double")]),
"acosf": ("float", [arg(name="x", ty="float")]),
"asin": ("double", [arg(name="x", ty="double")]),
"asinf": ("float", [arg(name="x", ty="float")]),
"atan": ("double", [arg(name="x", ty="double")]),
"atanf": ("float", [arg(name="x", ty="float")]),
"atan2": ("double", [arg(name="y", ty="double"), arg(name="x", ty="double")]),
"atan2f": ("float", [arg(name="y", ty="float"), arg(name="x", ty="float")]),
# Hyperbolic
"cosh": ("double", [arg(name="x", ty="double")]),
"coshf": ("float", [arg(name="x", ty="float")]),
"sinh": ("double", [arg(name="x", ty="double")]),
"sinhf": ("float", [arg(name="x", ty="float")]),
"tanh": ("double", [arg(name="x", ty="double")]),
"tanhf": ("float", [arg(name="x", ty="float")]),
"acosh": ("double", [arg(name="x", ty="double")]),
"acoshf": ("float", [arg(name="x", ty="float")]),
"asinh": ("double", [arg(name="x", ty="double")]),
"asinhf": ("float", [arg(name="x", ty="float")]),
"atanh": ("double", [arg(name="x", ty="double")]),
"atanhf": ("float", [arg(name="x", ty="float")]),
# Exponential and logarithmic functions
"exp": ("double", [arg(name="x", ty="double")]),
"expf": ("float", [arg(name="x", ty="float")]),
"frexp": ("double", [arg(name="x", ty="double"), arg(name="exp", ty="int*")]),
"frexpf": ("float", [arg(name="x", ty="float"), arg(name="exp", ty="int*")]),
"ldexp": ("double", [arg(name="x", ty="double"), arg(name="exp", ty="int")]),
"ldexpf": ("float", [arg(name="x", ty="float"), arg(name="exp", ty="int")]),
"log": ("double", [arg(name="x", ty="double")]),
"logf": ("float", [arg(name="x", ty="float")]),
"log10": ("double", [arg(name="x", ty="double")]),
"log10f": ("float", [arg(name="x", ty="float")]),
"modf": ("double", [arg(name="x", ty="double"), arg(name="intpart", ty="double*")]),
"modff": ("float", [arg(name="x", ty="float"), arg(name="intpart", ty="float*")]),
"exp2": ("double", [arg(name="x", ty="double")]),
"exp2f": ("float", [arg(name="x", ty="float")]),
"expm1": ("double", [arg(name="x", ty="double")]),
"expm1f": ("float", [arg(name="x", ty="float")]),
"ilogb": ("double", [arg(name="x", ty="double")]),
"ilogbf": ("float", [arg(name="x", ty="float")]),
"log1p": ("double", [arg(name="x", ty="double")]),
"log1pf": ("float", [arg(name="x", ty="float")]),
"log2": ("double", [arg(name="x", ty="double")]),
"log2f": ("float", [arg(name="x", ty="float")]),
"logb": ("double", [arg(name="x", ty="double")]),
"logbf": ("float", [arg(name="x", ty="float")]),
# power functions
"pow": (
"double",
[arg(name="base", ty="double"), arg(name="exponent", ty="double")],
),
"powf": (
"float",
[arg(name="base", ty="float"), arg(name="exponent", ty="float")],
),
"sqrt": ("double", [arg(name="x", ty="double")]),
"sqrtf": ("float", [arg(name="x", ty="float")]),
"cbrt": ("double", [arg(name="x", ty="double")]),
"cbrtf": ("float", [arg(name="x", ty="float")]),
"hypot": ("double", [arg(name="x", ty="double"), arg(name="y", ty="double")]),
"hypotf": ("float", [arg(name="x", ty="float"), arg(name="y", ty="float")]),
# error and gamma functions
"erf": ("double", [arg(name="x", ty="double")]),
"erff": ("float", [arg(name="x", ty="float")]),
"erfc": ("double", [arg(name="x", ty="double")]),
"erfcf": ("float", [arg(name="x", ty="float")]),
"tgamma": ("double", [arg(name="x", ty="double")]),
"tgammaf": ("float", [arg(name="x", ty="float")]),
"lgamma": ("double", [arg(name="x", ty="double")]),
"lgammaf": ("float", [arg(name="x", ty="float")]),
# Rounding
"ceil": ("double", [arg(name="x", ty="double")]),
"ceilf": ("float", [arg(name="x", ty="float")]),
"floor": ("double", [arg(name="x", ty="double")]),
"floorf": ("float", [arg(name="x", ty="float")]),
"fmod": (
"double",
[arg(name="numer", ty="double"), arg(name="denom", ty="double")],
),
"fmodf": (
"float",
[arg(name="numer", ty="float"), arg(name="denom", ty="float")],
),
"trunc": ("double", [arg(name="x", ty="double")]),
"truncf": ("float", [arg(name="x", ty="float")]),
"round": ("double", [arg(name="x", ty="double")]),
"roundf": ("float", [arg(name="x", ty="float")]),
"lround": ("long int", [arg(name="x", ty="double")]),
"lroundf": ("long int", [arg(name="x", ty="float")]),
"llround": ("long long int", [arg(name="x", ty="double")]),
"llroundf": ("long long int", [arg(name="x", ty="float")]),
"rint": ("double", [arg(name="x", ty="double")]),
"rintf": ("float", [arg(name="x", ty="float")]),
"lrint": ("long int", [arg(name="x", ty="double")]),
"lrintf": ("long int", [arg(name="x", ty="float")]),
"llrint": ("long long int", [arg(name="x", ty="double")]),
"llrintf": ("long long int", [arg(name="x", ty="float")]),
"nearbyint": ("double", [arg(name="x", ty="double")]),
"nearbyintf": ("float", [arg(name="x", ty="float")]),
"remainder": (
"double",
[arg(name="numer", ty="double"), arg(name="denom", ty="double")],
),
"remainderf": (
"float",
[arg(name="numer", ty="float"), arg(name="denom", ty="float")],
),
# Floating-point manipulation
"copysign": (
"double",
[arg(name="x", ty="double"), arg(name="y", ty="double")],
),
"copysignf": (
"float",
[arg(name="x", ty="float"), arg(name="y", ty="float")],
),
"nan": ("double", [arg(name="tagp", ty="const char*")]),
"nanf": ("float", [arg(name="tagp", ty="const char*")]),
"nextafter": (
"double",
[arg(name="x", ty="double"), arg(name="y", ty="double")],
),
"nextafterf": (
"float",
[arg(name="x", ty="float"), arg(name="y", ty="float")],
),
"nexttoward": (
"double",
[arg(name="x", ty="double"), arg(name="y", ty="double")],
),
"nexttowardf": (
"float",
[arg(name="x", ty="float"), arg(name="y", ty="float")],
),
# Minimum, maximum, difference functions
"fdim": ("double", [arg(name="x", ty="double"), arg(name="y", ty="double")]),
"fdimf": ("float", [arg(name="x", ty="float"), arg(name="y", ty="float")]),
"fmax": ("double", [arg(name="x", ty="double"), arg(name="y", ty="double")]),
"fmaxf": ("float", [arg(name="x", ty="float"), arg(name="y", ty="float")]),
"fmin": ("double", [arg(name="x", ty="double"), arg(name="y", ty="double")]),
"fminf": ("float", [arg(name="x", ty="float"), arg(name="y", ty="float")]),
# Other functions
"fabs": ("double", [arg(name="x", ty="double")]),
"fabsf": ("float", [arg(name="x", ty="float")]),
"abs": ("long long int", [arg(name="x", ty="double")]),
"absf": ("long", [arg(name="x", ty="float")]),
"fma": (
"double",
[
arg(name="x", ty="double"),
arg(name="y", ty="double"),
arg(name="z", ty="double"),
],
),
"fmaf": (
"float",
[
arg(name="x", ty="float"),
arg(name="y", ty="float"),
arg(name="z", ty="float"),
],
),
}
for fname, (retty, args) in cmath.items():
argnames = [arg.name for arg in args]
doc = f"C math function {fname}"
fn = make_intrinsic(fname, retty, argnames, __name__, globals(), doc)
|
StarcoderdataPython
|
1857500
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-11 15:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name for the object', max_length=255)),
('date_modified', models.DateTimeField(auto_now=True, help_text='DateTime of last modification')),
('flag', models.CharField(blank=True, choices=[('FLAG', 'Flagged'), ('FLAG_HEART', 'Flagged (Heart)'), ('IMPORTANT', 'Important'), ('REVOKED', 'Revoked'), ('SUPERSEDED', 'Superseded')], help_text='Flag for highlighting the item (optional)', max_length=64, null=True)),
('description', models.CharField(blank=True, help_text='Description (optional)', max_length=255)),
('omics_uuid', models.UUIDField(default=uuid.uuid4, help_text='Filesfolders Omics UUID', unique=True)),
('file', models.FileField(blank=True, help_text='Uploaded file', null=True, upload_to='filesfolders.FileData/bytes/file_name/content_type')),
('public_url', models.BooleanField(default=False, help_text='Allow providing a public URL for the file')),
('secret', models.CharField(help_text='Secret string for creating public URL', max_length=255, unique=True)),
],
options={
'ordering': ['folder', 'name'],
},
),
migrations.CreateModel(
name='FileData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bytes', models.TextField()),
('file_name', models.CharField(max_length=255)),
('content_type', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Folder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name for the object', max_length=255)),
('date_modified', models.DateTimeField(auto_now=True, help_text='DateTime of last modification')),
('flag', models.CharField(blank=True, choices=[('FLAG', 'Flagged'), ('FLAG_HEART', 'Flagged (Heart)'), ('IMPORTANT', 'Important'), ('REVOKED', 'Revoked'), ('SUPERSEDED', 'Superseded')], help_text='Flag for highlighting the item (optional)', max_length=64, null=True)),
('description', models.CharField(blank=True, help_text='Description (optional)', max_length=255)),
('omics_uuid', models.UUIDField(default=uuid.uuid4, help_text='Filesfolders Omics UUID', unique=True)),
],
options={
'ordering': ['project', 'name'],
},
),
migrations.CreateModel(
name='HyperLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name for the object', max_length=255)),
('date_modified', models.DateTimeField(auto_now=True, help_text='DateTime of last modification')),
('flag', models.CharField(blank=True, choices=[('FLAG', 'Flagged'), ('FLAG_HEART', 'Flagged (Heart)'), ('IMPORTANT', 'Important'), ('REVOKED', 'Revoked'), ('SUPERSEDED', 'Superseded')], help_text='Flag for highlighting the item (optional)', max_length=64, null=True)),
('description', models.CharField(blank=True, help_text='Description (optional)', max_length=255)),
('omics_uuid', models.UUIDField(default=uuid.uuid4, help_text='Filesfolders Omics UUID', unique=True)),
('url', models.URLField(help_text='URL for the link', max_length=2000)),
('folder', models.ForeignKey(blank=True, help_text='Folder under which object exists (null if root folder)', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='filesfolders_hyperlink_children', to='filesfolders.Folder')),
],
options={
'ordering': ['folder', 'name'],
},
),
]
|
StarcoderdataPython
|
12846275
|
import datetime
from mangrove.datastore.database import get_db_manager,\
_delete_db_and_remove_db_manager
from mangrove.bootstrap.initializer import _find_views
from pytz import UTC
import random
from mangrove.datastore.entity import Entity
from collections import defaultdict
class ViewGenerationTimer(object):
def _set_db_manager(self):
self.manager = get_db_manager('http://localhost:5984/',
'mangrove-test')
def _delete_db_and_remove_db_manager(self):
_delete_db_and_remove_db_manager(self.manager)
def _refresh_db_manager(self):
self._set_db_manager()
self._delete_db_and_remove_db_manager()
self._set_db_manager()
def _reset(self, number_of_entities=0, number_of_data_records_per_entity=8):
self._number_of_entities = number_of_entities
self._refresh_db_manager()
self._setup_entities()
self._setup_datadict_types()
self._add_data_to_entities(number_of_data_records_per_entity)
def _setup_entities(self):
ENTITY_TYPE = ["Health_Facility", "Clinic"]
AGGREGATION_PATH_NAME = "governance"
# Entities for State 1: Maharashtra
# location, aggregation_path
locations = [
['India', 'MH', 'Pune'],
['India', 'MH', 'Mumbai'],
['India', 'Karnataka', 'Bangalore'],
['India', 'Karnataka', 'Hubli'],
['India', 'Kerala', 'Kochi'],
]
aggregation_paths = [
["Director", "Med_Supervisor", "Surgeon"],
["Director", "Med_Supervisor", "Nurse"],
["Director", "Med_Officer", "Doctor"],
["Director", "Med_Officer", "Surgeon"],
["Director", "Med_Officer", "Nurse"],
]
self.entities = []
for i in range(self._number_of_entities):
location = random.choice(locations)
aggregation_path = random.choice(aggregation_paths)
e = Entity(self.manager, entity_type=ENTITY_TYPE, location=location)
e.set_aggregation_path(AGGREGATION_PATH_NAME, aggregation_path)
e.save()
self.entities.append(e)
def _add_data_to_entities(self, number_of_data_records_per_entity):
months = [1]
number_of_years = number_of_data_records_per_entity / (
len(self.dd_types) * len(months)
)
years = range(2011 - max(1, number_of_years), 2011)
event_times = []
for year in years:
for month in months:
event_time = datetime.datetime(year, month, 1, tzinfo=UTC)
event_times.append(event_time)
#for e in self.entities:
# for dd_type in self.dd_types.values():
# for event_time in event_times:
# slug = dd_type.slug
# value = random.random()
# e.add_data(
# data=[(slug, value, self.dd_types[slug])],
# event_time=event_time
# )
def print_csv_of_view_generation_times(self):
iterations = [20, 40, 60, 80, 100]
times_by_view_name = defaultdict(dict)
for number_of_entities in iterations:
times = self._calculate_view_generation_time(number_of_entities, 8)
for k, v in times.items():
times_by_view_name[k][number_of_entities] = str(v)
print ",".join(["number of entities"] + [str(i) for i in iterations])
for name, times in times_by_view_name.items():
row = [name] + [times_by_view_name[name][number_of_entities] for number_of_entities in iterations]
print ",".join(row)
def print_view_generation_times(self):
times = self._calculate_view_generation_time(100, 8)
import operator
        sorted_times = sorted(times.items(), key=operator.itemgetter(1))
        for view_name, generation_time in sorted_times:
            print(view_name + ": " + str(generation_time))
def _calculate_view_generation_time(self, number_of_entities, number_of_data_records_per_entity):
self._reset(number_of_entities, number_of_data_records_per_entity)
js_views = _find_views()
times = {}
for v in js_views.keys():
funcs = js_views[v]
js_map = (funcs['map'] if 'map' in funcs else None)
js_reduce = (funcs['reduce'] if 'reduce' in funcs else None)
start = datetime.datetime.now()
self.manager.create_view(v, js_map, js_reduce, view_document=v)
all_rows = self.manager.load_all_rows_in_view(v + "/" + v)
# we need to hit the view to make sure it compiles
number_of_rows = len(all_rows)
end = datetime.datetime.now()
times[v] = (end - start).total_seconds()
return times
if __name__ == "__main__":
divider = "-" * 70
timer = ViewGenerationTimer()
    print(divider)
    timer.print_view_generation_times()
    print(divider)
    timer.print_csv_of_view_generation_times()
    print(divider)
|
StarcoderdataPython
|
3368645
|
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.ticker as plticker
def plot_image(value_fn):
# Values over all initial dealer scores and player scores
# Given the policy, the value should be near zero everywhere except when the player has a score of 21
dealer_scores = np.arange(1, 11, 1) # 10
player_scores = np.arange(11, 22, 1) # 11
V = np.zeros(shape=(len(dealer_scores), len(player_scores)))
for d_idx, dealer_score in enumerate(dealer_scores):
for p_idx, player_score in enumerate(player_scores):
value = value_fn(dealer_score, player_score)
V[d_idx][p_idx] = value
fig, ax = plt.subplots()
ax.imshow(V)
ax.set_ylabel("Dealer initial showing")
ax.yaxis.set_ticklabels(np.arange(0, 11, 1))
ax.yaxis.set_major_locator(plticker.MultipleLocator(base=1.0))
ax.set_xlabel("Player sum")
ax.xaxis.set_ticklabels(np.arange(10, 22, 1))
ax.xaxis.set_major_locator(plticker.MultipleLocator(base=1.0))
plt.show()
def plot(value_fn):
# Values over all initial dealer scores and player scores
# Given the policy, the value should be near zero everywhere except when the player has a score of 21
dealer_scores = np.arange(1, 11, 1) # 10
player_scores = np.arange(11, 22, 1) # 11
V = np.zeros(shape=(len(dealer_scores), len(player_scores)))
for d_idx, dealer_score in enumerate(dealer_scores):
for p_idx, player_score in enumerate(player_scores):
value = value_fn(dealer_score, player_score)
V[d_idx][p_idx] = value
D, P = np.meshgrid(dealer_scores, player_scores)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(D, P, V.transpose()) # somehow we have to transpose here
ax.set_xlabel("Dealer initial showing")
ax.set_ylabel("Player sum")
ax.set_zlabel("Value")
ax.xaxis.set_major_locator(plticker.MultipleLocator(base=1.0))
ax.yaxis.set_major_locator(plticker.MultipleLocator(base=1.0))
plt.show()
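# Example usage (illustrative sketch, not part of the original module): a toy
# value function that is highest at a player sum of 21 and decays linearly
# below it, plotted with the two helpers above.
if __name__ == "__main__":
    def toy_value_fn(dealer_score, player_score):
        # Peak value at 21, decreasing for lower player sums
        return max(0.0, 1.0 - (21 - player_score) * 0.1)

    plot_image(toy_value_fn)
    plot(toy_value_fn)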
|
StarcoderdataPython
|
6662628
|
import numpy as np
import random
from openrec.tf1.utils.samplers import Sampler
def YouTubeSampler(dataset, batch_size, max_seq_len, user_feature, num_process=5, seed=100, sort=True):
random.seed(seed)
def batch(dataset, user_feature=user_feature, max_seq_len=max_seq_len, batch_size=batch_size):
while True:
input_npy = np.zeros(batch_size, dtype=[('seq_item_id', (np.int32, max_seq_len)),
('seq_len', np.int32),
('label', np.int32),
('user_gender', np.int32),
('user_geo', np.int32)])
for ind in range(batch_size):
user_id = random.randint(0, dataset.total_users()-1)
item_list = dataset.get_positive_items(user_id, sort=sort)
while len(item_list) <= 1:
user_id = random.randint(0, dataset.total_users()-1)
item_list = dataset.get_positive_items(user_id, sort=sort)
predict_pos = random.randint(1, len(item_list) - 1)
train_items = item_list[max(0, predict_pos-max_seq_len):predict_pos]
pad_train_items = np.zeros(max_seq_len, np.int32)
pad_train_items[:len(train_items)] = train_items
input_npy[ind] = (pad_train_items,
len(train_items),
item_list[predict_pos],
user_feature[user_id]['user_gender'],
user_feature[user_id]['user_geo'])
yield input_npy
s = Sampler(dataset=dataset, generate_batch=batch, num_process=num_process)
return s
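# Illustrative usage sketch (assumes a dataset object exposing total_users()
# and get_positive_items() as used above; the feature values are hypothetical):
#
#   user_feature = {uid: {"user_gender": 0, "user_geo": 3}
#                   for uid in range(dataset.total_users())}
#   sampler = YouTubeSampler(dataset, batch_size=32, max_seq_len=20,
#                            user_feature=user_feature)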
|
StarcoderdataPython
|
3253655
|
<gh_stars>0
# Copyright 2015 <NAME>.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from fp_vdev_remote import vdev_utils
from neutron.agent.common import utils as neutron_utils
from networking_6wind.common import constants
from oslo_config import cfg
cfg.CONF.import_group('ml2_fp', 'networking_6wind.common.config')
FP_VDEV_CMD = None
def get_socket_settings():
global FP_VDEV_CMD
if FP_VDEV_CMD is None:
FP_VDEV_CMD = vdev_utils.get_vdev_cmd()
path = neutron_utils.execute(cmd=[FP_VDEV_CMD, 'get', 'sockfolder'],
run_as_root=True)
mode = neutron_utils.execute(cmd=[FP_VDEV_CMD, 'get', 'sockmode'],
run_as_root=True)
return (path.strip(), mode.strip())
def get_socket_path(socket_dir, port_id):
if cfg.CONF.ml2_fp.vhostuser_socket_use_devname:
vhostuser_socket_name = (constants.VHOSTUSER_SOCKET_DEVNAME_PREFIX
+ port_id)[:14]
else:
vhostuser_socket_name = constants.VHOSTUSER_SOCKET_PREFIX + port_id
return os.path.join(socket_dir, vhostuser_socket_name)
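# Illustrative usage sketch (the port id below is a hypothetical Neutron port
# UUID, shortened for readability):
#
#   socket_dir, socket_mode = get_socket_settings()
#   socket_path = get_socket_path(socket_dir, "4f2c7a1e-9d3b-4c2a-b111-0a1b2c3d4e5f")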
|
StarcoderdataPython
|
3347595
|
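# Reads the number of levels n and the count y of obstacle levels, prints every
# level index that is not an obstacle, then reports how many obstacles there were.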
n, y = map(int, input().split())
l = set([int(input()) for i in range(y)])
for i in range(n):
if i not in l:
print(i)
print(f"Mario got {len(l)} of the dangerous obstacles.")
|
StarcoderdataPython
|
5148936
|
<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any
from flask import g, Response
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import ngettext
from superset.constants import RouteMethod
from superset.databases.filters import DatabaseFilter
from superset.models.sql_lab import SavedQuery
from superset.queries.saved_queries.commands.bulk_delete import (
BulkDeleteSavedQueryCommand,
)
from superset.queries.saved_queries.commands.exceptions import (
SavedQueryBulkDeleteFailedError,
SavedQueryNotFoundError,
)
from superset.queries.saved_queries.filters import SavedQueryFilter
from superset.queries.saved_queries.schemas import (
get_delete_ids_schema,
openapi_spec_methods_override,
)
from superset.views.base_api import BaseSupersetModelRestApi, statsd_metrics
logger = logging.getLogger(__name__)
class SavedQueryRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(SavedQuery)
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.RELATED,
RouteMethod.DISTINCT,
"bulk_delete", # not using RouteMethod since locally defined
}
class_permission_name = "SavedQueryView"
resource_name = "saved_query"
allow_browser_login = True
base_filters = [["id", SavedQueryFilter, lambda: []]]
show_columns = [
"created_by.first_name",
"created_by.id",
"created_by.last_name",
"database.database_name",
"database.id",
"description",
"id",
"label",
"schema",
"sql",
"sql_tables",
]
list_columns = [
"created_by.first_name",
"created_by.id",
"created_by.last_name",
"database.database_name",
"database.id",
"db_id",
"description",
"label",
"schema",
"sql",
"sql_tables",
]
add_columns = ["db_id", "description", "label", "schema", "sql"]
edit_columns = add_columns
order_columns = [
"schema",
"label",
"description",
"sql",
"created_by.first_name",
"database.database_name",
]
apispec_parameter_schemas = {
"get_delete_ids_schema": get_delete_ids_schema,
}
openapi_spec_tag = "Queries"
openapi_spec_methods = openapi_spec_methods_override
related_field_filters = {
"database": "database_name",
}
filter_rel_fields = {"database": [["id", DatabaseFilter, lambda: []]]}
allowed_rel_fields = {"database"}
allowed_distinct_fields = {"schema"}
def pre_add(self, item: SavedQuery) -> None:
item.user = g.user
def pre_update(self, item: SavedQuery) -> None:
self.pre_add(item)
@expose("/", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@rison(get_delete_ids_schema)
def bulk_delete(
self, **kwargs: Any
) -> Response: # pylint: disable=arguments-differ
"""Delete bulk Saved Queries
---
delete:
description: >-
Deletes multiple saved queries in a bulk operation.
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_delete_ids_schema'
responses:
200:
description: Saved queries bulk delete
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
item_ids = kwargs["rison"]
try:
BulkDeleteSavedQueryCommand(g.user, item_ids).run()
return self.response(
200,
message=ngettext(
"Deleted %(num)d saved query",
"Deleted %(num)d saved queries",
num=len(item_ids),
),
)
except SavedQueryNotFoundError:
return self.response_404()
except SavedQueryBulkDeleteFailedError as ex:
return self.response_422(message=str(ex))
|
StarcoderdataPython
|
1930905
|
<gh_stars>1-10
from . import core # noqa
from . import sql_lab # noqa
from . import user_attributes # noqa
|
StarcoderdataPython
|
86913
|
"""Algo class definition."""
import os
import sys
import logging
import argparse
import pandas as pd
from datetime import datetime
from aqtlib import utils, Broker, Porter
from aqtlib.objects import DataStore
from abc import abstractmethod
from .instrument import Instrument
__all__ = ['Algo']
class Algo(Broker):
"""Algo class initilizer.
Args:
instruments : list
List of IB contract tuples.
resolution : str
Desired bar resolution (using pandas resolution: 1T, 1H, etc).
Default is 1T (1min)
bars_window : int
Length of bars lookback window to keep. Defaults to 120
timezone : str
Convert IB timestamps to this timezone (eg. US/Central).
Defaults to UTC
backtest: bool
Whether to operate in Backtest mode (default: False)
start: str
Backtest start date (YYYY-MM-DD [HH:MM:SS[.MS]). Default is None
end: str
Backtest end date (YYYY-MM-DD [HH:MM:SS[.MS]). Default is None
data : str
Path to the directory with AQTLib-compatible CSV files (Backtest)
output: str
Path to save the recorded data (default: None)
"""
defaults = dict(
instruments=[],
resolution="1D",
bars_window=120,
timezone='UTC',
backtest=False,
start=None,
end=None,
data=None,
output=None
)
def __init__(self, instruments, *args, **kwargs):
super(Algo, self).__init__(instruments, *args, **kwargs)
# strategy name
self.name = self.__class__.__name__
        # initialize strategy logger
self._logger = logging.getLogger(self.name)
# override args with (non-default) command-line args
self.update(**self.load_cli_args())
self.backtest_csv = self.data
# sanity checks for backtesting mode
if self.backtest:
self._check_backtest_args()
        # initialize output file
self.record_ts = None
if self.output:
self.datastore = DataStore(self.output)
self.bars = pd.DataFrame()
self.bar_hashes = {}
# -----------------------------------
# signal collector
self.signals = {}
for sym in self.symbols:
self.signals[sym] = pd.DataFrame()
self.initialize()
# ---------------------------------------
def _check_backtest_args(self):
if self.output is None:
self._logger.error(
"Must provide an output file for Backtest mode")
sys.exit(0)
if self.start is None:
self._logger.error(
"Must provide start date for Backtest mode")
sys.exit(0)
if self.end is None:
self.end = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if self.backtest_csv is not None:
self.backtest_csv = os.path.expanduser(self.backtest_csv)
if not os.path.exists(self.backtest_csv):
self._logger.error(
"CSV directory cannot be found ({dir})".format(dir=self.backtest_csv))
sys.exit(0)
elif self.backtest_csv.endswith("/"):
self.backtest_csv = self.backtest_csv[:-1]
# ---------------------------------------
def load_cli_args(self):
"""
Parse command line arguments and return only the non-default ones
        :Returns: dict
a dict of any non-default args passed on the command-line.
"""
parser = argparse.ArgumentParser(
description='AQTLib Algorithm',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--backtest', default=self.defaults['backtest'],
help='Work in Backtest mode (flag)',
action='store_true')
parser.add_argument('--start', default=self.defaults['start'],
help='Backtest start date')
parser.add_argument('--end', default=self.defaults['end'],
help='Backtest end date')
parser.add_argument('--data', default=self.defaults['data'],
help='Path to backtester CSV files')
parser.add_argument('--output', default=self.defaults['output'],
help='Path to save the recorded data')
# only return non-default cmd line args
# (meaning only those actually given)
cmd_args, _ = parser.parse_known_args()
args = {arg: val for arg, val in vars(
cmd_args).items() if val != parser.get_default(arg)}
return args
# ---------------------------------------
def run(self):
"""Starts the algo
Connects to the Porter, processes data and passes
bar data to the ``on_bar`` function.
"""
history = pd.DataFrame()
if self.backtest:
self._logger.info('Algo start backtesting...')
# history from csv dir
if self.backtest_csv:
dfs = self._fetch_csv()
# prepare history data
history = Porter.prepare_bars_history(
data=pd.concat(dfs, sort=True),
resolution=self.resolution,
tz=self.timezone
)
history = history[(history.index >= self.start) & (history.index <= self.end)]
else:
# history from porter
import nest_asyncio
nest_asyncio.apply()
# connect to database
self.porter.connect_sql()
history = self.porter.get_history(
symbols=self.symbols,
start=self.start,
end=self.end if self.end else datetime.now(),
resolution=self.resolution,
tz=self.timezone
)
history = utils.prepare_data(('AAPL', 'STK'), history, index=history.datetime)
# optimize pandas
if not history.empty:
history['symbol'] = history['symbol'].astype('category')
history['symbol_group'] = history['symbol_group'].astype('category')
history['asset_class'] = history['asset_class'].astype('category')
# drip history
Porter.drip(history, self._bar_handler)
# ---------------------------------------
def _fetch_csv(self):
"""
Get bars history from AQTLib-compatible csv file.
"""
dfs = []
for symbol in self.symbols:
file = "{data}/{symbol}.{kind}.csv".format(data=self.backtest_csv, symbol=symbol, kind="BAR")
if not os.path.exists(file):
self._logger.error(
"Can't load data for {symbol} ({file} doesn't exist)".format(
symbol=symbol, file=file))
sys.exit(0)
try:
df = pd.read_csv(file)
if not Porter.validate_csv(df, "BAR"):
self._logger.error("{file} isn't a AQTLib-compatible format".format(file=file))
sys.exit(0)
if df['symbol'].values[-1] != symbol:
self._logger.error(
"{file} doesn't content data for {symbol}".format(file=file, symbol=symbol))
sys.exit(0)
dfs.append(df)
except Exception as e:
self._logger.error(
"Error reading data for {symbol} ({errmsg})", symbol=symbol, errmsg=e)
sys.exit(0)
return dfs
# ---------------------------------------
def _bar_handler(self, bar):
"""
Invoked on every bar captured for the selected instrument.
"""
symbol = bar['symbol'].values
if len(symbol) == 0:
return
symbol = symbol[0]
# self_bars = self.bars.copy() # work on copy
self.bars = self._update_window(self.bars, bar,
window=self.bars_window)
# optimize pandas
if len(self.bars) == 1:
self.bars['symbol'] = self.bars['symbol'].astype('category')
self.bars['symbol_group'] = self.bars['symbol_group'].astype('category')
self.bars['asset_class'] = self.bars['asset_class'].astype('category')
# new bar?
hash_string = bar[:1]['symbol'].to_string().translate(
str.maketrans({key: None for key in "\n -:+"}))
this_bar_hash = abs(hash(hash_string)) % (10 ** 8)
newbar = True
if symbol in self.bar_hashes.keys():
newbar = self.bar_hashes[symbol] != this_bar_hash
self.bar_hashes[symbol] = this_bar_hash
if newbar:
if self.bars[(self.bars['symbol'] == symbol) | (
self.bars['symbol_group'] == symbol)].empty:
return
instrument = self.get_instrument(symbol)
if instrument:
self.record_ts = bar.index[0]
self._logger.debug('BAR TIME: {}'.format(self.record_ts))
self.on_bar(instrument)
self.record(bar)
# ---------------------------------------
def _update_window(self, df, data, window=None, resolution=None):
"""
        Append incoming bar data and keep only the last `window` bars per symbol.
"""
df = df.append(data, sort=True) if df is not None else data
# return
if window is None:
return df
return self._get_window_per_symbol(df, window)
# ---------------------------------------
@staticmethod
def _get_window_per_symbol(df, window):
"""
Truncate bars window per symbol.
"""
dfs = []
for symbol in list(df["symbol"].unique()):
dfs.append(df[df['symbol'] == symbol][-window:])
return pd.concat(dfs, sort=True).sort_index()
# ---------------------------------------
def get_instrument(self, symbol):
"""
A string subclass that provides easy access to misc
symbol-related methods and information using shorthand.
Call from within your strategy:
``instrument = self.get_instrument("SYMBOL")``
"""
instrument = Instrument(symbol)
instrument.attach_strategy(self)
return instrument
# ---------------------------------------
@abstractmethod
def on_bar(self, instrument):
"""
Invoked on every bar captured for the selected instrument.
This is where you'll write your strategy logic for bar events.
"""
# raise NotImplementedError("Should implement on_bar()")
pass
# ---------------------------------------
@abstractmethod
def initialize(self):
"""
Invoked once when algo starts. Used for when the strategy
needs to initialize parameters upon starting.
"""
# raise NotImplementedError("Should implement initialize()")
pass
def order(self, signal, symbol, quantity=0, **kwargs):
""" Send an order for the selected instrument
:Parameters:
direction : string
Order Type (BUY/SELL, EXIT/FLATTEN)
symbol : string
instrument symbol
quantity : int
                Order quantity
:Optional:
limit_price : float
In case of a LIMIT order, this is the LIMIT PRICE
expiry : int
Cancel this order if not filled after *n* seconds
(default 60 seconds)
order_type : string
Type of order: Market (default),
LIMIT (default when limit_price is passed),
                MODIFY (requires passing an orderId)
orderId : int
If modifying an order, the order id of the modified order
target : float
Target (exit) price
initial_stop : float
Price to set hard stop
stop_limit: bool
Flag to indicate if the stop should be STOP or STOP LIMIT.
Default is ``False`` (STOP)
trail_stop_at : float
Price at which to start trailing the stop
trail_stop_type : string
                Type of trailing stop offset (amount, percent).
Default is ``percent``
trail_stop_by : float
Offset of trailing stop distance from current price
fillorkill: bool
                Fill entire quantity or none at all
iceberg: bool
Is this an iceberg (hidden) order
tif: str
Time in force (DAY, GTC, IOC, GTD). default is ``DAY``
"""
self._logger.debug('ORDER: %s %4d %s %s', signal,
quantity, symbol, kwargs)
position = self.get_positions(symbol)
if signal.upper() == "EXIT" or signal.upper() == "FLATTEN":
if position['position'] == 0:
return
kwargs['symbol'] = symbol
kwargs['quantity'] = abs(position['position'])
kwargs['direction'] = "BUY" if position['position'] < 0 else "SELL"
# print("EXIT", kwargs)
try:
self.record({symbol + '_POSITION': 0})
except Exception as e:
pass
else:
if quantity == 0:
return
kwargs['symbol'] = symbol
kwargs['quantity'] = abs(quantity)
kwargs['direction'] = signal.upper()
# print(signal.upper(), kwargs)
# record
try:
quantity = abs(quantity)
if kwargs['direction'] != "BUY":
quantity = -quantity
self.record({symbol + '_POSITION': quantity + position['position']})
except Exception as e:
pass
# ---------------------------------------
def record(self, *args, **kwargs):
"""Records data for later analysis.
Values will be logged to the file specified via
``--output [file]`` (along with bar data) as
csv/pickle/h5 file.
Call from within your strategy:
``self.record(key=value)``
:Parameters:
** kwargs : mixed
The names and values to record
"""
if self.output:
try:
self.datastore.record(self.record_ts, *args, **kwargs)
except Exception as e:
pass
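# Illustrative usage sketch (not part of the original module; the strategy name,
# contract tuple, and order size below are hypothetical):
#
#   class BuyAndHold(Algo):
#       def initialize(self):
#           pass
#
#       def on_bar(self, instrument):
#           self.order("BUY", instrument, quantity=1)
#
#   BuyAndHold(instruments=[("AAPL", "STK", "SMART", "USD")]).run()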
|
StarcoderdataPython
|
306768
|
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
import pytest
from tensorbay import GAS
from tensorbay.dataset import Data, Dataset, Frame, FusionDataset, FusionSegment, Segment
from tensorbay.exception import ResourceNotExistError
from tensorbay.label import Catalog, Classification, Label
from tensorbay.sensor import Lidar
from tests.test_upload import CATALOG, LABEL
from tests.utility import get_dataset_name
_LOCAL_CONFIG_NAME = "HDFS_本地1"
class TestCloudStorage:
@pytest.mark.parametrize("config_name", ["azure_china_config", "oss_config", "s3_config"])
def test_create_dataset_with_config(self, accesskey, url, config_name):
gas_client = GAS(access_key=accesskey, url=url)
try:
gas_client.get_cloud_client(config_name)
except ResourceNotExistError:
pytest.skip(f"skip this case because there's no {config_name} config")
dataset_name = get_dataset_name()
gas_client.create_dataset(dataset_name, config_name=config_name)
gas_client.get_dataset(dataset_name)
gas_client.delete_dataset(dataset_name)
@pytest.mark.parametrize("config_name", ["azure_china_config", "oss_config", "s3_config"])
def test_import_cloud_files_to_dataset(self, accesskey, url, config_name):
gas_client = GAS(access_key=accesskey, url=url)
try:
cloud_client = gas_client.get_cloud_client(config_name)
except ResourceNotExistError:
pytest.skip(f"skip this case because there's no {config_name} config")
auth_data = cloud_client.list_auth_data("tests")
dataset_name = get_dataset_name()
dataset_client = gas_client.create_dataset(dataset_name, config_name=config_name)
dataset = Dataset(name=dataset_name)
segment = dataset.create_segment("Segment1")
for data in auth_data:
data.label.classification = Classification("cat", attributes={"color": "red"})
segment.append(data)
dataset_client = gas_client.upload_dataset(dataset, jobs=5)
dataset_client.commit("import data")
segment1 = Segment("Segment1", client=dataset_client)
assert len(segment1) == len(segment)
assert segment1[0].path == segment[0].path.split("/")[-1]
assert segment1[0].label.classification.category == "cat"
assert segment1[0].label.classification.attributes["color"] == "red"
assert len(auth_data) == len(segment)
gas_client.delete_dataset(dataset_name)
@pytest.mark.parametrize("config_name", ["azure_china_config", "oss_config", "s3_config"])
def test_import_cloud_files_to_fusiondataset(self, accesskey, url, config_name):
gas_client = GAS(access_key=accesskey, url=url)
try:
cloud_client = gas_client.get_cloud_client(config_name)
except ResourceNotExistError:
pytest.skip(f"skip this case because there's no {config_name} config")
auth_data = cloud_client.list_auth_data("tests")[:5]
dataset_name = get_dataset_name()
dataset_client = gas_client.create_dataset(dataset_name, True, config_name=config_name)
dataset = FusionDataset(name=dataset_name)
segment = dataset.create_segment("Segment1")
lidar = Lidar("LIDAR")
segment.sensors.add(lidar)
for data in auth_data:
data.label.classification = Classification("cat", attributes={"color": "red"})
frame = Frame()
frame["LIDAR"] = data
segment.append(frame)
dataset_client = gas_client.upload_dataset(dataset, jobs=5)
dataset_client.commit("import data")
segment1 = FusionSegment("Segment1", client=dataset_client)
assert len(segment1) == len(segment)
assert segment1[0]["LIDAR"].path == segment[0]["LIDAR"].path.split("/")[-1]
assert segment1[0]["LIDAR"].label.classification.category == "cat"
assert segment1[0]["LIDAR"].label.classification.attributes["color"] == "red"
assert len(auth_data) == len(segment)
gas_client.delete_dataset(dataset_name)
class TestLocalStorage:
def test_create_and_upload_dataset_with_config(self, accesskey, url, tmp_path):
gas_client = GAS(access_key=accesskey, url=url)
dataset_name = get_dataset_name()
try:
gas_client.get_auth_storage_config(name=_LOCAL_CONFIG_NAME)
except ResourceNotExistError:
pytest.skip(f"skip this case because there's no {_LOCAL_CONFIG_NAME} config")
gas_client.create_dataset(dataset_name, config_name=_LOCAL_CONFIG_NAME)
dataset = Dataset(name=dataset_name)
segment = dataset.create_segment("Segment1")
# When uploading label, upload catalog first.
dataset._catalog = Catalog.loads(CATALOG)
path = tmp_path / "sub"
path.mkdir()
for i in range(5):
local_path = path / f"hello{i}.txt"
local_path.write_text("CONTENT")
data = Data(local_path=str(local_path))
data.label = Label.loads(LABEL)
segment.append(data)
dataset_client = gas_client.upload_dataset(dataset)
assert dataset_client.get_catalog()
segment1 = Segment("Segment1", client=dataset_client)
assert len(segment1) == 5
for i in range(5):
assert segment1[i].path == f"hello{i}.txt"
assert segment1[i].label
gas_client.delete_dataset(dataset_name)
def test_create_local_storage_config(self, accesskey, url):
gas_client = GAS(access_key=accesskey, url=url)
local_storage_name = "local_storage_config"
local_storage = {
"name": local_storage_name,
"file_path": "file_path/",
"endpoint": "http://192.168.0.1:9000",
}
gas_client.create_local_storage_config(**local_storage)
gas_client.delete_storage_config(local_storage_name)
|
StarcoderdataPython
|
12803222
|
import json
from project.models.models import Question, Answer
from project.users.views import auth_decode
from . import BaseTest
class TestQuestions(BaseTest):
def test_get_questions(self):
res = self.client().get('/api/v1/questions', content_type='application/json', headers=dict(token=self.login()))
resp_data = json.loads(res.data.decode())
assert res.status_code == 200
self.assertTrue(resp_data['questions'])
def test_get_questions_no_token(self):
res = self.client().get('/api/v1/questions', content_type='application/json', headers=dict(token='1'))
resp_data = json.loads(res.data.decode())
assert res.status_code == 401
self.assertTrue(resp_data['response'] == 'Invalid token')
def test_get_question_success(self):
res = self.client().get('/api/v1/questions', headers=dict(token=self.login()))
question_id = json.loads(res.data.decode())['questions'][0]['question_id']
resp = self.client().get('/api/v1/questions/%s' % question_id, headers=dict(token=self.login()))
assert resp.status_code == 200
resp_data = json.loads(resp.data)
self.assertTrue(resp_data["question"])
def test_get_question_invalid_token(self):
res = self.client().get('/api/v1/questions/1', headers=dict(token=''))
assert res.status_code == 401
def test_get_question_not_found(self):
resp = self.client().get('/api/v1/questions/30', headers=dict(token=self.login()))
assert resp.status_code == 404
def test_add_question_empty_title(self):
resp = self.client().post('/api/v1/questions',
headers=dict(token=self.login()),
data=json.dumps({'body': 'Is this the real life?', 'title': ''}))
assert resp.status_code == 400
def test_add_question_empty_body(self):
resp = self.client().post('/api/v1/questions',
headers=dict(token=self.login()),
data=json.dumps({'body': '', 'title': 'life'}))
assert resp.status_code == 400
def test_add_question_same_title(self):
question = Question('unique title', 'question 10 body', 1)
question.insert_question()
resp = self.client().post('/api/v1/questions',
headers=dict(token=self.login()),
data=json.dumps({'body': 'body is here', 'title': 'unique title'}))
assert resp.status_code == 409
def test_add_question_user_empty_token(self):
resp = self.client().post('/api/v1/questions',
headers=dict(token=''),
data=json.dumps({'body': 'Is this the real life?', 'title': 'question'}))
assert resp.status_code == 401
def test_add_answer(self):
question = Question('question add answer', 'question 10 body', 1)
question.insert_question()
resp = self.client().post('/api/v1/questions/' + question.question_id.__str__() + '/answers', data=json.dumps(
dict(title='test title', body='some body of quiz')),
content_type='application/json',
headers=dict(token=self.login()))
assert resp.status_code == 201
resp_data = json.loads(resp.data.decode())
self.assertEqual(resp_data['response'], 'answer posted successfully')
def test_add_answer_duplicate(self):
question = Question('question add answer another one', 'question 10 body', 1)
question.insert_question()
Answer('duplicate', 1, question.question_id).insert_answer()
resp = self.client().post('/api/v1/questions/' + question.question_id.__str__() + '/answers', data=json.dumps(
dict(body='duplicate')),
content_type='application/json',
headers=dict(token=self.login()))
assert resp.status_code == 409
def test_add_answer_invalid_token(self):
res = self.client().get('/api/v1/questions', headers=dict(token=self.login()))
question_id = json.loads(res.data.decode())['questions'][0]['question_id']
resp = self.client().post('/api/v1/questions/' + question_id + '/answers', data=json.dumps(
dict(title='test title', body='some body of quiz')),
content_type='application/json',
headers=dict(token=''))
assert resp.status_code == 401
resp_data = json.loads(resp.data.decode())
self.assertEqual(resp_data['response'], 'Invalid token')
def test_add_answer_question_not_found(self):
resp = self.client().post('/api/v1/questions/1/answers', data=json.dumps(
dict(title='test title 1 not found', body='some body of quiz')),
content_type='application/json',
headers=dict(token=self.login()))
assert resp.status_code == 404
def test_delete_question(self):
question = Question('question delete title', 'question 10 body', 1)
question.insert_question()
Answer('answer delete title', 1, question.question_id).insert_answer()
resp = self.client().delete('api/v1/questions/' + question.question_id.__str__(),
content_type='application/json',
headers=dict(token=self.login()))
assert resp.status_code == 200
resp_data = json.loads(resp.data.decode())
self.assertEqual(resp_data['response'], 'question deleted successfully')
def test_delete_question_invalid_token(self):
resp = self.client().delete('api/v1/questions/1',
content_type='application/json',
headers=dict(token=''))
assert resp.status_code == 401
def test_delete_question_not_found(self):
resp = self.client().delete('api/v1/questions/1',
content_type='application/json',
headers=dict(token=self.login()))
assert resp.status_code == 404
def test_get_popular_question(self):
question = Question('question 10 title', 'question 10 body', 1)
question.insert_question()
Answer('answer 10 title', 1, question.question_id).insert_answer()
Answer('answer 101 title', 1, question.question_id).insert_answer()
Answer('answer 102 title', 1, question.question_id).insert_answer()
Answer('answer 103 title', 1, question.question_id).insert_answer()
res = self.client().get('api/v1/questions/popular', content_type='application/json',
headers=dict(token=self.login()))
assert res.status_code == 200
resp_data = json.loads(res.data.decode())
assert resp_data['question']
def test_get_popular_question_invalid_token(self):
res = self.client().get('api/v1/questions/popular', content_type='application/json',
headers=dict(token=''))
assert res.status_code == 401
def test_update_answer_body(self):
question = Question('update answer', 'question 10 body', 1)
question.insert_question()
token = self.login()
user_id = auth_decode(token)
answer = Answer('update answer', user_id, question.question_id)
answer.insert_answer()
resp = self.client().put('/api/v1/questions/' + question.question_id.__str__() + '/answers/' +
answer.answer_id.__str__(), data=json.dumps(
dict(body='newly updated answer')),
content_type='application/json',
headers=dict(token=token))
assert resp.status_code == 200
resp_data = json.loads(resp.data.decode())
self.assertEqual(resp_data['response'], 'answer updated successfully')
def test_update_answer_body_invalid_json(self):
question = Question('update answer 1', 'question 10 body', 1)
question.insert_question()
token = self.login()
user_id = auth_decode(token)
answer = Answer('update answer 1', user_id, question.question_id)
answer.insert_answer()
resp = self.client().put('/api/v1/questions/' + question.question_id.__str__() + '/answers/' +
answer.answer_id.__str__(), data=json.dumps(
dict(bod='newly updated answer')),
content_type='application/json',
headers=dict(token=token))
assert resp.status_code == 400
resp_data = json.loads(resp.data.decode())
self.assertEqual(resp_data['response'], 'Invalid json')
def test_update_answer_body_invalid_token(self):
question = Question('update answer 2', 'question 10 body', 1)
question.insert_question()
token = self.login()
user_id = auth_decode(token)
answer = Answer('update answer 2', user_id, question.question_id)
answer.insert_answer()
resp = self.client().put('/api/v1/questions/' + question.question_id.__str__() + '/answers/' +
answer.answer_id.__str__(), data=json.dumps(
dict(bod='newly updated answer')),
content_type='application/json',
headers=dict(token=''))
assert resp.status_code == 401
resp_data = json.loads(resp.data.decode())
self.assertEqual(resp_data['response'], 'Invalid token')
def test_update_answer_body_mark_as_preferred(self):
token = self.login()
user_id = auth_decode(token)
question = Question('update answer preferred', 'question 10 body', user_id)
question.insert_question()
answer = Answer('update answer preferred', 1, question.question_id)
answer.insert_answer()
resp = self.client().put('/api/v1/questions/' + question.question_id.__str__() + '/answers/' +
answer.answer_id.__str__(),
content_type='application/json',
headers=dict(token=token))
assert resp.status_code == 200
resp_data = json.loads(resp.data.decode())
self.assertEqual(resp_data['response'], 'preferred answer marked successfully')
    def test_endpoint_not_found(self):
        resp = self.client().get('/api/v1/questions/answers/',
                                 content_type='application/json',
                                 headers=dict(token=''))
        assert resp.status_code == 404
    def test_endpoint_method_not_allowed(self):
        resp = self.client().patch('/api/v1/questions/answers/',
                                   content_type='application/json',
                                   headers=dict(token=''))
        assert resp.status_code == 405
|
StarcoderdataPython
|
8121452
|
"""
helper module for finding installed device types
Author: <NAME>
Date: March 2018
Contact: <EMAIL>
"""
import os
import re
import json
from tiflash.utils import xmlhelper
from tiflash.utils.connections import get_connections_directory
from tiflash.utils.cpus import get_cpus_directory
DEVICES_DIR = "/ccs_base/common/targetdb/devices"
BOARD_IDS_PATH = "/ccs_base/cloudagent/src/targetDetection/board_ids.json"
# Place this file in utils/ folder to use a custom board_ids file
CUSTOM_BOARD_IDS_FILE = "board_ids.json"
class DeviceError(Exception):
"""Generic Device Error"""
pass
def get_devices_directory(ccs_path):
"""Returns full path to devices directory
Args:
ccs_path (str): full path to ccs installation to use
Returns:
str: full path to devices directory
"""
devices_directory = os.path.normpath(ccs_path + "/" + DEVICES_DIR)
if not os.path.exists(devices_directory):
raise DeviceError("Could not find 'devices' directory.")
return devices_directory
def get_device_xml_from_devicetype(devicetype, ccs_path):
"""Returns full path to device xml given a devicetype if exists, else returns None.
Args:
devicetype (str): devicetype to search xmls for
ccs_path (str): path to ccs installation to use for searching xmls
Returns:
        str: full path to the device xml
    Raises:
        DeviceError: raises exception if the devices directory can not
            be found or no device xml exists for the given devicetype
"""
device_xml = None
# Get devices xmls
device_xmls = get_device_xmls(ccs_path, full_path=True)
for dxml in device_xmls:
try: # Some xmls are not valid device xml files
device = get_devicetype(dxml)
except Exception:
continue
if device == devicetype:
device_xml = os.path.normpath(dxml)
break
else:
raise DeviceError("Could not find device xml for %s. Please install "
"drivers for %s.""" % (devicetype, devicetype))
return device_xml
def get_devicetype(device_xml):
"""Returns the devicetype from the device xml file
Args:
device_xml (str): full path to device xml file
Returns:
str: devicetype set in device xml file
"""
devicetype = None
root = __get_device_root(device_xml)
if root.tag != "device":
raise DeviceError("Error parsing devicetype from device xml: %s" %
device_xml)
devicetype = xmlhelper.get_attrib_value(root.attrib, ["desc", "partnum", "id"])
return devicetype
def get_cpu(device_xml):
"""Returns the cpu name from device xml file.
Args:
device_xml (str): full path to the device xml file to parse
Returns:
str: cpu name
"""
cpu = None
root = __get_device_root(device_xml)
cpu_element = root.find(".//cpu")
if cpu_element is None:
raise DeviceError("Error parsing cpu from device xml: %s" % device_xml)
cpu = xmlhelper.get_attrib_value(cpu_element.attrib, ["desc", "id"])
return cpu
def get_default_connection_xml(device_xml, ccs_path):
"""Returns the default connection xml from the device xml file
Args:
device_xml (str): full path to device xml file
Returns:
str: default connection xml set in device xml file
Raises:
DeviceError: raised if device xml does not contain 'default connection'
"""
connection_xml = None
root = __get_device_root(device_xml)
conn_element = root.find(".//property[@id='DefaultConnection']")
if conn_element is None:
raise DeviceError("Device XML: %s does not contain a Default "
"Connection type." % device_xml)
xml_name = xmlhelper.get_attrib_value(conn_element.attrib, ["Value"])
connection_xml = get_connections_directory(ccs_path) + '/' + xml_name
connection_xml = os.path.normpath(connection_xml)
return connection_xml
def get_device_xmls(ccs_path, full_path=False):
"""Gets a list of the device xmls files
Args:
ccs_path (str): path to ccs installation
full_path (boolean, optional): returns full path of each device xml
Returns:
list: list of device xml files
"""
device_dir = get_devices_directory(ccs_path)
devices = [f for f in os.listdir(device_dir) if f.endswith('.xml')]
if full_path:
devices = [ os.path.abspath(device_dir + '/' + c) for c in devices ]
return devices
def get_device_xml_path(xml_name, ccs_path):
"""Returns full path to device xml if exists, else returns None.
Args:
xml_name (str): name of device to search xmls for
ccs_path (str): path to ccs installation to use for searching xmls
Returns:
str or None: full path to device xml if exists otherwise returns None
Raises:
        DeviceError: raises exception if devices directory can not be found
"""
device_xml = None
if not xml_name.endswith('.xml'):
xml_name += ".xml"
device_xmls = get_device_xmls(ccs_path)
if xml_name in device_xmls:
device_xml = os.path.normpath(
get_devices_directory(ccs_path) + "/" + xml_name)
return device_xml
def get_cpu_xml(device_xml, ccs_path):
"""Returns the full path to cpu xml specified in given device xml
Args:
device_xml (str): full path to device xml to parse
Returns:
str: full path to cpu xml
"""
cpu_xml = None
root = __get_device_root(device_xml)
cpu_element = root.find(".//cpu")
p_cpu_element = root.find(".//cpu/..")
if cpu_element is None or p_cpu_element is None:
raise DeviceError("Error parsing cpu from device xml: %s" % device_xml)
instance_element = xmlhelper.get_sibling(cpu_element, p_cpu_element, -1)
if instance_element is None:
raise DeviceError("Error parsing instance-cpu from device xml: %s" % device_xml)
xml_name = xmlhelper.get_attrib_value(instance_element.attrib, ["xml"])
cpu_xml = get_cpus_directory(ccs_path) + '/' + xml_name
cpu_xml = os.path.normpath(cpu_xml)
return cpu_xml
def get_devicetypes(ccs_path):
""" Returns list of installed device names.
Searches "<ccs_path>/ccs_base/common/targetdb/devices" directory for
installed device names.
Args:
ccs_path (str): full path to ccs installation to use
Returns:
list: device names
Raises:
DeviceError: raises exception if devices directory can not
be found
"""
# Set Devices directory
devices_directory = get_devices_directory(ccs_path)
# Get devices xmls
device_xmls = get_device_xmls(ccs_path, full_path=True)
device_list = list()
for cxml in device_xmls:
try: # Some xmls are not valid device xml files
device = get_devicetype(cxml)
except Exception:
continue
device_list.append(device)
return device_list
def find_device(device_name, ccs_path):
""" Returns full device name(s) matching 'device_name'
Uses regex to try to match given 'device_name' to all installed
device types.
Args:
device_name (str): device name to try to match (i.e. xds110)
ccs_path (str): full path to ccs installation to use
Returns:
list: list of full device names that matched the given
device_name
Raises:
DeviceError: raises exception if CCS installation can not be found
"""
devices = get_devicetypes(ccs_path)
match_list = list()
device_re = re.compile(device_name.lower())
for d in devices:
if device_re.search(d.lower()) is not None:
match_list.append(d)
return match_list
def get_device_xml_from_serno(serno, ccs_path):
""" Returns full path to device xml determined by device serial no.
Uses board_ids.json file to determine devicetype from serial no.
Args:
serno (str): device serial number
ccs_path (str): full path to ccs installation to use
Returns:
str: path to device xml determined from serial number
Raises:
DeviceError: raises exception if board_ids.json file can not be found
in given CCS installation or if the devicetype can not be
determined by given serial number
"""
devices_directory = get_devices_directory(ccs_path)
    # Allow a custom board_ids file to be used by placing it in utils/
custom_board_ids_path = os.path.normpath(os.path.dirname(__file__) + '/' +
CUSTOM_BOARD_IDS_FILE)
if os.path.isfile(custom_board_ids_path):
board_ids_path = custom_board_ids_path
else:
board_ids_path = os.path.normpath(ccs_path + "/" + BOARD_IDS_PATH)
if not os.path.isfile(board_ids_path):
raise DeviceError("Could not find 'board_ids.json' file: %s"
% board_ids_path)
with open(board_ids_path) as board_ids_f:
board_ids = json.load(board_ids_f)
sernos = board_ids.keys()
for s in sernos:
if serno.startswith(s):
dxml = board_ids[s]['deviceXml'] + ".xml"
break
else:
raise DeviceError(
"Could not determine devicetype from %s." % serno)
dxml_fullpath = os.path.abspath(devices_directory + "/" + dxml)
if not os.path.isfile(dxml_fullpath):
raise DeviceError("Could not find '%s' file." % dxml)
return dxml_fullpath
def get_device_from_serno(serno, ccs_path):
""" Returns full device name determined by device serial no.
Uses board_ids.json file to determine devicetype from serial no.
Args:
serno (str): device serial number
ccs_path (str): full path to ccs installation to use
Returns:
str: devicetype determined from serial number
Raises:
DeviceError: raises exception if board_ids.json file can not be found
in given CCS installation or if the devicetype can not be
determined by given serial number
"""
dxml_fullpath = get_device_xml_from_serno(serno, ccs_path)
return get_devicetype(dxml_fullpath)
def __get_device_root(device_path):
"""Returns the root Element of the device file
Args:
device_path (str): full path to device file to parse
Returns:
xml.Element: root element of device file
"""
if not os.path.exists(device_path):
raise DeviceError("Could not find device: %s" % device_path)
root = xmlhelper.get_xml_root(device_path)
return root
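# Illustrative usage sketch (the CCS installation path and serial number below
# are hypothetical):
#
#   ccs_path = "C:/ti/ccs910/ccs"
#   matches = find_device("cc13", ccs_path)
#   devicetype = get_device_from_serno("L41002XX", ccs_path)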
|
StarcoderdataPython
|
8127753
|
import re
from pathlib import Path
from typing import List
from setuptools import find_namespace_packages, setup
def get_version(package: str) -> str:
"""
Extract package version, located in the `src/package/__version__.py`.
"""
version = Path("src", package, "__version__.py").read_text()
pattern = r"__version__ = ['\"]([^'\"]+)['\"]"
return re.match(pattern, version).group(1) # type: ignore
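# The regex above expects src/<package>/__version__.py to begin with a single
# assignment such as:  __version__ = "0.1.0"  (the version shown is illustrative).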
def get_requirements(req_file: str) -> List[str]:
"""
Extract requirements from provided file.
"""
req_path = Path(req_file)
requirements = req_path.read_text().split("\n") if req_path.exists() else []
return requirements
def get_long_description(readme_file: str) -> str:
"""
Extract README from provided file.
"""
readme_path = Path(readme_file)
long_description = (
readme_path.read_text(encoding="utf-8") if readme_path.exists() else ""
)
return long_description
setup(
name="bitcoinrpc",
python_requires=">=3.7",
version=get_version("bitcoinrpc"),
description="Lightweight Bitcoin JSON-RPC Python asynchronous client",
long_description=get_long_description("README.md"),
long_description_content_type="text/markdown",
keywords="bitcoin async json-rpc",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development :: Libraries :: Python Modules",
],
url="https://github.com/bibajz/bitcoin-python-async-rpc",
author="<NAME>",
author_email="<EMAIL>",
package_dir={"": "src"},
packages=find_namespace_packages(where="src"),
install_requires=get_requirements("requirements.txt"),
)
|
StarcoderdataPython
|
3299662
|
from unittest import TestCase
from regene.expression.string import String, EmptyExpression
class StringTest(TestCase):
def test_string_multiplication(self):
assert String("F") * 4 == "FFFF"
class EmptyTest(TestCase):
def test_empty_multiplication(self):
assert EmptyExpression() == ""
|
StarcoderdataPython
|
6664995
|
<gh_stars>1-10
"""A peak element is an element that is greater than its neighbors.
Given an input array nums, where nums[i] ≠ nums[i+1], find a peak element and return its index.
The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.
You may imagine that nums[-1] = nums[n] = -∞."""
class Solution(object):
def findPeakElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 0:
return -1
low, high = 0, len(nums) - 1
while low < high:
mid = low + (high - low) // 2
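            # A peak is guaranteed to exist within nums[low..high]: if nums[mid]
            # is on a rising slope, a peak lies strictly to the right of mid;
            # otherwise a peak lies at mid or to its left.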
if nums[mid] < nums[mid + 1]:
low = mid + 1
else:
high = mid
return low
if __name__ == '__main__':
solution = Solution()
print(solution.findPeakElement([1, 2, 1, 3, 5, 6, 4]))
|
StarcoderdataPython
|
1850545
|
import urllib.request
import re
import sys
import os.path
import http
import time
import logging
import timeit
import configparser
from datetime import datetime
# Determine the folder containing this script
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Path to the settings.ini file
SETTINGS = os.path.join(BASE_DIR, "settings.ini")
# Parser for the settings file
cfg = configparser.ConfigParser()
# Read the settings file
try:
cfg.read(SETTINGS)
except FileNotFoundError:
    # If the settings file is not found, exit with an error
logging.info("Ошибка! Файл настроек не найден\n")
print("Ошибка! Файл настроек не найден\n")
sys.exit(1)
# Load settings
GOOD_ACC = cfg.get("folders", "GOOD_ACC")
BAD_WORDS = cfg.get("folders", "BAD_WORDS")
UNSORT = cfg.get("folders", "UNSORT")
LOG = cfg.get("folders", "LOG")
HIDDEN_PROFILE_FILE = cfg.get("folders", "HIDDEN_PROFILE_FILE")
BAD_ACC = cfg.get("folders", "BAD_ACC")
AVATAR_SW = cfg.getboolean("filters", "AVATAR_SW")
HIDDEN_PROFILE = cfg.getboolean("filters", "HIDDEN_PROFILE")
HIDDEN_PROFILE_FILE_SW = cfg.getboolean("filters", "HIDDEN_PROFILE_FILE_SW")
FOLLOW_SW = cfg.getboolean("filters", "FOLLOW_SW")
FOLLOW_COUNT_FROM = cfg.getint("filters", "FOLLOW_COUNT_FROM")
FOLLOW_COUNT_TILL = cfg.getint("filters", "FOLLOW_COUNT_TILL")
FOLLOWED_BY_SW = cfg.getboolean("filters", "FOLLOWED_BY_SW")
FOLLOWED_BY_COUNT_FROM = cfg.getint("filters", "FOLLOWED_BY_COUNT_FROM")
FOLLOWED_BY_COUNT_TILL = cfg.getint("filters", "FOLLOWED_BY_COUNT_TILL")
LAST_POST_SW = cfg.getboolean("filters", "LAST_POST_SW")
POSTS_COUNT_SW = cfg.getboolean("filters", "POSTS_COUNT_SW")
POSTS_COUNT_FROM = cfg.getint("filters", "POSTS_COUNT_FROM")
POSTS_COUNT_TILL = cfg.getint("filters", "POSTS_COUNT_TILL")
LAST_POST_DAYS = cfg.getint("filters", "LAST_POST_DAYS")
RECONNECT_SW = cfg.getboolean("system", "RECONNECT_SW")
# Absolute paths to the files
UNSORT = os.path.join(BASE_DIR, UNSORT)
BAD_WORDS = os.path.join(BASE_DIR, BAD_WORDS)
GOOD_ACC = os.path.join(BASE_DIR, GOOD_ACC)
BAD_ACC = os.path.join(BASE_DIR, BAD_ACC)
HIDDEN_PROFILE_FILE = os.path.join(BASE_DIR, HIDDEN_PROFILE_FILE)
LOG = os.path.join(BASE_DIR, LOG)
logging.basicConfig(format="[%(asctime)s] %(message)s", level=logging.DEBUG, filename=LOG)
HEADERS = {
'User-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.12) Gecko/20100101 Firefox/10.0.12 Iceweasel/10.0.12',
'Connection': 'keep-alive',
'Accept-Encoding': 'deflate',
'Accept-Language': 'ru-ru,ru;q=0.8,en-us;q=0.5,en;q=0.3',
'Cache-Control': 'max-age=0',
'Accept-Charset': 'windows-1251,utf-8;q=0.7,*;q=0.3',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
description_pt = re.compile("<meta content=\"(.+?)\" name=\"description\" />")
follow_pt = re.compile("follows\":\{\"count\":(\d+)\}")
followed_by_pt = re.compile("\"followed_by\":\{\"count\":(\d+)\}")
last_post_date_pt = re.compile("\"date\":(\d+)")
hidden_acc_pt = re.compile("\"is_private\":(true)")
posts_count_pt = re.compile("\"media\":\{\"count\":(\d+)")
avatar_pt = re.compile("\"profile_pic_url\":\"(.+?)\"")
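# These regular expressions scrape counters and flags out of the JSON blob
# embedded in the public Instagram profile HTML page.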
def get_page(url):
req = urllib.request.Request(url, headers=HEADERS)
data = urllib.request.urlopen(req).read()
text = data.decode("utf-8", "ignore")
return text
def get_txt(fname):
f = open(fname, "r", encoding="utf8")
file_list = [line.strip() for line in f]
f.close()
return file_list
def write_in_file(fname, acc_login):
f = open(fname, "a+", encoding="utf8")
f.write(acc_login + "\n")
f.close()
try:
accounts = get_txt(UNSORT)
except FileNotFoundError:
    # If the file is not found, exit with an error
logging.info("Ошибка! Файл с базой аккаунтов не найден\n")
print("Ошибка! Файл с базой аккаунтов не найден\n")
sys.exit(1)
try:
stop_words = get_txt(BAD_WORDS)
except FileNotFoundError:
    # If the file is not found, exit with an error
logging.info("Ошибка! Файл с стоп-словами не найден\n")
print("Ошибка! Файл с стоп-словами не найден\n")
sys.exit(1)
old_accounts = []
# Find accounts that have already been filtered
try:
old_accounts.extend(get_txt(GOOD_ACC))
except FileNotFoundError:
pass
try:
old_accounts.extend(get_txt(BAD_ACC))
except FileNotFoundError:
pass
try:
old_accounts.extend(get_txt(HIDDEN_PROFILE_FILE))
except FileNotFoundError:
pass
if old_accounts:
for old_account in old_accounts:
try:
accounts.remove(old_account)
except ValueError:
pass
print("Количество аккаунтов в базе: " + str(len(accounts)) + "\n")
logging.info("Количество аккаунтов в базе: " + str(len(accounts)) + "\n")
for account in accounts:
start_time = timeit.default_timer()
page = ""
print("Проверка аккаунта: " + account)
logging.info("Проверка аккаунта: " + account)
try:
page = get_page("http://instagram.com/" + account + "/")
except urllib.error.HTTPError:
print("Аккаунт " + account + " не существует")
logging.info("Аккаунт " + account + " не существует")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
continue
except http.client.RemoteDisconnected:
print("Ошибка доступа к сайту")
logging.info("Ошибка доступа к сайту")
if RECONNECT_SW:
print("Переподключение включено в настройках")
logging.info("Переподключение включено в настройках")
sleep_time = 60
while page == "":
print("Ждем " + str(sleep_time) + " секунд")
logging.info("Ждем " + str(sleep_time) + " секунд")
time.sleep(sleep_time)
print("Переподключение...")
logging.info("Переподключение...")
try:
page = get_page("http://instagram.com/" + account + "/")
except http.client.RemoteDisconnected:
sleep_time += sleep_time
else:
print("Завершение скрипта")
logging.info("Завершение скрипта")
sys.exit(1)
    # Check whether the account has an avatar
if AVATAR_SW:
avatar = avatar_pt.findall(page)
if avatar:
avatar = avatar[0]
else:
print("Завершено за: {0:.5f} секунд".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд".format(timeit.default_timer() - start_time))
print("Странная ошибка связанная с аватаром у аккаунта: " + account)
logging.info("Странная ошибка связанная с аватаром у аккаунта: " + account)
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
continue
if avatar != "https:\/\/scontent.cdninstagram.com\/t51.2885-19\/11906329_960233084022564_1448528159_a.jpg":
pass
else:
print("Аккаунт " + account + " не подходит по аватару")
logging.info("Аккаунт " + account + " не подходит по аватару")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
continue
        # End of avatar check
    # Check by following count
if FOLLOW_SW:
follow = follow_pt.findall(page)
if follow:
follow = int(follow[0])
else:
follow = 0
if FOLLOW_COUNT_FROM < follow < FOLLOW_COUNT_TILL:
pass
else:
print("Аккаунт " + account + " не подходит по подпискам")
logging.info("Аккаунт " + account + " не подходит по подпискам")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
continue
    # End of following-count check
    # Check by follower count
if FOLLOWED_BY_SW:
        followed_by = followed_by_pt.findall(page)
if followed_by:
followed_by = int(followed_by[0])
else:
followed_by = 0
if FOLLOWED_BY_COUNT_FROM < followed_by < FOLLOWED_BY_COUNT_TILL:
pass
else:
print("Аккаунт " + account + " не подходит по подписчикам")
logging.info("Аккаунт " + account + " не подходит по подписчикам")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
continue
    # End of follower-count check
description = description_pt.findall(page)[0]
    # Check profile visibility
hidden_aac = hidden_acc_pt.findall(page)
if hidden_aac and hidden_aac[0] == "true":
if HIDDEN_PROFILE:
print("Скрытый аккаунт " + account + " добавлен")
logging.info("Скрытый аккаунт " + account + " добавлен")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
if HIDDEN_PROFILE_FILE_SW:
write_in_file(HIDDEN_PROFILE_FILE, account)
else:
write_in_file(GOOD_ACC, account)
continue
else:
print("Скрытый аккаунт " + account + " добавлен в список негодных аккаунтов")
logging.info("Скрытый аккаунт " + account + " добавлен в список негодных аккаунтов")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
continue
    # End of profile-visibility check
    # Check by post count
if POSTS_COUNT_SW:
posts_count = posts_count_pt.findall(page)
if posts_count:
posts_count = int(posts_count[0])
else:
posts_count = 0
if POSTS_COUNT_FROM < posts_count < POSTS_COUNT_TILL:
pass
else:
print("Аккаунт " + account + " не подходит по количеству постов")
logging.info("Аккаунт " + account + " не подходит по количеству постов")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
continue
# Конец проверки по количеству постов
# Проверка на дату последнего поста
if LAST_POST_SW:
last_post_date = last_post_date_pt.findall(page)
if last_post_date:
last_post_date = time.gmtime(int(last_post_date[0]))
last_post_date = datetime(year=last_post_date[0], month=last_post_date[1], day=last_post_date[2])
last_post_date = datetime.toordinal(last_post_date)
now_date = datetime.toordinal(datetime.now())
if (now_date - last_post_date) <= LAST_POST_DAYS:
pass
else:
print("Аккаунт " + account + " не проходит по дате последнего поста")
logging.info("Аккаунт " + account + " не проходит по дате последнего поста")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
continue
else:
print("Аккаунт " + account + " не имеет постов")
logging.info("Аккаунт " + account + " не имеет постов")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
continue
# Конец проверки на дату последнего поста
# Проверка по ключевым словам
if description.startswith("Смотрите фото и видео от"):
print("Аккаунт " + account + " добавлен")
logging.info("Аккаунт " + account + " добавлен")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(GOOD_ACC, account)
continue
description = description.lower()
for stop_word in stop_words:
if stop_word.lower() in description:
print("Аккаунт " + account + " не подходит по стоп-словам")
logging.info("Аккаунт " + account + " не подходит по стоп-словам")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(BAD_ACC, account)
break
else:
print("Аккаунт " + account + " добавлен")
logging.info("Аккаунт " + account + " добавлен")
print("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
logging.info("Завершено за: {0:.5f} секунд\n".format(timeit.default_timer() - start_time))
write_in_file(GOOD_ACC, account)
# Конец проверки по ключевым словам
print("Сортировка закончена!")
print("Профильтровано аккаунтов за сеанс: " + str(len(accounts)) + "\n")
if len(accounts) != 0:
logging.info("Сортировка закончена!")
logging.info("Профильтровано аккаунтов за сеанс: " + str(len(accounts)) + "\n")
|
StarcoderdataPython
|
357898
|
import warnings
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from ALDmodel import ALDGrowth
from core import plot_2d, plot_uq
from functools import partial
from scipy.stats import halfcauchy, triang
from scipy.interpolate import interp1d
from sopt import sbostep
def ALDinitialize(system='Al2O3-200C', noise=0.0):
# chem = (p, M, beta, tp, ...)
# chem is the tuple of chemical parameters
# p: precursor pressure (Pa)
# M: molecular mass (atomic mass units)
# beta: sticking probability
# tp: characteristic time of precursor evacuation
# (the tuples below also carry a fifth, model-specific value not documented here)
if system == 'Al2O3-200C':
chem1 = (26.66, 72, 1e-3, .2, 1.0)
chem2 = (26.66, 18, 1e-4, .2, 0.0)
T = 473 # temperature in K
sitearea = 0.225e-18 # area of a surface site, in m^2
elif system == 'Al2O3-100C':
chem1 = (26.66, 72, 1e-4, 3, 1.0)
chem2 = (26.66, 18, 1e-5, 10, 0.0)
T = 373 # temperature in K
sitearea = 0.251e-18 # area of a surface site, in m^2
elif system == 'TiO2-200C':
chem1 = (0.6665, 284, 1e-4, .2, 1.0)
chem2 = (26.66, 18, 1e-4, .2, 0.0)
T = 473 # temperature in K
sitearea = 1.17e-18 # area of a surface site, in m^2
elif system == 'W-200C':
chem1 = (6.665, 297, 0.2, .2, 1.0)
chem2 = (26.66, 62, 0.05, .2, 0.0)
T = 473 # temperature in K
sitearea = 0.036e-18 # area of a surface site, in m^2
apf = ALDGrowth(T, sitearea, chem1, chem2, noise)
return apf
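# Illustration (not part of the original script; the dose/purge times below are arbitrary):
# a minimal sketch of using the helper above -- build a model for one chemistry and query the
# normalized growth per cycle via apf.cycle(t1, t2, t3, t4, n), just as the sweep loop below does.
def _ald_quick_check(system='Al2O3-200C'):
    apf_demo = ALDinitialize(system)
    gpc = apf_demo.cycle(0.1, 2.0, 0.5, 1.5, 3)[-1]  # illustrative times in seconds
    print(system, 'normalized growth per cycle:', gpc)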
if __name__ == '__main__':
warnings.filterwarnings("ignore")
sns.set_style('whitegrid')
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "dejavuserif"
nt = 20
systemL = ['Al2O3-200C', 'Al2O3-100C', 'TiO2-200C', 'W-200C']
chemAL = ['TMA', 'TMA', 'TTIP', '$\mathregular{WF_6}$']
chemBL = ['$\mathregular{H_2O}$', '$\mathregular{H_2O}$',
'$\mathregular{H_2O}$', '$\mathregular{Si_2H_6}$']
bndsL = [
np.array([0.2, 4, 1, 3]),
np.array([7, 60, 6, 180]),
np.array([24, 2, 0.2, 3]),
np.array([0.04, 4, 0.02, 4]),
]
cL = sns.color_palette()[:4]
mL = ['o', 's', 'd', '^']
for system, chemA, chemB, bnds in zip(systemL, chemAL, chemBL, bndsL):
print('system:', system)
apf = ALDinitialize(system)
chemL = ['t1', 't2', 't3', 't4']
tLL = []
labelL = []
for bnd, chem, ii in zip(bnds, chemL, range(4)):
tL = np.linspace(0, bnd, nt)
tLL += [tL]
blab = bnds.astype('str')
blab[ii] = r'$t_{}$'.format(ii+1)
label = [blab[0], 's, ', blab[1], 's, ',
blab[2], 's, ', blab[3], 's']
labelL += ["".join(label)]
zips = zip(range(4), tLL, cL, bnds, labelL)
plt.figure(num=system + '_uptake', figsize=(5, 4))
opttimes = np.zeros((4, 5))
for ii, tL, c, bnd, label in zips:
gL = np.array([])
for t in tL:
bm = bnds.astype('float32')
bm[ii] = t
g = apf.cycle(bm[0], bm[1], bm[2], bm[3], 3)[-1]
gL = np.append(gL, g)
f = interp1d(np.log10(np.abs(1-gL)), tL, fill_value='extrapolate')
print(label, ':', f(-3))
opttimes[ii, :] = f([-3, -2.5, -2, -1.5, -1])
if ii in [0, 2]:
y = 1 - gL
else:
y = gL - 1
plt.semilogy(tL, y, color=c, marker='', ls='-',
alpha=0.9, label=label)
df = pd.DataFrame(opttimes, columns=['0.001', '0.00316', '0.01', '0.0316', '0.1'])
df.to_csv(system + '_uptake.csv')
plt.xlabel(r'$t_i$ (s)', fontsize=12)
plt.ylabel('|Normalized growth per cycle - 1|', fontsize=12)
plt.xlim([-0.02*np.max(bnds), 0.6*np.max(bnds)])
plt.ylim([5e-4, 2e1])
plt.grid(which='minor', axis='y')
plt.grid(which='minor', axis='x')
plt.legend()
plt.tight_layout()
plt.savefig(system + '_uptake.png')
plt.show()
|
StarcoderdataPython
|
6631988
|
# from . import readfile  # import the submodules here in __init__ so they are initialized first; a file outside the package can then use `from package import *` to reach them
# from . import writefile
# __init__
# 1. Declares that the folder is a package
# 2. Can perform initialization work
# 3. Can declare __all__ to influence `from package import *`; only the names listed in __all__ are imported
# Of course, this init file can also, like any ordinary module file, use __all__ = ['readfile'] to decide which of its names are exposed
# This is meant for other module files that use the star import `from changefile import *`, so the star import does not pull in every method of the modules referenced in the init
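# Illustration (hypothetical package layout, not from the original project): a minimal __init__.py
# applying the points above -- initialize submodules and use __all__ to control star imports.
#
#   mypkg/__init__.py
#   -----------------
#   from . import readfile   # runs readfile's module-level code once, at package import
#   from . import writefile
#   __all__ = ['readfile']   # `from mypkg import *` now exposes only `readfile`
#
#   elsewhere.py
#   -----------------
#   from mypkg import *      # imports readfile, but not writefile
#   readfile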
|
StarcoderdataPython
|
8187222
|
from . import member_bp
from app.models import *
from .models_members import *
from flask import jsonify, request
from sqlalchemy import and_
from sqlalchemy import or_
import json
from datetime import datetime
from lib.decorators import json
from lib.email1 import send_email2
@member_bp.route('/add_update_member_info', methods=['POST'])
def add_update_member_info():
requestObject = request.get_json()
try:
if "id" in requestObject:
member_info = Member_Info.query.get(requestObject["id"])
else:
member_info = Member_Info()
member_info.import_data(requestObject) # the instance transfers the frontend data onto the backend model
db.session.add(member_info)
db.session.commit()
return jsonify({"message": "success"})
except Exception as e:
print str(e)
db.session.rollback()
return jsonify({"message": "error","error_info" : str(e)})
@member_bp.route('/add_update_family_info', methods=['POST'])
def add_update_family_info():
requestObject = request.get_json()
try:
if "id" in requestObject:
family_info = Family_Info.query.get(requestObject["id"])
else:
family_info = Family_Info()
family_info.import_data(requestObject)
db.session.add(family_info)
db.session.commit()
return jsonify({"message": "success"})
except Exception as e:
print str(e)
db.session.rollback()
return jsonify({"message": "error","error_info" : str(e)})
@member_bp.route('/get_member_info_by_id', methods=['GET']) # getting 1 using 1 argument
def get_member_info_by_id():
id = request.args["id"]
try:
TEMP_API_PARAMETERS = {
"SEARCH_RESPONSES": {
"age": "",
"name": "",
"married": "",
}}
request_settings = TEMP_API_PARAMETERS['SEARCH_RESPONSES']
member = Member_Info.query.get(id)
member_info = {}
if member is not None:
for key in request_settings:
member_info[key] = getattr(member, key)
return jsonify({
"response": "success", "member_detail": member_info})
except Exception as e:
print str(e)
return jsonify({"response": "error"})
@member_bp.route('/get_members_by_age', methods=['GET'])# getting multiple using 1 argument
def get_members_by_age():
age = request.args["age"]
try:
TEMP_API_PARAMETERS = {
"SEARCH_RESPONSES": {
"id": "",
"name": "",
}}
request_settings = TEMP_API_PARAMETERS['SEARCH_RESPONSES']
result = []
members = Member_Info.query.filter(Member_Info.age>=age)
for member in members:
member_data = {}
for key in request_settings:
member_data[key] = getattr(member, key)
result.append(member_data)
return jsonify({
"response": "success", "members_details": result})
except Exception as e:
print str(e)
return jsonify({"response": "error"})
@member_bp.route('/add_member_image', methods=['POST'])
def add_member_image():
requestObject = request.get_json()
try:
member = Member_Info.query.get(requestObject["mid"])
if member is None:
return jsonify({"response": "failure", "error": "no member found"})
image = Member_Images() # create a Member_Images instance
image.import_data(requestObject) # copy the request payload onto the model
db.session.add(image) # stage the new row in the session buffer
db.session.flush() # flush pushes pending changes to the database so generated values become usable before commit
image.set_film_id(requestObject["mid"]) # link the image to the member (no generated value is needed on this line, so flush is not strictly required for it)
db.session.commit()
return jsonify({"message": "success"})
except Exception as e: # flush is only needed when a value produced by the pending changes must be used before committing
print str(e)
db.session.rollback()
return jsonify({"message": "error"})
@member_bp.route('/get_member_images', methods=['GET']) # getting multiple using 1 argument
def get_member_images():
fid = request.args["fid"]
try:
TEMP_API_PARAMETERS = {
"SEARCH_RESPONSES": {
"id": "",
"image_url": ""
}}
request_settings = TEMP_API_PARAMETERS['SEARCH_RESPONSES']
result = []
images = Member_Images.query.filter(fid==Member_Images.mem_id)
for image in images:
image_data = {}
for key in request_settings:
image_data[key] = getattr(image, key)
result.append(image_data)
return jsonify({
"response": "success", "image_details": result})
except Exception as e:
print str(e)
return jsonify({"response": "error"})
@member_bp.route('/add_member_address', methods=['POST'])
def add_member_address():
requestObject = request.get_json()
try:
member = Member_Info.query.get(requestObject["fid"])
if member is None:
return jsonify({"response": "failure", "error": "no member found"})
address = Member_Address() #using class make object
address.import_data(requestObject)
db.session.add(address)
db.session.flush()
member.set_address(address.id) #used for association of the to tables
db.session.commit()
return jsonify({"message": "success"})
except Exception as e:
print str(e)
db.session.rollback()
return jsonify({"message": "error"})
@member_bp.route('/get_member_address', methods=['GET']) # case 2 of foreign
def get_member_address():
fid = request.args["fid"]
try:
TEMP_API_PARAMETERS = {
"SEARCH_RESPONSES": {
"id": "",
"address" : ""
}}
request_settings = TEMP_API_PARAMETERS['SEARCH_RESPONSES']
member= Member_Info.query.get(fid)
#print "hi"
address= Member_Address.query.get(member.id)
#print "hi"
address_data={}
if address is not None:
for key in request_settings:
address_data[key] = getattr(address, key)
return jsonify({
"response": "success", "address_detail": address_data})
except Exception as e:
print str(e)
return jsonify({"response": "error"})
@member_bp.route('/sendquery', methods=['GET', 'POST'])
def sendQuery():
RequestObject = request.get_json()
print "hi"
request_user_query = RequestObject['user_query']
request_user_name = RequestObject['user_name']
request_user_mail = RequestObject['user_mail']
request_user_number = RequestObject['user_mobile_number']
if len(request_user_query) > 0 and len(request_user_name) > 0 and len(request_user_mail) > 0 and len(request_user_number) > 0 :
send_email2('<EMAIL>','New User Query',request_user_query)
return {"message":"success"}
else:
return {"message" : "form not filled"}
|
StarcoderdataPython
|
373633
|
from django.core.management.base import BaseCommand
from oxlos.newsletter.mail import send_newsletters
class Command(BaseCommand):
help = "Send newsletters."
def add_arguments(self, parser):
parser.add_argument(
"--dry-run",
action="store_true",
dest="dry_run",
default=False,
help="Test creation of newsletter emails (without sending)"
),
def handle(self, *args, **options):
dry_run = options["dry_run"]
print("sending newsletters...")
count = send_newsletters(dry_run=dry_run)
print(f"{count} sent.")
|
StarcoderdataPython
|
3305355
|
<reponame>vyahello/quotes
"""ASGI config for manager project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
from django.core.handlers.asgi import ASGIHandler
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "manager.settings")
application: ASGIHandler = get_asgi_application()
|
StarcoderdataPython
|
3337365
|
import os
import sys
import argparse
import importlib
import numpy as np
import tensorflow as tf
import pdb
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '..'))
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import provider
import provider_riconv
import tf_util
import pdb
import time
import scipy
import re
import pickle
import gc
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='rinet', help='Model name')
parser.add_argument('--load_dir', required=True, default='rinet')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=251, help='Epoch to run [default: 251]')
parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 4]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--rotation', action='store_true', help='Whether to apply rotation during training [default: False]')
parser.add_argument('--finetune', action='store_true', help='Whether to finetune [default: False]')
parser.add_argument('--checkpoint', default='log/model.ckpt', help='Checkpoint directory to finetune [default: log/model.ckpt]')
parser.add_argument('--num_pool', type=int, default=256, help='Number of pooling [default: 256]')
parser.add_argument('--pool_knn1', type=int, default=64, help='Number of neighbors for lf [default: 64]')
parser.add_argument('--num_votes', type=int, default=12)
parser.add_argument('--so3', action='store_true', default=True, help='Whether training in SO3 setting')
parser.add_argument('--azi', action='store_true', help='Whether training in azimuthal rotation')
FLAGS = parser.parse_args()
LOAD_DIR = FLAGS.load_dir
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
FINETUNE = FLAGS.finetune
CHECKPOINT = FLAGS.checkpoint
NUM_VOTES = FLAGS.num_votes
sys.path.append(os.path.join(BASE_DIR, FLAGS.load_dir))
MODEL = importlib.import_module(FLAGS.model)
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
print(MODEL_FILE)
LOG_FOUT = open(os.path.join(LOAD_DIR, 'log_test.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 2048
NUM_CLASSES = 40
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
BASE_REG_WEIGHT = 0.001
REG_WEIGHT_DECAY_RATE = 0.5
REG_WEIGHT_DECAY_STEP = float(DECAY_STEP)
# ModelNet40 official train/test split
TRAIN_FILES = provider.getDataFiles(
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles(
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
# Load data beforehand
KEYS = ['data', 'label']
TRAIN_DATA, TRAIN_LABEL = \
zip(*[provider.loadDataFile_with_keys(fn, KEYS) for fn in TRAIN_FILES])
TEST_DATA, TEST_LABEL = \
zip(*[provider.loadDataFile_with_keys(fn, KEYS) for fn in TEST_FILES])
# concatenate batches
TRAIN_DATA = np.concatenate(TRAIN_DATA, axis=0)
TRAIN_LABEL = np.squeeze(np.concatenate(TRAIN_LABEL, axis=0))
TEST_DATA = np.concatenate(TEST_DATA, axis=0)
TEST_LABEL = np.squeeze(np.concatenate(TEST_LABEL, axis=0))
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def get_reg_weight(batch):
reg_weight = tf.train.exponential_decay(
BASE_REG_WEIGHT,
batch * BATCH_SIZE,
REG_WEIGHT_DECAY_STEP,
REG_WEIGHT_DECAY_RATE,
staircase=False)
reg_weight = tf.maximum(reg_weight, 0.00001)
return reg_weight
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, 1024)
input_graph = tf.placeholder(tf.float32, shape = (BATCH_SIZE, NUM_POINT, NUM_POINT))
is_training_pl = tf.placeholder(tf.bool, shape=())
flag_pl = tf.placeholder(tf.int32, shape=())
flag1 = tf.placeholder(tf.int32, shape=())
flag2 = tf.placeholder(tf.int32, shape=())
flag3 = tf.placeholder(tf.int32, shape=())
dilation = tf.placeholder(tf.int32, shape=())
gcn1 = tf.placeholder(tf.int32, shape=())
gcn2 = tf.placeholder(tf.int32, shape=())
gcn3 = tf.placeholder(tf.int32, shape=())
print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0, trainable=False)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, FLAGS.num_pool, FLAGS.pool_knn1,
is_training_pl, bn_decay=bn_decay, flag=flag_pl, flag2=flag2, flag3=flag3,gcn1=gcn1, gcn2=gcn2, gcn3=gcn3, dilation=dilation)
reg_weight = get_reg_weight(batch)
loss = MODEL.get_loss(pred, labels_pl, end_points)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
if FINETUNE:
"""THIS IS NOT WORKING CURRENTLY"""
finetune_var_names = ['fc1', 'fc2', 'fc3']
finetuning_vars = [v for v in tf.trainable_variables() if v.name.split('/')[0] in finetune_var_names]
orig_vars = [v for v in tf.trainable_variables() if v.name.split('/')[0] not in finetune_var_names]
gvs = optimizer.compute_gradients(loss, [orig_vars, finetuning_vars])
scaled_gvs = [(grad * 0.1, var) for (grad, var) in gvs[:len(orig_vars)]] + gvs[len(orig_vars):]
train_op = optimizer.apply_gradients(scaled_gvs, global_step=batch)
else:
gvs = optimizer.compute_gradients(loss)
train_op = optimizer.apply_gradients(gvs, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Load parameters before finetuning
if FINETUNE:
variables_to_restore = [v for v in tf.all_variables() if 'rel' not in v.name.split('/')[0]]
variables_to_restore = [v for v in variables_to_restore if not v.name == 'batch']
pre_saver = tf.train.Saver(variables_to_restore)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
saver.restore(sess, LOAD_DIR + '/model.ckpt')
# Add summary writers
#merged = tf.merge_all_summaries()
merged = tf.summary.merge_all()
test_writer = tf.summary.FileWriter(os.path.join(LOAD_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'gvs': gvs,
'train_op': train_op,
'merged': merged,
'step': batch,
'flag': flag_pl,
'flag2': flag2,
'flag3': flag3,
'dilation' : dilation,
'gcn1' : gcn1,
'gcn2' : gcn2,
'gcn3' : gcn3
}
acc, cls_avg = eval_one_epoch(sess, ops, test_writer, NUM_VOTES)
print('Overall accuracy: ', acc)
def eval_one_epoch(sess, ops, test_writer, num_votes):
""" ops: dict mapping from string to tf ops """
is_training = False
current_data = TEST_DATA[:, 0:NUM_POINT, :]
current_label = TEST_LABEL
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
pred_conf = np.zeros((0, 40))
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
shape_txt = open('data/modelnet40_ply_hdf5_2048/shape_names.txt', 'r')
label_to_class = shape_txt.read().split('\n')
flag1 = 64
flag2 = 32
flag3 = 16
gcn1 = 16
gcn2 = 8
gcn3 = 4
log_string('----------------')
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx + 1 ) * BATCH_SIZE
batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES))
for vote_idx in range(NUM_VOTES):
shuffle = np.arange(NUM_POINT)
np.random.shuffle(shuffle)
rot_data = provider_riconv.so3_rotate(current_data[start_idx:end_idx, shuffle, :])
feed_dict = {
ops['pointclouds_pl']: rot_data,
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training,
ops['flag'] : flag1,
ops['flag2'] : flag2,
ops['flag3'] : flag3,
ops['dilation'] : 3,
ops['gcn1'] : gcn1,
ops['gcn2'] : gcn2,
ops['gcn3'] : gcn3
}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
batch_pred_sum += pred_val
pred_conf = np.argmax(batch_pred_sum, 1)
test_writer.add_summary(summary, step)
correct = np.sum(pred_conf == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += (loss_val*BATCH_SIZE)
for i in range(start_idx, end_idx):
l = current_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_conf[i-start_idx] == l)
# Handle remaining
if file_size - num_batches * BATCH_SIZE > 0:
start_idx = num_batches * BATCH_SIZE
end_idx = file_size
input_data = np.zeros((BATCH_SIZE, 1024, 3))
input_label = np.zeros(BATCH_SIZE)
input_label[0:end_idx-start_idx] = current_label[start_idx:end_idx]
batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES))
for vote_idx in range(NUM_VOTES):
shuffle = np.arange(NUM_POINT)
np.random.shuffle(shuffle)
input_data[0:end_idx - start_idx, ...] = provider_riconv.so3_rotate(current_data[start_idx:end_idx, 0:NUM_POINT, :])
feed_dict = {
ops['pointclouds_pl']: input_data,
ops['labels_pl']: input_label,
ops['is_training_pl']: is_training,
ops['flag'] : flag1,
ops['flag2'] : flag2,
ops['flag3'] : flag3,
ops['dilation'] : 3,
ops['gcn1'] : gcn1,
ops['gcn2'] : gcn2,
ops['gcn3'] : gcn3
}
summary, step, loss_val, pred_val= sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
batch_pred_sum += pred_val
pred_conf = np.argmax(batch_pred_sum, 1)
test_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, axis=1)
correct = np.sum(pred_conf[0:end_idx-start_idx] == current_label[start_idx:end_idx])
total_correct += correct
total_seen += end_idx - start_idx
loss_sum += (loss_val * (end_idx - start_idx))
for i in range(start_idx, end_idx):
l = current_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_conf[i - start_idx] == l)
return (total_correct / float(total_seen)), np.mean(np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float))
if __name__ == "__main__":
start_time = time.time()
train()
end_time = time.time()
LOG_FOUT.close()
|
StarcoderdataPython
|
3431074
|
<reponame>wanchaosoft/commontools
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
__project__ = 'commontools'
__author__ = 'wanchao'
__create_time__ = '2017/12/11 16:42'
"""
from datetime import datetime
import os
import sys
NOW = None
def print_time(msg=None):
"""打印时间"""
global NOW
if not NOW:
NOW = datetime.now()
if msg is None:
msg = ''
print('%s cost time: %s' % (msg, (datetime.now() - NOW).total_seconds()))
def debugger():
"""在启动debug模式下,启动pdb调试
生产环境DEBUG=False,自动禁用pdb调试
"""
try:
debug = eval(os.environ.get('DEBUG'))
except:
debug = False
if debug:
import pdb
# pdb.set_trace()
_pdb = pdb.Pdb()
_pdb.set_trace(sys._getframe().f_back)
# TODO: profile, pstats, hotshot and timeit can be used for performance analysis
# profile: performance profiling
# pstats: sorts the output file that profile writes its results to
# hotshot: compensates for profile's timing imprecision (millisecond level), but is not suitable for multithreaded code
# timeit: small and practical
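# Illustration (assumption: not part of the original module): a minimal sketch of the profiling
# tools listed in the TODO above, using cProfile (the C implementation of profile) together with
# pstats, plus a timeit one-liner. hotshot is omitted because it no longer exists in Python 3.
def _profiling_demo():
    import cProfile
    import pstats
    import timeit
    def work():
        return sum(i * i for i in range(10000))
    profiler = cProfile.Profile()
    profiler.enable()
    work()
    profiler.disable()
    pstats.Stats(profiler).sort_stats("cumulative").print_stats(5)  # sort and show the top calls
    print(timeit.timeit(work, number=100))  # total seconds for 100 runs of the snippet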
|
StarcoderdataPython
|
9781835
|
import pickle
import queue
import time
from pathlib import Path
import numpy as np
from analysis_util import get_numpy_array, BaseFeatureHelper, \
get_sample_feature_value
from util.constants import NEWS_ROOT_NODE, RETWEET_EDGE, REPLY_EDGE, RETWEET_NODE, REPLY_NODE
from util.util import tweet_node
def get_post_tweet_deepest_cascade(prop_graph: tweet_node, edge_type=RETWEET_EDGE):
max_height = 0
max_height_node = None
for node in prop_graph.children:
height = get_tree_height(node, edge_type)
if height > max_height:
max_height = height
max_height_node = node
return max_height_node, max_height
def get_num_cascade(node: tweet_node, edge_type="retweet"):
if edge_type == "retweet":
return len(node.retweet_children)
elif edge_type == "reply":
return len(node.reply_children)
else:
return len(node.children)
def get_temp_num_cascade(node: tweet_node, edge_type="retweet", max_time=time.time()):
if edge_type == "retweet":
children = node.retweet_children
elif edge_type == "reply":
children = node.reply_children
else:
children = node.children
cascade_count = 0
for child in children:
if child.created_time <= max_time:
cascade_count += 1
return cascade_count
def get_node_count_deepest_cascade(news_graphs: tweet_node, edge_type):
node_counts = []
for prop_graph in news_graphs:
max_height_node, max_height = get_post_tweet_deepest_cascade(prop_graph)
node_counts.append(get_nodes_count(max_height_node, edge_type))
return node_counts
def get_max_outdegree(node: tweet_node, edge_type="retweet"):
if node is None:
return 0
if edge_type == "retweet":
children = node.retweet_children
elif edge_type == "reply":
children = node.reply_children
else:
children = node.children
if node.node_type == NEWS_ROOT_NODE:
max_outdegree = 0
else:
max_outdegree = len(children)
for child in children:
max_outdegree = max(max_outdegree, get_max_outdegree(child, edge_type))
return max_outdegree
def get_max_out_degree_node(node: tweet_node, edge_type=RETWEET_EDGE):
if node is None:
return None
if edge_type == "retweet":
children = node.retweet_children
elif edge_type == "reply":
children = node.reply_children
else:
children = node.children
if node.node_type == NEWS_ROOT_NODE:
max_outdegree_node, max_out_degree = None, 0
else:
max_outdegree_node, max_out_degree = node, len(children)
for child in children:
child_max_out_degree_node, child_max_out_degree = get_max_out_degree_node(child, edge_type)
if child_max_out_degree > max_out_degree:
max_out_degree = child_max_out_degree
max_outdegree_node = child_max_out_degree_node
return max_outdegree_node, max_out_degree
def get_target_node_level(root_node: tweet_node, target_node, level=0):
if root_node is None:
return 0
if root_node.tweet_id == target_node.tweet_id:
return level
for child in root_node.children:
res_level = get_target_node_level(child, target_node, level + 1)
if res_level != 0:
return res_level
return 0
def get_depth_of_max_degree_node(prop_graph, edge_type=RETWEET_EDGE):
max_out_degree_node, max_out_degree = get_max_out_degree_node(prop_graph, edge_type)
if max_out_degree_node is None:
return 0
return get_target_node_level(prop_graph, max_out_degree_node, 0)
def get_max_out_degree_depths(prop_graphs, edge_type=RETWEET_EDGE):
out_degree_depths = []
for news_node in prop_graphs:
out_degree_depths.append(get_depth_of_max_degree_node(news_node, edge_type))
return out_degree_depths
def get_tree_height(node, edge_type="retweet"):
if node is None:
return 0
max_child_height = 0
if edge_type == "retweet":
children = node.retweet_children
elif edge_type == "reply":
children = node.reply_children
else:
children = node.children
for child in children:
max_child_height = max(max_child_height, get_tree_height(child, edge_type))
return max_child_height + 1
def get_nodes_count(node: tweet_node, edge_type="retweet"):
if node is None:
return 0
node_count = 0
if edge_type == "retweet":
children = node.retweet_children
elif edge_type == "reply":
children = node.reply_children
else:
children = node.children
for child in children:
node_count += get_nodes_count(child, edge_type)
return node_count + 1
def get_temporal_nodes_count(node: tweet_node, edge_type="retweet", max_time=time.time()):
if node is None or (node.created_time is not None and node.created_time > max_time):
return 0
node_count = 0
if edge_type == "retweet":
children = node.retweet_children
elif edge_type == "reply":
children = node.reply_children
else:
children = node.children
for child in children:
node_count += get_temporal_nodes_count(child, edge_type, max_time)
return node_count + 1
def get_node_size_by_time(prop_graphs: list, edge_type: str, time_interval_sec: list):
temporal_tree_node_size = []
for news_node in prop_graphs:
temp_node_sizes = []
first_post_time = get_first_post_time(news_node)
for time_limit in time_interval_sec:
node_count = get_temporal_nodes_count(news_node, edge_type, first_post_time + time_limit)
temp_node_sizes.append(node_count)
temporal_tree_node_size.append(temp_node_sizes)
return temporal_tree_node_size
def get_temporal_tree_height(node: tweet_node, edge_type="retweet", max_time=time.time()):
if node is None or (node.created_time is not None and node.created_time > max_time):
return 0
max_child_height = 0
if edge_type == "retweet":
children = node.retweet_children
elif edge_type == "reply":
children = node.reply_children
else:
children = node.children
for child in children:
max_child_height = max(max_child_height, get_temporal_tree_height(child, edge_type, max_time))
return max_child_height + 1
def get_num_cascades_by_time(prop_graphs: list, edge_type: str, time_interval_sec: list):
temporal_num_cascades = []
for news_node in prop_graphs:
temp_cascade_num = []
first_post_time = get_first_post_time(news_node)
for time_limit in time_interval_sec:
node_count = get_temp_num_cascade(news_node, edge_type, first_post_time + time_limit)
temp_cascade_num.append(node_count)
temporal_num_cascades.append(temp_cascade_num)
return temporal_num_cascades
def get_tree_heights(news_graphs: list, edge_type):
heights = []
for news_node in news_graphs:
heights.append(get_tree_height(news_node, edge_type))
return heights
def analyze_height(news_graphs: list, edge_type):
heights = get_tree_heights(news_graphs, edge_type)
print("----HEIGHT-----")
print("max", max(heights))
print("min", min(heights))
print("avg", np.mean(heights))
def get_max_outdegrees(news_graphs: list, edge_type):
max_outdegrees = []
for news_node in news_graphs:
max_outdegrees.append(get_max_outdegree(news_node, edge_type))
return max_outdegrees
def analyze_max_outdegree(news_graphs: list, edge_type):
max_outdegrees = get_max_outdegrees(news_graphs, edge_type)
print("-----MAX - OUT DEGREE -----")
print("max", max(max_outdegrees))
print("min", min(max_outdegrees))
print("avg", np.mean(max_outdegrees))
def get_prop_graps_cascade_num(news_graphs: list, edge_type):
cascade_num = []
for news_node in news_graphs:
cascade_num.append(get_num_cascade(news_node, edge_type))
return cascade_num
def analyze_cascade(news_graphs: list, edge_type):
cascade_num = get_prop_graps_cascade_num(news_graphs, edge_type)
print("-----CASCADE-----")
print("max", max(cascade_num))
print("min", min(cascade_num))
print("avg", np.mean(cascade_num))
def get_prop_graphs_node_counts(news_graphs: list, edge_type):
node_counts = []
for news_node in news_graphs:
node_counts.append(get_nodes_count(news_node, edge_type))
return node_counts
def analyze_node_count(news_graphs: list, edge_type):
node_counts = get_prop_graphs_node_counts(news_graphs, edge_type)
print("----NODE SIZE-----")
print("max", max(node_counts))
print("min", min(node_counts))
print("avg", np.mean(node_counts))
def get_height_by_time(prop_graphs: list, edge_type: str, time_interval_sec: list):
temporal_tree_height = []
for news_node in prop_graphs:
temp_heights = []
first_post_time = get_first_post_time(news_node)
for time_limit in time_interval_sec:
height = get_temporal_tree_height(news_node, edge_type, first_post_time + time_limit)
temp_heights.append(height)
temporal_tree_height.append(temp_heights)
return temporal_tree_height
def analyze_height_by_time(prop_graphs: list, edge_type: str, time_interval_sec: list):
temporal_tree_height = get_height_by_time(prop_graphs, edge_type, time_interval_sec)
temporal_tree_height = np.array([np.array(val) for val in temporal_tree_height])
for idx, time_limit_sec in enumerate(time_interval_sec):
heights_at_time = temporal_tree_height[:, idx]
print("Time limit: {}".format(time_limit_sec))
print("Min height : {}".format(np.min(heights_at_time)))
print("Max height : {}".format(np.max(heights_at_time)))
print("Mean height : {}".format(np.mean(heights_at_time)))
print(flush=True)
def analyze_cascade_num_by_time(prop_graphs: list, edge_type: str, time_interval_sec: list):
temporal_cascade_num = get_num_cascades_by_time(prop_graphs, edge_type, time_interval_sec)
temporal_cascade_num = np.array([np.array(val) for val in temporal_cascade_num])
for idx, time_limit_sec in enumerate(time_interval_sec):
heights_at_time = temporal_cascade_num[:, idx]
print("Time limit: {}".format(time_limit_sec))
print("Min num cascade : {}".format(np.min(heights_at_time)))
print("Max num cascade : {}".format(np.max(heights_at_time)))
print("Mean num cascade : {}".format(np.mean(heights_at_time)))
print(flush=True)
def analyze_node_size_by_time(prop_graphs: list, edge_type: str, time_interval_sec: list):
temporal_tree_node_sizes = get_node_size_by_time(prop_graphs, edge_type, time_interval_sec)
temporal_tree_node_sizes = np.array([np.array(val) for val in temporal_tree_node_sizes])
for idx, time_limit_sec in enumerate(time_interval_sec):
heights_at_time = temporal_tree_node_sizes[:, idx]
print("Time limit: {}".format(time_limit_sec))
print("Min node size : {}".format(np.min(heights_at_time)))
print("Max node size : {}".format(np.max(heights_at_time)))
print("Mean node size : {}".format(np.mean(heights_at_time)))
print(flush=True)
def get_first_post_time(node: tweet_node):
first_post_time = time.time()
for child in node.children:
first_post_time = min(first_post_time, child.created_time)
return first_post_time
def get_num_of_cascades_with_retweets(root_node: tweet_node):
num_cascades = 0
for node in root_node.retweet_children:
if len(node.retweet_children) > 0:
num_cascades += 1
return num_cascades
def get_prop_graphs_num_of_cascades_with_retweets(prop_graphs, edge_type=RETWEET_EDGE):
return get_sample_feature_value(prop_graphs, get_num_of_cascades_with_retweets)
def get_fraction_of_cascades_with_retweets(root_node: tweet_node):
total_cascades = len(root_node.retweet_children)
cascade_with_retweet = 0
for node in root_node.retweet_children:
if len(node.retweet_children) > 0:
cascade_with_retweet += 1
return cascade_with_retweet / total_cascades
def get_prop_graphs_fraction_of_cascades_with_retweets(prop_graphs, edge_type=RETWEET_EDGE):
return get_sample_feature_value(prop_graphs, get_fraction_of_cascades_with_retweets)
def get_num_of_cascades_with_replies(root_node: tweet_node):
num_cascades = 0
for node in root_node.reply_children:
if len(node.reply_children) > 0:
num_cascades += 1
return num_cascades
def get_prop_graphs_num_of_cascades_with_replies(prop_graphs, edge_type=RETWEET_EDGE):
return get_sample_feature_value(prop_graphs, get_num_of_cascades_with_replies)
def get_fraction_of_cascades_with_replies(root_node: tweet_node):
total_cascades = len(root_node.reply_children)
cascade_with_replies = 0
for node in root_node.reply_children:
if len(node.reply_children) > 0:
cascade_with_replies += 1
return cascade_with_replies / total_cascades
def get_users_in_network(prop_graph: tweet_node, edge_type=None):
q = queue.Queue()
q.put(prop_graph)
users_list = list()
while q.qsize() != 0:
node = q.get()
if edge_type == RETWEET_EDGE:
children = node.retweet_children
elif edge_type == REPLY_EDGE:
children = node.reply_children
else:
children = node.children
for child in children:
q.put(child)
if child.user_id is not None:
users_list.append(child.user_id)
return users_list
def get_users_replying_in_prop_graph(prop_graph: tweet_node):
q = queue.Queue()
q.put(prop_graph)
users_list = list()
while q.qsize() != 0:
node = q.get()
for child in node.reply_children:
q.put(child)
if child.node_type == REPLY_NODE and child.user_id is not None:
users_list.append(child.user_id)
return users_list
def get_users_retweeting_in_prop_graph(prop_graph: tweet_node):
q = queue.Queue()
q.put(prop_graph)
users_list = list()
while q.qsize() != 0:
node = q.get()
for child in node.retweet_children:
q.put(child)
if child.node_type == RETWEET_NODE and child.user_id is not None:
users_list.append(child.user_id)
return users_list
def get_user_names_retweeting_in_prop_graph(prop_graph: tweet_node):
q = queue.Queue()
q.put(prop_graph)
users_list = list()
while q.qsize() != 0:
node = q.get()
for child in node.retweet_children:
q.put(child)
if child.node_type == RETWEET_NODE and child.user_name is not None:
users_list.append(child.user_name)
return users_list
def get_num_user_retweet_and_reply(prop_graph: tweet_node):
retweet_users = set(get_users_retweeting_in_prop_graph(prop_graph))
replying_users = set(get_users_replying_in_prop_graph(prop_graph))
return len(retweet_users.intersection(replying_users))
def get_ratio_of_retweet_to_reply(prop_graph: tweet_node):
retweet_users = set(get_users_retweeting_in_prop_graph(prop_graph))
replying_users = set(get_users_replying_in_prop_graph(prop_graph))
return (len(retweet_users) + 1) / (len(replying_users) + 1)
def get_prop_graphs_num_user_retweet_and_reply(prop_graphs, edge_type=None):
return get_sample_feature_value(prop_graphs, get_num_user_retweet_and_reply)
def get_prop_graphs_ratio_of_retweet_to_reply(prop_graphs, edge_type=None):
return get_sample_feature_value(prop_graphs, get_ratio_of_retweet_to_reply)
def get_unique_users_in_graph(prop_graph: tweet_node, edge_type=None):
user_list = get_users_in_network(prop_graph, edge_type)
return len(set(user_list))
def get_fraction_of_unique_users(prop_graph: tweet_node, edge_type=None):
user_list = get_users_in_network(prop_graph, edge_type)
try:
return len(set(user_list)) / len(user_list)
except:
print("Exception in fraction of unique users")
return 0
def get_num_bot_users(prop_graph: tweet_node):
q = queue.Queue()
q.put(prop_graph)
num_bot_users = 0
while q.qsize() != 0:
node = q.get()
for child in node.retweet_children:
q.put(child)
if child.node_type == RETWEET_NODE and child.user_id is not None:
if child.botometer_score and child.botometer_score > 0.5:
num_bot_users += 1
return num_bot_users
def get_fraction_of_bot_users_retweeting(prop_graph: tweet_node):
q = queue.Queue()
q.put(prop_graph)
num_bot_users = 1
num_human_users = 1
while q.qsize() != 0:
node = q.get()
for child in node.retweet_children:
q.put(child)
if child.node_type == RETWEET_NODE and child.user_id is not None:
if child.botometer_score:
if child.botometer_score > 0.5:
num_bot_users += 1
else:
num_human_users += 1
return num_bot_users / (num_human_users + num_bot_users)
def get_prop_graphs_num_bot_users_retweeting(prop_graphs: tweet_node, edge_type=None):
global user_id_bot_score_dict
return get_sample_feature_value(prop_graphs, get_num_bot_users)
def get_prop_graphs_fraction_of_bot_users_retweeting(prop_graphs: tweet_node, edge_type=None):
return get_sample_feature_value(prop_graphs, get_fraction_of_bot_users_retweeting)
def get_breadth_at_each_level(prop_graph, edge_type=RETWEET_EDGE):
q1 = queue.Queue()
q2 = queue.Queue()
q1.put(prop_graph)
level_breadths = []
while q1.qsize() != 0 or q2.qsize() != 0:
if q1.qsize() != 0:
level_breadths.append(q1.qsize())
while q1.qsize() != 0:
node = q1.get()
if edge_type == RETWEET_EDGE:
children = node.retweet_children
elif edge_type == REPLY_EDGE:
children = node.reply_children
else:
children = node.children
for child in children:
q2.put(child)
if q2.qsize() != 0:
level_breadths.append(q2.qsize())
while q2.qsize() != 0:
node = q2.get()
if edge_type == RETWEET_EDGE:
children = node.retweet_children
elif edge_type == REPLY_EDGE:
children = node.reply_children
else:
children = node.children
for child in children:
q1.put(child)
return max(level_breadths)
def get_prop_graphs_max_breadth(prop_graphs, edge_type=RETWEET_EDGE):
return get_sample_feature_value(prop_graphs, get_breadth_at_each_level)
def get_prop_graphs_num_unique_users(prop_graphs, edge_type=RETWEET_EDGE):
unique_users_cnts = []
for graph in prop_graphs:
unique_users_cnts.append(get_unique_users_in_graph(graph, edge_type))
return unique_users_cnts
def get_prop_graphs_fraction_of_unique_users(prop_graphs, edge_type=RETWEET_EDGE):
unique_users_fract_cnts = []
for graph in prop_graphs:
unique_users_fract_cnts.append(get_fraction_of_unique_users(graph, edge_type))
return unique_users_fract_cnts
def get_prop_graphs_fraction_of_cascades_with_replies(prop_graphs, edge_type=RETWEET_EDGE):
return get_sample_feature_value(prop_graphs, get_fraction_of_cascades_with_replies)
def get_prop_graphs_min_time_to_reach_level_1(news_graphs: list, edge_type=None):
return get_sample_feature_value(news_graphs, get_min_time_to_reach_level_1)
def get_prop_graphs_min_time_to_reach_level_2(news_graphs: list, edge_type=None):
return get_sample_feature_value(news_graphs, get_min_time_to_reach_level_2)
def get_min_time_to_reach_level_1(new_graph: tweet_node):
return get_min_time_to_reach_level(new_graph, 1)
def get_min_time_to_reach_level_2(news_graph: tweet_node):
return get_min_time_to_reach_level(news_graph, 2)
def get_min_time_to_reach_level(new_graph: tweet_node, target_depth):
time_to_reach_depth = []
for post_node in new_graph.retweet_children:
post_time = post_node.created_time
level_node_times = dfs_traverse(post_node, 0, target_depth)
if len(level_node_times) > 0:
time_to_reach_depth.append(min(level_node_times) - post_time)
if len(time_to_reach_depth) > 0:
return np.mean(time_to_reach_depth)
else:
return 0
def get_unique_users_untill_level(new_graph: tweet_node, target_depth):
return dfs_traverse_get_users(new_graph, 0, target_depth)
def dfs_traverse(node: tweet_node, level: int, target: int):
result = []
if level == target:
return [node.created_time]
elif level > target:
return None
else:
for child in node.retweet_children:
level_nodes = dfs_traverse(child, level + 1, target)
if level_nodes:
result.extend(level_nodes)
return result
def get_num_unique_users_under_level_2(node: tweet_node, edge_type=None):
return len(dfs_traverse_get_users(node, 0, 2))
def get_num_unique_users_under_level_4(node: tweet_node, edge_type=None):
return len(dfs_traverse_get_users(node, 0, 4))
def get_prop_graphs_num_unique_user_under_level_2(prop_graphs, edge_type=RETWEET_EDGE):
return get_sample_feature_value(prop_graphs, get_num_unique_users_under_level_2)
def get_prop_graphs_num_unique_user_under_level_4(prop_graphs, edge_type=RETWEET_EDGE):
return get_sample_feature_value(prop_graphs, get_num_unique_users_under_level_4)
def dfs_traverse_get_users(node: tweet_node, level: int, target: int):
result = list()
if level > target:
return None
else:
result.append(node.user_id)
for child in node.retweet_children:
level_nodes = dfs_traverse_get_users(child, level + 1, target)  # recurse to collect descendant user ids up to the target level
if level_nodes:
result.extend(level_nodes)
return result
def get_all_structural_features(news_graphs, micro_features, macro_features):
all_features = []
target_edge_type = RETWEET_EDGE
if macro_features:
retweet_function_references = [get_tree_heights, get_prop_graphs_node_counts, get_prop_graps_cascade_num,
get_max_outdegrees, get_num_of_cascades_with_retweets,
get_fraction_of_cascades_with_retweets]
for function_ref in retweet_function_references:
features = function_ref(news_graphs, target_edge_type)
all_features.append(features)
if micro_features:
target_edge_type = REPLY_EDGE
reply_function_references = [get_tree_heights, get_prop_graphs_node_counts, get_max_outdegrees]
for function_ref in reply_function_references:
features = function_ref(news_graphs, target_edge_type)
all_features.append(features)
return np.transpose(get_numpy_array(all_features))
class StructureFeatureHelper(BaseFeatureHelper):
def get_feature_group_name(self):
return "struct"
def get_micro_feature_method_references(self):
method_refs = [get_tree_heights, get_prop_graphs_node_counts, get_max_outdegrees,
get_prop_graphs_num_of_cascades_with_replies,
get_prop_graphs_fraction_of_cascades_with_replies]
return method_refs
def get_micro_feature_method_names(self):
feature_names = ["Micro - Tree depth", "Micro - No of nodes", "Micro - Maximum out degree",
"No. of cascades with replies", "Fraction of cascades with replies"]
return feature_names
def get_micro_feature_short_names(self):
feature_names = ["S10", "S11", "S12", "S13", "S14"]
return feature_names
def get_macro_feature_method_references(self):
method_refs = [get_tree_heights, get_prop_graphs_node_counts, get_max_outdegrees, get_prop_graps_cascade_num,
get_max_out_degree_depths,
get_prop_graphs_num_of_cascades_with_retweets,
get_prop_graphs_fraction_of_cascades_with_retweets,
get_prop_graphs_num_bot_users_retweeting,
get_prop_graphs_fraction_of_bot_users_retweeting,
]
return method_refs
def get_macro_feature_method_names(self):
feature_names = ["Macro - Tree depth",
"Macro - No of nodes",
"Macro - Maximum out degree",
"Macro - No of cascades",
"Macro - Max out degree node's level",
"No. of cascades with retweets",
"Fraction of cascades with retweets",
"No. of bot users retweeting",
"Fraction of bot user retweeting"]
return feature_names
feature_names = []
def get_macro_feature_short_names(self):
feature_names = ["S1", "S2", "S3", "S4", "S5", "S6", "S7", "S8", "S9"]
return feature_names
def get_features_array(self, prop_graphs, micro_features, macro_features, news_source=None, label=None,
file_dir="/content/FakeNewsPropagation/data/features", use_cache=False):
all_features = []
file_name = self.get_dump_file_name(news_source, micro_features, macro_features, label, file_dir)
data_file = Path(file_name)
if use_cache and data_file.is_file():
return pickle.load(open(file_name, "rb"))
if micro_features:
target_edge_type = REPLY_EDGE
reply_function_references = self.get_micro_feature_method_references()
for function_ref in reply_function_references:
features = function_ref(prop_graphs, target_edge_type)
all_features.append(features)
if macro_features:
target_edge_type = RETWEET_EDGE
retweet_function_references = self.get_macro_feature_method_references()
for function_ref in retweet_function_references:
features = function_ref(prop_graphs, target_edge_type)
all_features.append(features)
feature_array = np.transpose(get_numpy_array(all_features))
pickle.dump(feature_array, open(file_name, "wb"))
return feature_array
|
StarcoderdataPython
|
327951
|
<filename>src/collective/solr/tests/test_contentlisting.py
from unittest import TestCase
from plone.app.contentlisting.interfaces import IContentListingObject
from plone.uuid.interfaces import IUUID
from zope.interface.verify import verifyClass
from collective.solr.contentlisting import FlareContentListingObject
from collective.solr.flare import PloneFlare
from collective.solr.testing import LEGACY_COLLECTIVE_SOLR_INTEGRATION_TESTING
from DateTime import DateTime
class ContentListingTests(TestCase):
layer = LEGACY_COLLECTIVE_SOLR_INTEGRATION_TESTING
def setUp(self):
self.flare = FlareContentListingObject(
PloneFlare(
{
"getId": "foobar",
"path_string": "/plone/news",
"UID": "test-uid",
"getObjSize": 42,
"review_state": "published",
"listCreators": ["foo", "bar"],
"Creator": "Flare Creator",
"Title": "Flare Title",
"Description": "Flare Description",
"Subject": "Flare Subject",
"Date": "Flare Date",
"expires": DateTime("1.1.2099"),
"created": DateTime("31.12.1969"),
"modified": DateTime("27.07.2016"),
"Language": "de",
"portal_type": "NewsItem",
"Type": "Flare NewsItem",
}
)
)
def testInterfaceComplete(self):
self.assertTrue(verifyClass(IContentListingObject, FlareContentListingObject))
def test_getId(self):
self.assertEqual(self.flare.getId(), "foobar")
def test_getObject(self):
self.assertEqual(self.flare.getObject(), self.layer["portal"]["news"])
def test_getDataOrigin(self):
self.assertEqual(self.flare.getObject(), self.layer["portal"]["news"])
def test_getPath(self):
self.assertEqual(self.flare.getPath(), "/plone/news")
def test_getURL(self):
self.assertEqual(self.flare.getURL(False), "/plone/news")
self.assertEqual(self.flare.getURL(True), "/plone/news")
def test_uuid_key(self):
self.assertEqual(self.flare.uuid(), "test-uid")
def test_uuid_object(self):
del self.flare.flare["UID"]
self.assertEqual(self.flare.uuid(), IUUID(self.layer["portal"]["news"]))
def test_getSize(self):
self.assertEqual(self.flare.getSize(), 42)
def test_review_state(self):
self.assertEqual(self.flare.review_state(), "published")
def test_listCreators(self):
self.assertEqual(self.flare.listCreators(), ["foo", "bar"])
def test_Creator(self):
self.assertEqual(self.flare.Creator(), "Flare Creator")
def test_Subject(self):
self.assertEqual(self.flare.Subject(), "Flare Subject")
def test_Publisher(self):
self.assertRaises(NotImplementedError, self.flare.Publisher)
def test_listContributors(self):
self.assertRaises(NotImplementedError, self.flare.listContributors)
def test_Contributors(self):
self.assertRaises(NotImplementedError, self.flare.Contributors)
def test_Date(self):
self.assertEqual(self.flare.Date(), "Flare Date")
def test_CreationDate(self):
self.assertEqual(self.flare.CreationDate().ISO(), "1969-12-31T00:00:00")
def test_EffectiveDate(self):
self.assertEqual(
self.flare.EffectiveDate(), self.layer["portal"]["news"].EffectiveDate()
)
def test_ExpirationDate(self):
self.assertEqual(self.flare.ExpirationDate().ISO(), "2099-01-01T00:00:00")
def test_ModificationDate(self):
self.assertEqual(self.flare.ModificationDate().ISO(), "2016-07-27T00:00:00")
def test_Format(self):
self.assertRaises(NotImplementedError, self.flare.Format)
def test_Identifier(self):
self.assertEqual(self.flare.Identifier(), "/plone/news")
def test_Language(self):
self.assertEqual(self.flare.Language(), "de")
def test_Rights(self):
self.assertRaises(NotImplementedError, self.flare.Rights)
def test_Title(self):
self.assertEqual(self.flare.Title(), "Flare Title")
def test_Description(self):
self.assertEqual(self.flare.Description(), "Flare Description")
def test_Type(self):
self.assertEqual(self.flare.Type(), "Flare NewsItem")
def test_ContentTypeClass(self):
self.assertEqual(self.flare.ContentTypeClass(), "contenttype-newsitem")
def test_PortalType(self):
self.assertEqual(self.flare.PortalType(), "NewsItem")
def test_Author(self):
self.assertEqual(
self.flare.Author(),
{
"username": "Flare Creator",
"description": "",
"language": "",
"home_page": "/HOMEPAGEURL",
"location": "",
"fullname": "<NAME>",
},
)
def test_CroppedDescription(self):
self.assertEqual(self.flare.CroppedDescription(), "Flare Description")
def test_pretty_title(self):
self.assertEqual(self.flare.flare.pretty_title_or_id(), "Flare Title")
def test_creation_date(self):
self.assertTrue(self.flare.flare.CreationDate.startswith("1969-12-31T"))
def test_UID(self):
self.assertEqual(self.flare.UID, self.flare.flare.UID)
def test_portal_type(self):
self.assertEqual(self.flare.portal_type, self.flare.PortalType())
def test_modified(self):
self.assertEqual(self.flare.modified, self.flare.ModificationDate())
|
StarcoderdataPython
|
3496046
|
from flask import Blueprint
bp = Blueprint('git_class', __name__)
from app.giturl_class import routes
|
StarcoderdataPython
|
8075238
|
import random
'''
Random file mixer
This small piece of code mixes lines randomly from a given file.
It's important to have a randomly ordered dataset to train your machine learning algorithm,
but the data usually comes from structured sources, so it may not be in random order.
The script reads data from a file named dataset.csv and prints it to stdout.
If needed, redirect stdout to a file to save the shuffled data.
# python mixer.py > mixed.csv
-simsek
'''
with open('dataset.csv','r') as source:
data = [ (random.random(), line) for line in source ]
data.sort()
for _, line in data:
print(line.strip())
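# Illustration (not part of the original script): for files that fit in memory, random.shuffle
# is a simpler equivalent of the decorate-sort idiom above.
def _shuffle_in_memory(path='dataset.csv'):
    import random
    with open(path) as source:
        lines = source.readlines()
    random.shuffle(lines)
    for line in lines:
        print(line.strip())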
|
StarcoderdataPython
|
3537155
|
from typing import List, Optional
from typing import OrderedDict as OrderedDictType
from typing import Set, TypeVar
from text_selection.metrics_applied import get_rarity
from text_selection.utils import get_filtered_ngrams
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
def get_rarity_ngrams(data: OrderedDictType[int, List[str]], corpus: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]]) -> OrderedDictType[int, float]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
corpus_ngrams = get_filtered_ngrams(corpus, n_gram, ignore_symbols)
return get_rarity(
data=data_ngrams,
corpus=corpus_ngrams,
)
|
StarcoderdataPython
|
3430987
|
from django.shortcuts import redirect, render
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
#my scripts
from .forms import CreateUserForm
import os
# Create your views here.
def registerPage(request):
if request.user.is_authenticated:
return render(request, 'index.html')
else:
form= CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
user = form.cleaned_data.get('username')
messages.success(request, 'Your account '+user+' was created :)')
return render(request, 'login.html')
else:
print('It was not possible to create the account :(')
context={'form':form}
return render(request,'signup.html',context)
def loginPage(request):
if request.user.is_authenticated:
return render(request, 'index.html')
else:
if request.method=='POST':
user =authenticate(request,username=request.POST.get('username'),password=request.POST.get('password'))
if user is not None:
login(request,user)
return render(request, 'index.html')
else:
messages.info(request, 'The username or password is incorrect')
context ={}
return render(request,'login.html',context)
def logoutPage(request):
logout(request)
return render(request, 'index.html')
|
StarcoderdataPython
|
3276448
|
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/1237/A
def f(l):
dl = [-1,1]
ii = 0
for i in range(len(l)):
if l[i]%2>0:
l[i] += dl[ii]
ii = (ii+1)%2
l[i] = l[i]//2
return l
n = int(input())
l = [int(input()) for _ in range(n)]
[print(r) for r in f(l)]
|
StarcoderdataPython
|
8077037
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Kilt Troll """
import time
class KilnTrol(object):
""" KilnTrol Kiln Controller """
def __init__(self, temperature, heater, clock, target_profile, logger, tick_interval=5):
self.temperature = temperature
self.heater = heater
self.clock = clock
self.target_profile = target_profile
self.logger = logger
self.tick_interval = tick_interval
self.running = False
def run(self):
""" Start the run loop """
self.running = True
while self.running:
try:
self.tick()
time.sleep(self.tick_interval)
if self.target_profile.is_finished(self.clock.now()):
self.heater.off()
self.running = False
self.log_until(self.clock.now() * 1.5)
except KeyboardInterrupt:
self.running = False
def stop(self):
""" Stop the run loop """
self.running = False
def tick(self):
""" Check the current and desired temperature and turn the heater on or off as needed """
now = self.clock.now()
target_temperature = self.target_profile.temperature_at(now)
t = self.temperature.get()
if target_temperature > t:
self.heater.on()
else:
self.heater.off()
self.logger.log(now, t, target_temperature)
def log_until(self, t_stop):
while self.clock.now() < t_stop:
self.logger.log(self.clock.now(), self.temperature.get(), 0)
time.sleep(self.tick_interval)
def main():
""" Run KilnTrol """
import sys
if len(sys.argv) > 1 and sys.argv[1] == "sim":
from clocks import SpeedySimClock as Clock
from heater_sim import HeaterRelay, MAX31855, TICKS_PER_SECOND
tick_interval = 5 / TICKS_PER_SECOND
else:
from clocks import BasicClock as Clock
from max31855 import MAX31855
from heater import HeaterRelay
tick_interval = 5
from target_profile import TargetProfile
from loggers import FileLogger as Logger
from profiles import crystal_profile as target_profile
# from profiles import glaze_profile as target_profile
# from profiles import test_profile as target_profile
# from profiles import sample_profile as target_profile
temperature = MAX31855(cs_pin=27, clock_pin=22,
data_pin=17, units="f")
heater = HeaterRelay(relay_pin=26)
clock = Clock()
logger = Logger('logs/temperature')
target_profile = TargetProfile(target_profile)
kilntrol = KilnTrol(temperature, heater, clock,
target_profile, logger, tick_interval)
kilntrol.run()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
2173
|
<gh_stars>1000+
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import fnmatch
from io import StringIO
import json
import os
import shutil
import zipfile
import re
from datetime import datetime, timedelta, tzinfo
from distutils.util import strtobool
import boto3
import placebo
from botocore.response import StreamingBody
from placebo import pill
from c7n.testing import CustodianTestCore
from .constants import ACCOUNT_ID
# Custodian Test Account. This is used only for testing.
# Access is available for community project maintainers.
###########################################################################
# BEGIN PLACEBO MONKEY PATCH
#
# Placebo is effectively abandoned upstream, since mitch went back to work at AWS, irony...
# These monkeypatches represent fixes on trunk of that repo that have not been released
# in an extant version, so we carry them here. We can drop them when this issue is resolved:
#
# https://github.com/garnaat/placebo/issues/63
#
# License - Apache 2.0
# Copyright (c) 2015 <NAME>
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
utc = UTC()
def deserialize(obj):
"""Convert JSON dicts back into objects."""
# Be careful of shallow copy here
target = dict(obj)
class_name = None
if "__class__" in target:
class_name = target.pop("__class__")
if "__module__" in obj:
obj.pop("__module__")
# Use getattr(module, class_name) for custom types if needed
if class_name == "datetime":
return datetime(tzinfo=utc, **target)
if class_name == "StreamingBody":
return StringIO(target["body"])
# Return unrecognized structures as-is
return obj
def serialize(obj):
"""Convert objects into JSON structures."""
# Record class and module information for deserialization
result = {"__class__": obj.__class__.__name__}
try:
result["__module__"] = obj.__module__
except AttributeError:
pass
# Convert objects to dictionary representation based on type
if isinstance(obj, datetime):
result["year"] = obj.year
result["month"] = obj.month
result["day"] = obj.day
result["hour"] = obj.hour
result["minute"] = obj.minute
result["second"] = obj.second
result["microsecond"] = obj.microsecond
return result
if isinstance(obj, StreamingBody):
result["body"] = obj.read()
obj._raw_stream = StringIO(result["body"])
obj._amount_read = 0
return result
if isinstance(obj, bytes):
return obj.decode('utf8')
# Raise a TypeError if the object isn't recognized
raise TypeError("Type not serializable")
pill.FakeHttpResponse.raw = None
placebo.pill.serialize = serialize
placebo.pill.deserialize = deserialize
# END PLACEBO MONKEY
##########################################################################
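# Sketch (not part of the original module): serialize() is meant to be plugged into
# json.dumps as `default`, turning datetimes and StreamingBody objects into plain
# dicts; deserialize() is the matching object_hook used when responses are replayed.
def _serialize_datetime_example():
    encoded = json.dumps({"when": datetime(2021, 1, 1, tzinfo=utc)}, default=serialize)
    return encoded  # the datetime is stored as a dict tagged with "__class__": "datetime"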
class BluePill(pill.Pill):
def playback(self):
super(BluePill, self).playback()
self._avail = self.get_available()
def get_available(self):
return {
os.path.join(self.data_path, n)
for n in fnmatch.filter(os.listdir(self.data_path), "*.json")
}
def get_next_file_path(self, service, operation):
fn, format = super(BluePill, self).get_next_file_path(service, operation)
# couple of double use cases
if fn in self._avail:
self._avail.remove(fn)
else:
print("\ndouble use %s\n" % fn)
return (fn, format)
def stop(self):
result = super(BluePill, self).stop()
if self._avail:
print("Unused json files \n %s" % ("\n".join(sorted(self._avail))))
return result
class ZippedPill(pill.Pill):
def __init__(self, path, prefix=None, debug=False):
super(ZippedPill, self).__init__(prefix, debug)
self.path = path
self._used = set()
self.archive = None
def playback(self):
self.archive = zipfile.ZipFile(self.path, "r")
self._files = set(self.archive.namelist())
return super(ZippedPill, self).playback()
def record(self):
self.archive = zipfile.ZipFile(self.path, "a", zipfile.ZIP_DEFLATED)
self._files = set()
files = {n for n in self.archive.namelist() if n.startswith(self.prefix)}
if not files:
return super(ZippedPill, self).record()
# We can't update files in a zip, so copy
self.archive.close()
os.rename(self.path, "%s.tmp" % self.path)
src = zipfile.ZipFile("%s.tmp" % self.path, "r")
self.archive = zipfile.ZipFile(self.path, "w", zipfile.ZIP_DEFLATED)
for n in src.namelist():
if n in files:
continue
self.archive.writestr(n, src.read(n))
os.remove("%s.tmp" % self.path)
return super(ZippedPill, self).record()
def stop(self):
super(ZippedPill, self).stop()
if self.archive:
self.archive.close()
def save_response(self, service, operation, response_data, http_response=200):
filepath = self.get_new_file_path(service, operation)
pill.LOG.debug("save_response: path=%s", filepath)
json_data = {"status_code": http_response, "data": response_data}
self.archive.writestr(
filepath,
json.dumps(json_data, indent=4, default=pill.serialize),
zipfile.ZIP_DEFLATED,
)
self._files.add(filepath)
def load_response(self, service, operation):
response_file = self.get_next_file_path(service, operation)
self._used.add(response_file)
pill.LOG.debug("load_responses: %s", response_file)
response_data = json.loads(
self.archive.read(response_file), object_hook=pill.deserialize
)
return (
pill.FakeHttpResponse(response_data["status_code"]), response_data["data"]
)
def get_new_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_new_file_path: %s", base_name)
index = 0
glob_pattern = os.path.join(self._data_path, base_name + "*")
for file_path in fnmatch.filter(self._files, glob_pattern):
file_name = os.path.basename(file_path)
m = self.filename_re.match(file_name)
if m:
i = int(m.group("index"))
if i > index:
index = i
index += 1
return os.path.join(self._data_path, "{0}_{1}.json".format(base_name, index))
def get_next_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_next_file_path: %s", base_name)
next_file = None
while next_file is None:
index = self._index.setdefault(base_name, 1)
fn = os.path.join(self._data_path, base_name + "_{0}.json".format(index))
fn = fn.replace('\\', '/')
if fn in self._files:
next_file = fn
self._index[base_name] += 1
self._files.add(fn)
elif index != 1:
self._index[base_name] = 1
else:
# we are looking for the first index and it's not here
raise IOError("response file ({0}) not found".format(fn))
return fn
def attach(session, data_path, prefix=None, debug=False):
pill = ZippedPill(data_path, prefix=prefix, debug=debug)
pill.attach(session, prefix)
return pill
class RedPill(pill.Pill):
def datetime_converter(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
def save_response(self, service, operation, response_data,
http_response=200):
"""
Override to sanitize response metadata and account_ids
"""
# aws sso setups involve a short lived credential transfer
if service == "portal.sso":
return
if 'ResponseMetadata' in response_data:
response_data['ResponseMetadata'] = {}
response_data = json.dumps(response_data, default=serialize)
response_data = re.sub(r"\b\d{12}\b", ACCOUNT_ID, response_data) # noqa
response_data = json.loads(response_data, object_hook=deserialize)
super(RedPill, self).save_response(service, operation, response_data,
http_response)
class PillTest(CustodianTestCore):
archive_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "placebo_data.zip"
)
placebo_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "placebo"
)
output_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "output"
)
recording = False
def cleanUp(self):
self.pill = None
def record_flight_data(self, test_case, zdata=False, augment=False, region=None):
self.recording = True
test_dir = os.path.join(self.placebo_dir, test_case)
if not (zdata or augment):
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.makedirs(test_dir)
session = boto3.Session(region_name=region)
default_region = session.region_name
if not zdata:
pill = RedPill()
pill.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, debug=True)
pill.record()
self.pill = pill
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
class FakeFactory:
def __call__(fake, region=None, assume=None):
new_session = None
                # Slightly experimental for test recording when using
                # cross-account assumes. Note this records the STS
                # assume-role credentials into the test data; they will
                # go stale, but it's best to modify them before committing.
                # Disabled by default.
if 0 and (assume is not False and fake.assume_role):
client = session.client('sts')
creds = client.assume_role(
RoleArn=fake.assume_role,
RoleSessionName='CustodianTest')['Credentials']
new_session = boto3.Session(
aws_access_key_id=creds['AccessKeyId'],
aws_secret_access_key=creds['SecretAccessKey'],
aws_session_token=creds['SessionToken'],
region_name=region or fake.region or default_region)
elif region and region != default_region:
new_session = boto3.Session(region_name=region)
if new_session:
assert not zdata
new_pill = placebo.attach(new_session, test_dir, debug=True)
new_pill.record()
self.addCleanup(new_pill.stop)
return new_session
return session
return FakeFactory()
def replay_flight_data(self, test_case, zdata=False, region=None):
"""
The `region` argument is to allow functional tests to override the
default region. It is unused when replaying stored data.
"""
if strtobool(os.environ.get('C7N_FUNCTIONAL', 'no')):
self.recording = True
return lambda region=region, assume=None: boto3.Session(region_name=region)
if not zdata:
test_dir = os.path.join(self.placebo_dir, test_case)
if not os.path.exists(test_dir):
raise RuntimeError("Invalid Test Dir for flight data %s" % test_dir)
session = boto3.Session(region_name=region)
if not zdata:
pill = placebo.attach(session, test_dir)
# pill = BluePill()
# pill.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, False)
pill.playback()
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
return lambda region=None, assume=None: session
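# Usage sketch (hypothetical test case name and recorded data; not part of the
# original module): tests subclass PillTest and replay previously recorded calls.
class _ExamplePillTest(PillTest):
    def test_describe_instances(self):
        factory = self.replay_flight_data("test_describe_instances")
        session = factory()
        client = session.client("ec2")  # responses served from data/placebo/test_describe_instances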
|
StarcoderdataPython
|
3558012
|
"""Module wrapping the Client of Jina."""
import argparse
from typing import overload, Optional, Union
__all__ = ['Client']
if False:
from .base import BaseClient
from .asyncio import AsyncClient, AsyncWebSocketClient
from .grpc import GRPCClient
from .websocket import WebSocketClient
# overload_inject_start_client
@overload
def Client(
asyncio: Optional[bool] = False,
continue_on_error: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
port_expose: Optional[int] = None,
proxy: Optional[bool] = False,
request_size: Optional[int] = 100,
restful: Optional[bool] = False,
return_results: Optional[bool] = False,
show_progress: Optional[bool] = False,
**kwargs
) -> 'BaseClient':
"""Create a Client. Client is how user interact with Flow
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param continue_on_error: If set, a Request that causes error will be logged only without blocking the further requests.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param port_expose: The port of the host exposed to the public
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param request_size: The number of Documents in each Request.
:param restful: If set, use RESTful interface instead of gRPC as the main interface. This expects the corresponding Flow to be set with --restful as well.
    :param return_results: This feature is only used for AsyncClient.
        If set, the results of all Requests will be returned as a list. This is useful when one wants
        to process Responses in bulk instead of using a callback.
:param show_progress: If set, client will show a progress bar on receiving every request.
:return: the new Client object
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client
def Client(
args: Optional['argparse.Namespace'] = None, **kwargs
) -> Union['AsyncWebSocketClient', 'WebSocketClient', 'AsyncClient', 'GRPCClient']:
"""Jina Python client.
:param args: Namespace args.
:param kwargs: Additional arguments.
:return: An instance of :class:`GRPCClient` or :class:`WebSocketClient`.
"""
is_restful = (args and args.restful) or kwargs.get('restful', False)
is_async = (args and args.asyncio) or kwargs.get('asyncio', False)
if is_restful:
if is_async:
from .asyncio import AsyncWebSocketClient
return AsyncWebSocketClient(args, **kwargs)
else:
from .websocket import WebSocketClient
return WebSocketClient(args, **kwargs)
else:
if is_async:
from .asyncio import AsyncClient
return AsyncClient(args, **kwargs)
else:
from .grpc import GRPCClient
return GRPCClient(args, **kwargs)
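# Usage sketch (assumes a Flow is already serving on the given host/port; the values
# below are illustrative, not from this file):
#
# grpc_client = Client(host='localhost', port_expose=45678)   # -> GRPCClient
# ws_client = Client(restful=True)                            # -> WebSocketClient
# async_client = Client(asyncio=True)                         # -> AsyncClient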
|
StarcoderdataPython
|
1896759
|
from nose.tools import istest, assert_equal
from mammoth import documents
from mammoth.docx.xmlparser import element as xml_element
from mammoth.docx.comments_xml import read_comments_xml_element
from mammoth.docx import body_xml
@istest
def id_and_body_of_comment_is_read():
body = [xml_element("w:p")]
comments = read_comments_xml_element(xml_element("w:comments", {}, [
xml_element("w:comment", {"w:id": "1"}, body),
]), body_reader=body_xml.reader())
assert_equal(1, len(comments.value))
assert_equal(comments.value[0].body, [documents.paragraph(children=[])])
assert_equal("1", comments.value[0].comment_id)
@istest
def when_optional_attributes_of_comment_are_missing_then_they_are_read_as_none():
comments = read_comments_xml_element(xml_element("w:comments", {}, [
xml_element("w:comment", {"w:id": "1"}, []),
]), body_reader=body_xml.reader())
comment, = comments.value
assert_equal(None, comment.author_name)
assert_equal(None, comment.author_initials)
@istest
def when_optional_attributes_of_comment_are_blank_then_they_are_read_as_none():
comments = read_comments_xml_element(xml_element("w:comments", {}, [
xml_element("w:comment", {"w:id": "1", "w:author": " ", "w:initials": " "}, []),
]), body_reader=body_xml.reader())
comment, = comments.value
assert_equal(None, comment.author_name)
assert_equal(None, comment.author_initials)
@istest
def when_optional_attributes_of_comment_are_not_blank_then_they_are_read():
comments = read_comments_xml_element(xml_element("w:comments", {}, [
xml_element("w:comment", {"w:id": "1", "w:author": "<NAME>", "w:initials": "TP"}, []),
]), body_reader=body_xml.reader())
comment, = comments.value
assert_equal("<NAME>", comment.author_name)
assert_equal("TP", comment.author_initials)
|
StarcoderdataPython
|
4939600
|
from __future__ import annotations
import logging
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
log = logging.getLogger(__name__)
engine = create_engine(
"sqlite:///dev.db", connect_args={"check_same_thread": False}, future=True
)
Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
db = Session()
log.debug(f"opening db session at {db.__repr__}")
try:
yield db
finally:
db.close()
log.debug("closing db")
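# Sketch (not part of the original module): consume the generator the way a
# dependency injector would -- take one session, use it, then close the generator
# so the finally-block above runs and the session is closed.
def _session_example():
    session_gen = get_db()
    db = next(session_gen)
    try:
        pass  # run queries with `db` here
    finally:
        session_gen.close()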
|
StarcoderdataPython
|
1681943
|
import unittest
from aviation_weather import Wind
from aviation_weather.exceptions import WindDecodeError
class TestWind(unittest.TestCase):
"""Unit tests for aviation_weather.components.wind.Wind"""
def _test_valid(self, raw, direction, speed, gusts, variable):
w = Wind(raw)
self.assertEqual(raw, w.raw)
self.assertEqual(direction, w.direction)
self.assertEqual(speed, w.speed)
self.assertEqual(gusts, w.gusts)
self.assertEqual(variable, w.variable)
def test_valid_simpleKT(self):
self._test_valid("35007KT", 350, 7, None, None)
def test_valid_simpleMPS(self):
self._test_valid("21007MPS", 210, 7, None, None)
def test_valid_strong_winds(self):
self._test_valid("350107KT", 350, 107, None, None)
def test_valid_gusts(self):
self._test_valid("32013G17KT", 320, 13, 17, None)
def test_valid_strong_gusts(self):
self._test_valid("05013G127KT", 50, 13, 127, None)
def test_valid_variable_weak(self):
self._test_valid("VRB01MPS", "VRB", 1, None, None)
def test_valid_variable_strong(self):
self._test_valid("14003KT 110V170", 140, 3, None, (110, 170))
def _test_invalid(self, raw):
with self.assertRaises(WindDecodeError):
Wind(raw)
def test_invalid_empty(self):
self._test_invalid("")
def test_invalid_short(self):
self._test_invalid("1012KT")
def test_invalid_long(self):
self._test_invalid("3210123KT")
def test_invalid_bad_unit(self):
self._test_invalid("21012KN")
def test_invalid_strength(self):
self._test_invalid("VRBMPS")
def test_invalid_no_unit(self):
self._test_invalid("21012G21")
|
StarcoderdataPython
|
9791245
|
# Standardize a crashes CSV into compatible JSON document.
# Author terryf82 https://github.com/terryf82
import argparse
import os
import pandas as pd
from pandas.io.json import json_normalize
import yaml
from collections import OrderedDict
import csv
import calendar
import random
import pytz
import dateutil.parser as date_parser
from .standardization_util import parse_date, validate_and_write_schema
from shapely.geometry import Point
import geopandas as gpd
from data.util import read_geocode_cache
CURR_FP = os.path.dirname(
os.path.abspath(__file__))
BASE_FP = os.path.dirname(os.path.dirname(CURR_FP))
def read_standardized_fields(raw_crashes, fields, opt_fields,
timezone, datadir, city,
startdate=None, enddate=None):
crashes = {}
# Drop times from startdate/enddate in the unlikely event
# they're passed in
if startdate:
startdate = parse_date(startdate, timezone)
startdate = date_parser.parse(startdate).date()
if enddate:
enddate = parse_date(enddate, timezone)
enddate = date_parser.parse(enddate).date()
min_date = None
max_date = None
cached_addresses = {}
if (not fields['latitude'] or not fields['longitude']):
if 'address' in opt_fields and opt_fields['address']:
# load cache for geocode lookup
geocoded_file = os.path.join(
datadir, 'processed', 'geocoded_addresses.csv')
if os.path.exists(geocoded_file):
cached_addresses = read_geocode_cache(
filename=os.path.join(
datadir, 'processed', 'geocoded_addresses.csv'))
else:
raise SystemExit(
"Need to geocode addresses before standardizing crashes")
else:
raise SystemExit(
"Can't standardize crash data, no lat/lon or address found"
)
no_geocoded_count = 0
for i, crash in enumerate(raw_crashes):
if i % 10000 == 0:
print(i)
lat = crash[fields['latitude']] if fields['latitude'] else None
lon = crash[fields['longitude']] if fields['longitude'] else None
if not lat or not lon:
# skip any crashes that don't have coordinates
if 'address' not in opt_fields or opt_fields['address'] not in crash:
continue
address = crash[opt_fields['address']] + ' ' + city
# If we have an address, look it up in the geocoded cache
if address in cached_addresses:
address, lat, lon, _ = cached_addresses[address]
if not address:
no_geocoded_count += 1
continue
else:
no_geocoded_count += 1
continue
# construct crash date based on config settings, skipping any crashes without date
if fields["date_complete"]:
if not crash[fields["date_complete"]]:
continue
else:
crash_date = crash[fields["date_complete"]]
elif fields["date_year"] and fields["date_month"]:
if fields["date_day"]:
crash_date = str(crash[fields["date_year"]]) + "-" + str(
crash[fields["date_month"]]) + "-" + crash[fields["date_day"]]
# some cities do not supply a day of month for crashes, randomize if so
else:
available_dates = calendar.Calendar().itermonthdates(
crash[fields["date_year"]], crash[fields["date_month"]])
crash_date = str(random.choice(
[date for date in available_dates if date.month == crash[fields["date_month"]]]))
# skip any crashes that don't have a date
else:
continue
crash_time = None
if fields["time"]:
crash_time = crash[fields["time"]]
if fields["time_format"]:
crash_date_time = parse_date(
crash_date,
timezone,
crash_time,
fields["time_format"]
)
else:
crash_date_time = parse_date(
crash_date,
timezone,
crash_time
)
# Skip crashes where date can't be parsed
if not crash_date_time:
continue
crash_day = date_parser.parse(crash_date_time).date()
# Drop crashes that occur outside of the range, if specified
if ((startdate is not None and crash_day < startdate) or
(enddate is not None and crash_day > enddate)):
continue
if min_date is None or crash_day < min_date:
min_date = crash_day
if max_date is None or crash_day > max_date:
max_date = crash_day
formatted_crash = OrderedDict([
("id", crash[fields["id"]]),
("dateOccurred", crash_date_time),
("location", OrderedDict([
("latitude", float(lat)),
("longitude", float(lon))
]))
])
formatted_crash = add_city_specific_fields(crash, formatted_crash,
opt_fields)
crashes[formatted_crash["id"]] = formatted_crash
if min_date and max_date:
print("Including crashes between {} and {}".format(
min_date.isoformat(), max_date.isoformat()))
elif min_date:
print("Including crashes after {}".format(
min_date.isoformat()))
elif max_date:
print("Including crashes before {}".format(
max_date.isoformat()))
# Making sure we have enough entries with lat/lon to continue
if len(crashes) > 0 and no_geocoded_count/len(raw_crashes) > .9:
raise SystemExit("Not enough geocoded addresses found, exiting")
return crashes
def add_city_specific_fields(crash, formatted_crash, fields):
# Add summary and address
if "summary" in list(fields.keys()) and fields["summary"]:
formatted_crash["summary"] = crash[fields["summary"]]
if "address" in list(fields.keys()) and fields["address"]:
formatted_crash["address"] = crash[fields["address"]]
# setup a vehicles list for each crash
formatted_crash["vehicles"] = []
# check for car involvement
if "vehicles" in list(fields.keys()) and fields["vehicles"] == "mode_type":
# this needs work, but for now any of these mode types
# translates to a car being involved, quantity unknown
if crash[fields["vehicles"]] == "mv" or crash[fields["vehicles"]] == "ped" or crash[fields["vehicles"]] == "":
formatted_crash["vehicles"].append({"category": "car"})
elif "vehicles" in list(fields.keys()) and fields["vehicles"] == "TOTAL_VEHICLES":
if crash[fields["vehicles"]] != 0 and crash[fields["vehicles"]] != "":
formatted_crash["vehicles"].append({
"category": "car",
"quantity": int(crash[fields["vehicles"]])
})
# check for bike involvement
if "bikes" in list(fields.keys()) and fields["bikes"] == "mode_type":
# assume bike and car involved, quantities unknown
if crash[fields["bikes"]] == "bike":
formatted_crash["vehicles"].append({"category": "car"})
formatted_crash["vehicles"].append({"category": "bike"})
elif "bikes" in list(fields.keys()) and fields["bikes"] == "TOTAL_BICYCLES":
if crash[fields["bikes"]] != 0 and crash[fields["bikes"]] != "":
formatted_crash['vehicles'].append({
"category": "bike",
"quantity": int(crash[fields["bikes"]])
})
return formatted_crash
def add_id(csv_file, id_field):
"""
If the csv_file does not contain an id, create one
"""
rows = []
with open(csv_file) as f:
csv_reader = csv.DictReader(f)
count = 1
for row in csv_reader:
if id_field in row:
break
row.update({id_field: count})
rows.append(row)
count += 1
if rows:
with open(csv_file, 'w') as f:
writer = csv.DictWriter(f, list(rows[0].keys()))
writer.writeheader()
for row in rows:
writer.writerow(row)
def calculate_crashes_by_location(df):
"""
Calculates total number of crashes that occurred at each unique lat/lng pair and
generates a comma-separated string of the dates that crashes occurred at that location
Inputs:
- a dataframe where each row represents one unique crash incident
Output:
- a dataframe with the total number of crashes at each unique crash location
and list of unique crash dates
"""
crashes_agg = df.groupby(['latitude', 'longitude']).agg(['count', 'unique'])
crashes_agg.columns = crashes_agg.columns.get_level_values(1)
crashes_agg.rename(columns={'count': 'total_crashes', 'unique': 'crash_dates'}, inplace=True)
crashes_agg.reset_index(inplace=True)
crashes_agg['crash_dates'] = crashes_agg['crash_dates'].str.join(',')
return crashes_agg
def make_crash_rollup(crashes_json):
"""
Generates a GeoDataframe with the total number of crashes and a comma-separated string
of crash dates per unique lat/lng pair
Inputs:
- a json of standardized crash data
Output:
- a GeoDataframe with the following columns:
- total number of crashes
- list of unique dates that crashes occurred
- GeoJSON point features created from the latitude and longitude
"""
df_std_crashes = json_normalize(crashes_json)
df_std_crashes = df_std_crashes[["dateOccurred", "location.latitude", "location.longitude"]]
df_std_crashes.rename(columns={"location.latitude": "latitude", "location.longitude": "longitude"}, inplace=True)
crashes_agg = calculate_crashes_by_location(df_std_crashes)
crashes_agg["coordinates"] = list(zip(crashes_agg.longitude, crashes_agg.latitude))
crashes_agg["coordinates"] = crashes_agg["coordinates"].apply(Point)
crashes_agg = crashes_agg[["coordinates", "total_crashes", "crash_dates"]]
crashes_agg_gdf = gpd.GeoDataFrame(crashes_agg, geometry="coordinates")
print(crashes_agg_gdf.columns)
return crashes_agg_gdf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, required=True,
help="config file")
parser.add_argument("-d", "--datadir", type=str, required=True,
help="data directory")
args = parser.parse_args()
# load config
config_file = args.config
with open(config_file) as f:
config = yaml.safe_load(f)
# by default standardize all available crashes
startdate = None
enddate = None
if config['startdate']:
startdate = str(config['startdate'])
if config['enddate']:
enddate = str(config['enddate'])
crash_dir = os.path.join(args.datadir, "raw/crashes")
if not os.path.exists(crash_dir):
raise SystemExit(crash_dir + " not found, exiting")
print("searching "+crash_dir+" for raw files:")
dict_crashes = {}
for csv_file in list(config['crashes_files'].keys()):
csv_config = config['crashes_files'][csv_file]
if not os.path.exists(os.path.join(crash_dir, csv_file)):
raise SystemExit(os.path.join(
crash_dir, csv_file) + " not found, exiting")
add_id(
os.path.join(crash_dir, csv_file), csv_config['required']['id'])
print("processing {}".format(csv_file))
df_crashes = pd.read_csv(os.path.join(
crash_dir, csv_file), na_filter=False)
raw_crashes = df_crashes.to_dict("records")
std_crashes = read_standardized_fields(
raw_crashes,
csv_config['required'],
csv_config['optional'],
pytz.timezone(config['timezone']),
args.datadir,
config['city'],
startdate,
enddate
)
print("{} crashes loaded with standardized fields, checking for specific fields".format(
len(std_crashes)))
dict_crashes.update(std_crashes)
print("{} crashes loaded, validating against schema".format(len(dict_crashes)))
schema_path = os.path.join(BASE_FP, "standards", "crashes-schema.json")
list_crashes = list(dict_crashes.values())
crashes_output = os.path.join(args.datadir, "standardized/crashes.json")
validate_and_write_schema(schema_path, list_crashes, crashes_output)
crashes_agg_gdf = make_crash_rollup(list_crashes)
crashes_agg_path = os.path.join(args.datadir, "standardized/crashes_rollup.geojson")
if os.path.exists(crashes_agg_path):
os.remove(crashes_agg_path)
crashes_agg_gdf.to_file(os.path.join(args.datadir, "standardized/crashes_rollup.geojson"), driver="GeoJSON")
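# Rollup sketch (hypothetical input; not part of the original script):
#
# crashes = [{"dateOccurred": "2021-01-01T12:00:00-05:00",
#             "location": {"latitude": 42.35, "longitude": -71.06}}]
# rollup_gdf = make_crash_rollup(crashes)   # one row per unique lat/lon pair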
|
StarcoderdataPython
|
8067735
|
<filename>reader/src/clients/python/tomerge.py<gh_stars>0
#!/usr/bin/env python
import sys
import ctypes
import time
import threading
import Queue
lib = ""
callbackList = ""
def callbackHandlerFunction(char_ptr):
callbackString = ""
try:
for i in char_ptr:
if i == "\0":
break
else:
callbackString += i
except:
print "Indexing Error: Pointer to string conversion in wrapper\n"
callbackList[0](callbackString)
return 0
def init():
global lib, callbackList
lib = ctypes.CDLL("libmercuryrfid.so.1")
callbackTemplate = ctypes.CFUNCTYPE(ctypes.c_byte, ctypes.POINTER(ctypes.c_char))(callbackHandlerFunction)
lib.RFIDinit(callbackTemplate)
callbackList = {}
def close():
global lib, callbackList
lib.RFIDclose()
lib = ""
callbackList = {}
def startReader(deviceURI, callbackFunction):
global lib, callbackList
#check callbackfunction exists?
readerID = lib.RFIDstartReader(deviceURI)
#lib.RFIDclose()
#lib.getHopTime(readerID)
#lib.setHopTime(readerID, 100)
callbackList[readerID] = callbackFunction
return readerID
def stopReader(readerID):
global lib
lib.RFIDstopReader(readerID)
def getHopTime(readerID):
global lib
time = lib.getHopTime(readerID)
return time
def setHopTime(readerID, newHopTime):
global lib
lib.setHopTime(readerID, newHopTime)
# newEPCdata: string with new hex char values of epc data
# OldEpcData: original value (same length)
# epcMemoryBytes: byte size of EPCData memory bank (at least 12 bytes).
# NOTE => It's half of the length of the string with values!!
# Reader must be stopped and restarted before using it.
def renameTag(readerId, OldEpcData, newEpcData, epcBytes):
global lib
error=lib.renameTag(readerId,OldEpcData, newEpcData,epcBytes)
return error
# newEPCdata is a string with hex char values of epc data
# epcMemoryBytes is the size in bytes of the EPCData memory bank (at least 12 bytes).
# It's half of the length of the string with values!!
def writeTag(readerId, newEpcData,epcMemoryBytes):
global lib
error=lib.writeTag(readerId, newEpcData,epcMemoryBytes)
return error
# reenables reader reading
def reStartReader(readerID):
global lib
error=lib.reStartReader(readerID)
return error
# configures READING transmission power to value, given in 100*dBm (should be between 500 and 3000)
# TODO find out minimum step of power values
def setPower(readerID, value):
global lib
lib.setPower(readerID, value)
# returns the current READING power configuration in 100*dBm
def getPower(readerID):
global lib
power = lib.getPower(readerID)
return power
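# Usage sketch (assumes libmercuryrfid.so.1 is installed and a reader is attached;
# the device URI and timings are illustrative):
#
# def on_tag(tag_data):
#     print(tag_data)
#
# init()
# reader_id = startReader("tmr:///dev/ttyUSB0", on_tag)
# time.sleep(10)
# stopReader(reader_id)
# close()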
|
StarcoderdataPython
|
4857202
|
from sales import models as m_sales
def delete_sale_docs(task, status):
from .models import STATUS_APPROVED
sale_id = int(task.reference)
sale = m_sales.Sale.objects.get(pk=sale_id)
if status == STATUS_APPROVED:
m_sales.Document.objects.filter(sale_id=sale_id).delete()
m_sales.Sale.objects.filter(pk=sale_id).update(agent=None, task=None)
return f'Documents deleted successfully from sale: {sale.sales_order}'
else:
        return f'Document deletion rejected for sale: {sale.sales_order}'
def waive_missing_c2(task, status):
from .models import STATUS_APPROVED
sale_id = int(task.reference)
sale = m_sales.Sale.objects.get(pk=sale_id)
if status == STATUS_APPROVED:
m_sales.Document.objects.filter(sale_id=sale_id).update(status=1)
m_sales.Sale.objects.filter(pk=sale_id).update(task=None, docs_flag=2)
return f'Documents with missing C2 successfully approved for sale: {sale.sales_order}'
else:
m_sales.Document.objects.filter(sale_id=sale_id).delete()
m_sales.Sale.objects.filter(pk=sale_id).update(agent=None, task=None)
        return f'Documents with missing C2 rejected for sale: {sale.sales_order}'
EXECUTOR_CHOICES = [
('delete_sale_docs', 'Delete Sale Documents'),
('waive_missing_c2', 'Waive Missing C2 Sale Documents'),
]
|
StarcoderdataPython
|
6507091
|
<reponame>jyxzhang/hknweb<filename>hknweb/academics/models/logistics/__init__.py
from hknweb.academics.models.logistics.course import Course
from hknweb.academics.models.logistics.department import Department
from hknweb.academics.models.logistics.instructor import Instructor
from hknweb.academics.models.logistics.semester import Semester
|
StarcoderdataPython
|
11392571
|
import pytest
from Registry.Registry import RegSZ
from regrippy.plugins.typedurls import Plugin as plugin
from .reg_mock import (LoggerMock, RegistryKeyMock, RegistryMock,
RegistryValueMock)
@pytest.fixture
def mock_reg():
key = RegistryKeyMock.build("Software\\Microsoft\\Internet Explorer\\TypedURLs")
reg = RegistryMock("NTUSER.DAT", "ntuser.dat", key.root())
url1 = RegistryValueMock("url1", "https://outlook.com", RegSZ)
url2 = RegistryValueMock("url2", "https://airbus.com/order", RegSZ)
key.add_value(url1)
key.add_value(url2)
return reg
def test_typedurls(mock_reg):
p = plugin(mock_reg, LoggerMock(), "NTUSER.DAT", "-")
results = list(p.run())
assert len(results) == 2
assert results[0].value_data == "https://outlook.com", "First URL should be Outlook"
assert (
results[1].value_data == "https://airbus.com/order"
), "Second URL should be Airbus"
|
StarcoderdataPython
|
9794417
|
<gh_stars>0
class Handler(object):
context = None
resources = None
action = None
params = None
name = None
def __init__(self, context, resources, action, name, params):
self.context = context
self.resources = resources
self.action = action
self.name = name
self.params = params
def get_full_resource_name(self):
"""
Return full resource name
"""
return ' '.join(self.resources)
def save_to_context(self, key=None, value=None):
"""
Push value into context.
        If 'key' is None, the full resource name will be used as the key.
        If 'value' is None, the object name will be used as the value.
"""
        if key is None:
            key = self.get_full_resource_name()
        if value is None:
            value = self.name
#print("[{0}:{1}]".format(key, value))
self.context[key] = value
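# Sketch (illustrative values, not part of the original file): the full resource name
# becomes the context key and the object name becomes the value.
def _handler_example():
    ctx = {}
    handler = Handler(ctx, ["vm", "disk"], "attach", "disk-01", params={})
    handler.save_to_context()  # ctx == {"vm disk": "disk-01"}
    return ctx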
|
StarcoderdataPython
|
304070
|
# SPDX-License-Identifier: MIT
# Copyright (C) 2020-2021 Mobica Limited
"""Provide error handling helpers"""
NO_ERROR = 0
CONFIGURATION_ERROR = 1
REQUEST_ERROR = 2
FILESYSTEM_ERROR = 3
INTEGRATION_ERROR = 4 # Indicates that some assumption about how the Jira works seems to be false
INVALID_ARGUMENT_ERROR = 5
INPUT_DATA_ERROR = 6 # There is some problem with input data
JIRA_DATA_ERROR = 7 # There is some problem with the jira stored data
class CjmError(Exception):
"""Exception to be raised by cjm library functions and by cjm-* and sm-* scripts"""
def __init__(self, code):
super().__init__(code)
self.code = code
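# Sketch (not part of the original module): scripts typically catch CjmError and use
# its code as the process exit status.
def _example_exit_code():
    try:
        raise CjmError(CONFIGURATION_ERROR)
    except CjmError as error:
        return error.code  # 1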
|
StarcoderdataPython
|